From 7299e3bdcdae5bd7dacd08be67f706fdbaab8543 Mon Sep 17 00:00:00 2001 From: Travis Patterson Date: Wed, 20 Apr 2022 12:14:33 -0600 Subject: [PATCH 001/223] Fix query filtering (#5951) - milli -> nano conversion in the querier - add deletes to store requests from the ingester --- pkg/ingester/ingester.go | 2 ++ pkg/querier/querier.go | 6 +++--- pkg/querier/querier_test.go | 20 ++++++++++---------- 3 files changed, 15 insertions(+), 13 deletions(-) diff --git a/pkg/ingester/ingester.go b/pkg/ingester/ingester.go index 25b6cfe6d3b87..70aca05764fe8 100644 --- a/pkg/ingester/ingester.go +++ b/pkg/ingester/ingester.go @@ -592,6 +592,7 @@ func (i *Ingester) Query(req *logproto.QueryRequest, queryServer logproto.Querie End: end, Limit: req.Limit, Shards: req.Shards, + Deletes: req.Deletes, }} storeItr, err := i.store.SelectLogs(ctx, storeReq) if err != nil { @@ -628,6 +629,7 @@ func (i *Ingester) QuerySample(req *logproto.SampleQueryRequest, queryServer log End: end, Selector: req.Selector, Shards: req.Shards, + Deletes: req.Deletes, }} storeItr, err := i.store.SelectSamples(ctx, storeReq) if err != nil { diff --git a/pkg/querier/querier.go b/pkg/querier/querier.go index 66886b468415e..1beb98970c0ab 100644 --- a/pkg/querier/querier.go +++ b/pkg/querier/querier.go @@ -225,11 +225,11 @@ func (q *SingleTenantQuerier) deletesForUser(ctx context.Context, startT, endT t var deletes []*logproto.Delete for _, del := range d { - if int64(del.StartTime) <= end && int64(del.EndTime) >= start { + if del.StartTime.UnixNano() <= end && del.EndTime.UnixNano() >= start { deletes = append(deletes, &logproto.Delete{ Selector: del.Query, - Start: int64(del.StartTime), - End: int64(del.EndTime), + Start: del.StartTime.UnixNano(), + End: del.EndTime.UnixNano(), }) } } diff --git a/pkg/querier/querier_test.go b/pkg/querier/querier_test.go index 1242a4e4c1632..c9a080ea032ca 100644 --- a/pkg/querier/querier_test.go +++ b/pkg/querier/querier_test.go @@ -761,8 +761,8 @@ func TestQuerier_SelectLogWithDeletes(t *testing.T) { request := logproto.QueryRequest{ Selector: `{type="test"} |= "foo"`, Limit: 10, - Start: time.Unix(0, 300), - End: time.Unix(0, 600), + Start: time.Unix(0, 300000000), + End: time.Unix(0, 600000000), Direction: logproto.FORWARD, } @@ -776,9 +776,9 @@ func TestQuerier_SelectLogWithDeletes(t *testing.T) { End: request.End, Direction: request.Direction, Deletes: []*logproto.Delete{ - {Selector: "1", Start: 200, End: 400}, - {Selector: "2", Start: 400, End: 500}, - {Selector: "3", Start: 500, End: 700}, + {Selector: "1", Start: 200000000, End: 400000000}, + {Selector: "2", Start: 400000000, End: 500000000}, + {Selector: "3", Start: 500000000, End: 700000000}, }, } @@ -822,8 +822,8 @@ func TestQuerier_SelectSamplesWithDeletes(t *testing.T) { request := logproto.SampleQueryRequest{ Selector: `count_over_time({foo="bar"}[5m])`, - Start: time.Unix(0, 300), - End: time.Unix(0, 600), + Start: time.Unix(0, 300000000), + End: time.Unix(0, 600000000), } _, err = q.SelectSamples(ctx, logql.SelectSampleParams{SampleQueryRequest: &request}) @@ -835,9 +835,9 @@ func TestQuerier_SelectSamplesWithDeletes(t *testing.T) { Start: request.Start, End: request.End, Deletes: []*logproto.Delete{ - {Selector: "1", Start: 200, End: 400}, - {Selector: "2", Start: 400, End: 500}, - {Selector: "3", Start: 500, End: 700}, + {Selector: "1", Start: 200000000, End: 400000000}, + {Selector: "2", Start: 400000000, End: 500000000}, + {Selector: "3", Start: 500000000, End: 700000000}, }, }, } From ef56b7375820d49d6360042cb80b6c99a268fdf8 Mon 
Sep 17 00:00:00 2001 From: Michel Hollands <42814411+MichelHollands@users.noreply.github.com> Date: Thu, 21 Apr 2022 06:58:36 +0100 Subject: [PATCH 002/223] Add query filtering to the tailer (#5969) * Add some comments Signed-off-by: Michel Hollands * Use deletes in tail Signed-off-by: Michel Hollands * Change error message Signed-off-by: Michel Hollands --- pkg/querier/querier.go | 6 ++++++ .../stores/shipper/compactor/deletion/request_handler.go | 2 +- 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/pkg/querier/querier.go b/pkg/querier/querier.go index 1beb98970c0ab..6cf307120ac18 100644 --- a/pkg/querier/querier.go +++ b/pkg/querier/querier.go @@ -369,6 +369,11 @@ func (q *SingleTenantQuerier) Tail(ctx context.Context, req *logproto.TailReques return nil, err } + deletes, err := q.deletesForUser(ctx, req.Start, time.Now()) + if err != nil { + return nil, err + } + histReq := logql.SelectLogParams{ QueryRequest: &logproto.QueryRequest{ Selector: req.Query, @@ -376,6 +381,7 @@ func (q *SingleTenantQuerier) Tail(ctx context.Context, req *logproto.TailReques End: time.Now(), Limit: req.Limit, Direction: logproto.BACKWARD, + Deletes: deletes, }, } diff --git a/pkg/storage/stores/shipper/compactor/deletion/request_handler.go b/pkg/storage/stores/shipper/compactor/deletion/request_handler.go index 3e040efb5bfca..4c1a6aef8d3cb 100644 --- a/pkg/storage/stores/shipper/compactor/deletion/request_handler.go +++ b/pkg/storage/stores/shipper/compactor/deletion/request_handler.go @@ -77,7 +77,7 @@ func (dm *DeleteRequestHandler) AddDeleteRequestHandler(w http.ResponseWriter, r } if endTime > int64(model.Now()) { - http.Error(w, "deletes in future not allowed", http.StatusBadRequest) + http.Error(w, "deletes in the future are not allowed", http.StatusBadRequest) return } } From c700744a7ddb753abbc8f363b3b98992963ebc91 Mon Sep 17 00:00:00 2001 From: irizzant Date: Thu, 21 Apr 2022 07:59:46 +0200 Subject: [PATCH 003/223] Loki: Make DNS resolver configurable in Gateway (#5973) * fix(gateway): make DNS resolver configurable Signed-off-by: Ivan Rizzante * fix: lint --- production/ksonnet/loki/config.libsonnet | 3 +++ production/ksonnet/loki/gateway.libsonnet | 2 +- 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/production/ksonnet/loki/config.libsonnet b/production/ksonnet/loki/config.libsonnet index 71a65976fbd9f..7d9f5a050b152 100644 --- a/production/ksonnet/loki/config.libsonnet +++ b/production/ksonnet/loki/config.libsonnet @@ -92,6 +92,9 @@ dynamodb_secret_access_key: '', dynamodb_region: error 'must specify dynamodb_region', + // DNS Resolver + dns_resolver: 'kube-dns.kube-system.svc.cluster.local', + client_configs: { dynamo: { dynamodb: {} + if $._config.dynamodb_access_key != '' then { diff --git a/production/ksonnet/loki/gateway.libsonnet b/production/ksonnet/loki/gateway.libsonnet index e99628e2fe7f8..e02f3d84fd41c 100644 --- a/production/ksonnet/loki/gateway.libsonnet +++ b/production/ksonnet/loki/gateway.libsonnet @@ -39,7 +39,7 @@ local k = import 'ksonnet-util/kausal.libsonnet'; access_log /dev/stderr main; sendfile on; tcp_nopush on; - resolver kube-dns.kube-system.svc.cluster.local; + resolver $(dns_resolver)s; server { listen 80; From c0f51b9851c45ecc7ee18e2839cdd4d93d02f772 Mon Sep 17 00:00:00 2001 From: afayngelerindbx <38990103+afayngelerindbx@users.noreply.github.com> Date: Thu, 21 Apr 2022 02:04:09 -0400 Subject: [PATCH 004/223] redact s3 credential values when printing config to logs (#5961) --- pkg/loki/config_wrapper_test.go | 12 +++--- 
.../chunk/client/aws/s3_storage_client.go | 32 ++++++++++++---- .../client/aws/s3_storage_client_test.go | 37 +++++++++++++++++-- 3 files changed, 63 insertions(+), 18 deletions(-) diff --git a/pkg/loki/config_wrapper_test.go b/pkg/loki/config_wrapper_test.go index 82547b38ab824..d0150c0c6d8e9 100644 --- a/pkg/loki/config_wrapper_test.go +++ b/pkg/loki/config_wrapper_test.go @@ -267,8 +267,8 @@ memberlist: assert.Equal(t, false, actual.S3ForcePathStyle) assert.Equal(t, "s3://foo-bucket", actual.Endpoint) assert.Equal(t, "us-east1", actual.Region) - assert.Equal(t, "abc123", actual.AccessKeyID) - assert.Equal(t, "def789", actual.SecretAccessKey) + assert.Equal(t, "abc123", actual.AccessKeyID.Value) + assert.Equal(t, "def789", actual.SecretAccessKey.Value) assert.Equal(t, true, actual.Insecure) assert.Equal(t, false, actual.SSEEncryption) assert.Equal(t, 5*time.Minute, actual.HTTPConfig.ResponseHeaderTimeout) @@ -490,8 +490,8 @@ ruler: assert.Equal(t, "s3", config.Ruler.StoreConfig.Type) assert.Equal(t, "s3://foo-bucket", config.Ruler.StoreConfig.S3.Endpoint) assert.Equal(t, "us-east1", config.Ruler.StoreConfig.S3.Region) - assert.Equal(t, "abc123", config.Ruler.StoreConfig.S3.AccessKeyID) - assert.Equal(t, "def789", config.Ruler.StoreConfig.S3.SecretAccessKey) + assert.Equal(t, "abc123", config.Ruler.StoreConfig.S3.AccessKeyID.Value) + assert.Equal(t, "def789", config.Ruler.StoreConfig.S3.SecretAccessKey.Value) // should be set by common config assert.EqualValues(t, "foobar", config.StorageConfig.GCSConfig.BucketName) @@ -520,8 +520,8 @@ storage_config: assert.Equal(t, "s3://foo-bucket", config.StorageConfig.AWSStorageConfig.S3Config.Endpoint) assert.Equal(t, "us-east1", config.StorageConfig.AWSStorageConfig.S3Config.Region) - assert.Equal(t, "abc123", config.StorageConfig.AWSStorageConfig.S3Config.AccessKeyID) - assert.Equal(t, "def789", config.StorageConfig.AWSStorageConfig.S3Config.SecretAccessKey) + assert.Equal(t, "abc123", config.StorageConfig.AWSStorageConfig.S3Config.AccessKeyID.Value) + assert.Equal(t, "def789", config.StorageConfig.AWSStorageConfig.S3Config.SecretAccessKey.Value) // should be set by common config assert.EqualValues(t, "foobar", config.Ruler.StoreConfig.GCS.BucketName) diff --git a/pkg/storage/chunk/client/aws/s3_storage_client.go b/pkg/storage/chunk/client/aws/s3_storage_client.go index 4499520833725..badf31bb59e9c 100644 --- a/pkg/storage/chunk/client/aws/s3_storage_client.go +++ b/pkg/storage/chunk/client/aws/s3_storage_client.go @@ -14,6 +14,8 @@ import ( "strings" "time" + "gopkg.in/yaml.v2" + "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/aws/credentials" @@ -61,6 +63,20 @@ func init() { s3RequestDuration.Register() } +type secret struct { + Value string +} + +func (secret) MarshalYAML() (interface{}, error) { + return "redacted", nil +} + +func (s *secret) UnmarshalYAML(unmarshal func(interface{}) error) error { + return unmarshal(&s.Value) +} + +var _ yaml.Marshaler = secret{} + // S3Config specifies config for storing chunks on AWS S3. 
type S3Config struct { S3 flagext.URLValue @@ -69,8 +85,8 @@ type S3Config struct { BucketNames string Endpoint string `yaml:"endpoint"` Region string `yaml:"region"` - AccessKeyID string `yaml:"access_key_id"` - SecretAccessKey string `yaml:"secret_access_key"` + AccessKeyID secret `yaml:"access_key_id"` + SecretAccessKey secret `yaml:"secret_access_key"` Insecure bool `yaml:"insecure"` SSEEncryption bool `yaml:"sse_encryption"` HTTPConfig HTTPConfig `yaml:"http_config"` @@ -103,8 +119,8 @@ func (cfg *S3Config) RegisterFlagsWithPrefix(prefix string, f *flag.FlagSet) { f.StringVar(&cfg.Endpoint, prefix+"s3.endpoint", "", "S3 Endpoint to connect to.") f.StringVar(&cfg.Region, prefix+"s3.region", "", "AWS region to use.") - f.StringVar(&cfg.AccessKeyID, prefix+"s3.access-key-id", "", "AWS Access Key ID") - f.StringVar(&cfg.SecretAccessKey, prefix+"s3.secret-access-key", "", "AWS Secret Access Key") + f.StringVar(&cfg.AccessKeyID.Value, prefix+"s3.access-key-id", "", "AWS Access Key ID") + f.StringVar(&cfg.SecretAccessKey.Value, prefix+"s3.secret-access-key", "", "AWS Secret Access Key") f.BoolVar(&cfg.Insecure, prefix+"s3.insecure", false, "Disable https on s3 connection.") // TODO Remove in Cortex 1.10.0 @@ -237,13 +253,13 @@ func buildS3Client(cfg S3Config, hedgingCfg hedging.Config, hedging bool) (*s3.S s3Config = s3Config.WithRegion(cfg.Region) } - if cfg.AccessKeyID != "" && cfg.SecretAccessKey == "" || - cfg.AccessKeyID == "" && cfg.SecretAccessKey != "" { + if cfg.AccessKeyID.Value != "" && cfg.SecretAccessKey.Value == "" || + cfg.AccessKeyID.Value == "" && cfg.SecretAccessKey.Value != "" { return nil, errors.New("must supply both an Access Key ID and Secret Access Key or neither") } - if cfg.AccessKeyID != "" && cfg.SecretAccessKey != "" { - creds := credentials.NewStaticCredentials(cfg.AccessKeyID, cfg.SecretAccessKey, "") + if cfg.AccessKeyID.Value != "" && cfg.SecretAccessKey.Value != "" { + creds := credentials.NewStaticCredentials(cfg.AccessKeyID.Value, cfg.SecretAccessKey.Value, "") s3Config = s3Config.WithCredentials(creds) } diff --git a/pkg/storage/chunk/client/aws/s3_storage_client_test.go b/pkg/storage/chunk/client/aws/s3_storage_client_test.go index 66ef3722d9dc5..f61cbeeb5019b 100644 --- a/pkg/storage/chunk/client/aws/s3_storage_client_test.go +++ b/pkg/storage/chunk/client/aws/s3_storage_client_test.go @@ -12,6 +12,8 @@ import ( "testing" "time" + "gopkg.in/yaml.v2" + "github.com/grafana/dskit/backoff" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -37,8 +39,8 @@ func TestRequestMiddleware(t *testing.T) { BucketNames: "buck-o", S3ForcePathStyle: true, Insecure: true, - AccessKeyID: "key", - SecretAccessKey: "secret", + AccessKeyID: secret{"key"}, + SecretAccessKey: secret{"secret"}, } tests := []struct { @@ -126,8 +128,8 @@ func Test_Hedging(t *testing.T) { count := atomic.NewInt32(0) c, err := NewS3ObjectClient(S3Config{ - AccessKeyID: "foo", - SecretAccessKey: "bar", + AccessKeyID: secret{"foo"}, + SecretAccessKey: secret{"bar"}, BackoffConfig: backoff.Config{MaxRetries: 1}, BucketNames: "foo", Inject: func(next http.RoundTripper) http.RoundTripper { @@ -148,3 +150,30 @@ func Test_Hedging(t *testing.T) { }) } } + +func Test_ConfigRedactsCredentials(t *testing.T) { + underTest := S3Config{ + AccessKeyID: secret{"secret key id"}, + SecretAccessKey: secret{"secret access key"}, + } + + output, err := yaml.Marshal(underTest) + require.NoError(t, err) + + require.False(t, bytes.Contains(output, []byte("secret key id"))) + require.False(t, 
bytes.Contains(output, []byte("secret access key"))) } + +func Test_ConfigParsesCredentialsInline(t *testing.T) { + var underTest = S3Config{} + yamlCfg := ` +access_key_id: access key id +secret_access_key: secret access key +` + err := yaml.Unmarshal([]byte(yamlCfg), &underTest) + require.NoError(t, err) + + require.Equal(t, underTest.AccessKeyID.Value, "access key id") + require.Equal(t, underTest.SecretAccessKey.Value, "secret access key") + +} From ea59e4bdc48ebc9c9ff17a9bf9dc01e6833f07fe Mon Sep 17 00:00:00 2001 From: Susana Ferreira Date: Thu, 21 Apr 2022 08:04:51 +0200 Subject: [PATCH 005/223] Add changelog entry for split by range of instant queries (#5966) --- CHANGELOG.md | 1 + pkg/logql/rangemapper.go | 7 ++----- 2 files changed, 3 insertions(+), 5 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 16d41db689089..a1799c097baf2 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -6,6 +6,7 @@ * [5780](https://github.com/grafana/loki/pull/5780) **simonswine**: Update alpine image to 3.15.4. * [5715](https://github.com/grafana/loki/pull/5715) **chaudum** Add option to push RFC5424 syslog messages from Promtail in syslog scrape target. * [5696](https://github.com/grafana/loki/pull/5696) **paullryan** don't block scraping of new logs from cloudflare within promtail if an error is received from cloudflare about too early logs. +* [5662](https://github.com/grafana/loki/pull/5662) **ssncferreira** **chaudum** Improve performance of instant queries by splitting range into multiple subqueries that are executed in parallel. * [5685](https://github.com/grafana/loki/pull/5685) **chaudum** Fix bug in push request parser that allowed users to send arbitrary non-string data as "log line". * [5707](https://github.com/grafana/loki/pull/5707) **franzwong** Promtail: Rename config name limit_config to limits_config. * [5626](https://github.com/grafana/loki/pull/5626) **jeschkies** Support multi-tenant select logs and samples queries. diff --git a/pkg/logql/rangemapper.go b/pkg/logql/rangemapper.go index 1051c73cf7d99..f7ac61d824880 100644 --- a/pkg/logql/rangemapper.go +++ b/pkg/logql/rangemapper.go @@ -47,7 +47,7 @@ var splittableRangeVectorOp = map[string]struct{}{ // that are merged with vector aggregation using "without" and then aggregated // using the vector aggregation with the same operator, // either with or without grouping. -// 5) Left and right hand side of binary operations are split individually +// 5) Left and right-hand side of binary operations are split individually // using the same rules as above. type RangeMapper struct { splitByInterval time.Duration @@ -263,7 +263,6 @@ func (m RangeMapper) mapVectorAggregationExpr(expr *syntax.VectorAggregationExpr // in case the interval is smaller than the configured split interval, // don't split it. - // TODO: what if there is another internal expr with an interval that can be split? if rangeInterval <= m.splitByInterval { return expr, nil } @@ -272,8 +271,6 @@ func (m RangeMapper) mapVectorAggregationExpr(expr *syntax.VectorAggregationExpr // we can push down the outer vector aggregation to the downstream query. // This does not work for `count()` and `topk()`, though. // We also do not want to push down, if the inner expression is a binary operation. - // TODO: Currently, it is sending the last inner expression grouping dowstream. - // Which grouping should be sent downstream?
var vectorAggrPushdown *syntax.VectorAggregationExpr if _, ok := expr.Left.(*syntax.BinOpExpr); !ok && expr.Operation != syntax.OpTypeCount && expr.Operation != syntax.OpTypeTopK { vectorAggrPushdown = expr @@ -342,7 +339,7 @@ func (m RangeMapper) mapRangeAggregationExpr(expr *syntax.RangeAggregationExpr, // supported and the inner expression is also splittable. // A range aggregation is splittable, if the aggregation operation is // supported. -// A binary expression is splittable, if both the left and the right hand side +// A binary expression is splittable, if both the left and the right-hand side // are splittable. func isSplittableByRange(expr syntax.SampleExpr) bool { switch e := expr.(type) { From 38e5e410feb128730f2bf4bc9a97d4c33863fbdf Mon Sep 17 00:00:00 2001 From: Christian Haudum Date: Thu, 21 Apr 2022 08:53:42 +0200 Subject: [PATCH 006/223] Do not parse string of empty matchers (#5980) This fixes a bug in the index gateway when querying values for a label. Since the gRPC handler for LabelValuesForMetricName in the index gateway allows empty matchers in the LabelValuesForMetricNameRequest, we need to check if the matchers string is an empty matcher (`{}`) before we parse the string. This bug was introduced with the index gateway api refactoring in #5892 Fixes: #5965 Signed-off-by: Christian Haudum --- pkg/logql/syntax/parser.go | 6 +++++- pkg/storage/stores/shipper/indexgateway/gateway.go | 11 ++++++++--- 2 files changed, 13 insertions(+), 4 deletions(-) diff --git a/pkg/logql/syntax/parser.go b/pkg/logql/syntax/parser.go index b334c5647438c..12b5f072e280a 100644 --- a/pkg/logql/syntax/parser.go +++ b/pkg/logql/syntax/parser.go @@ -15,7 +15,11 @@ import ( "github.com/grafana/loki/pkg/util" ) -const errAtleastOneEqualityMatcherRequired = "queries require at least one regexp or equality matcher that does not have an empty-compatible value. For instance, app=~\".*\" does not meet this requirement, but app=~\".+\" will" +const ( + EmptyMatchers = "{}" + + errAtleastOneEqualityMatcherRequired = "queries require at least one regexp or equality matcher that does not have an empty-compatible value. For instance, app=~\".*\" does not meet this requirement, but app=~\".+\" will" +) var parserPool = sync.Pool{ New: func() interface{} { diff --git a/pkg/storage/stores/shipper/indexgateway/gateway.go b/pkg/storage/stores/shipper/indexgateway/gateway.go index b2c785fb79bdc..df2bd1aa385e2 100644 --- a/pkg/storage/stores/shipper/indexgateway/gateway.go +++ b/pkg/storage/stores/shipper/indexgateway/gateway.go @@ -333,9 +333,14 @@ func (g *Gateway) LabelValuesForMetricName(ctx context.Context, req *indexgatewa if err != nil { return nil, err } - matchers, err := syntax.ParseMatchers(req.Matchers) - if err != nil { - return nil, err + var matchers []*labels.Matcher + // An empty matchers string cannot be parsed, + // therefore we check the string representation of the matchers. + if req.Matchers != syntax.EmptyMatchers { + matchers, err = syntax.ParseMatchers(req.Matchers) + if err != nil { + return nil, err + } } names, err := g.indexQuerier.LabelValuesForMetricName(ctx, instanceID, req.From, req.Through, req.MetricName, req.LabelName, matchers...) if err != nil { return nil, err } From db238873f9589c669202f5ae93445b72ed9175b7 Mon Sep 17 00:00:00 2001 From: Christian Haudum Date: Thu, 21 Apr 2022 09:23:47 +0200 Subject: [PATCH 007/223] Call correct method on index store (#5979) when index gateway does not implement the new API.
This bug did not yield an error but only returned incorrect results because both functions `c.IndexStore.LabelNamesForMetricName` and `c.IndexStore.LabelValuesForMetricName` return `([]string, error)`. Signed-off-by: Christian Haudum --- pkg/storage/stores/series/series_index_gateway_store.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/storage/stores/series/series_index_gateway_store.go b/pkg/storage/stores/series/series_index_gateway_store.go index fb1a68eac1152..5243c537a5786 100644 --- a/pkg/storage/stores/series/series_index_gateway_store.go +++ b/pkg/storage/stores/series/series_index_gateway_store.go @@ -88,7 +88,7 @@ func (c *IndexGatewayClientStore) LabelValuesForMetricName(ctx context.Context, }) if isUnimplementedCallError(err) { // Handle communication with older index gateways gracefully, by falling back to the index store calls. - return c.IndexStore.LabelNamesForMetricName(ctx, userID, from, through, metricName) + return c.IndexStore.LabelValuesForMetricName(ctx, userID, from, through, metricName, labelName, matchers...) } if err != nil { return nil, err From 66a4692423582ed17cce9bd86b69d55663dc7721 Mon Sep 17 00:00:00 2001 From: irizzant Date: Thu, 21 Apr 2022 09:26:06 +0200 Subject: [PATCH 008/223] fix: remove typo in gateway config (#5981) Signed-off-by: irizzant --- production/ksonnet/loki/gateway.libsonnet | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/production/ksonnet/loki/gateway.libsonnet b/production/ksonnet/loki/gateway.libsonnet index e02f3d84fd41c..f2174dbc7b846 100644 --- a/production/ksonnet/loki/gateway.libsonnet +++ b/production/ksonnet/loki/gateway.libsonnet @@ -39,7 +39,7 @@ local k = import 'ksonnet-util/kausal.libsonnet'; access_log /dev/stderr main; sendfile on; tcp_nopush on; - resolver $(dns_resolver)s; + resolver %(dns_resolver)s; server { listen 80; From 337659602ebea5a107cc38739ea62159fe6f605e Mon Sep 17 00:00:00 2001 From: Christian Simon Date: Thu, 21 Apr 2022 08:50:54 +0100 Subject: [PATCH 009/223] Add string representation of boltdb shipper mode (#5982) --- .../stores/shipper/shipper_index_client.go | 20 ++++++++++++++++--- 1 file changed, 17 insertions(+), 3 deletions(-) diff --git a/pkg/storage/stores/shipper/shipper_index_client.go b/pkg/storage/stores/shipper/shipper_index_client.go index d3b326f095878..700f7508a0e45 100644 --- a/pkg/storage/stores/shipper/shipper_index_client.go +++ b/pkg/storage/stores/shipper/shipper_index_client.go @@ -27,9 +27,11 @@ import ( "github.com/grafana/loki/pkg/util/spanlogger" ) +type Mode int + const ( // ModeReadWrite is to allow both read and write - ModeReadWrite = iota + ModeReadWrite Mode = iota // ModeReadOnly is to allow only read operations ModeReadOnly // ModeWriteOnly is to allow only write operations @@ -42,6 +44,18 @@ const ( UploadInterval = 1 * time.Minute ) +func (m Mode) String() string { + switch m { + case ModeReadWrite: + return "read-write" + case ModeReadOnly: + return "read-only" + case ModeWriteOnly: + return "write-only" + } + return "unknown" +} + type boltDBIndexClient interface { QueryWithCursor(_ context.Context, c *bbolt.Cursor, query index.Query, callback index.QueryPagesCallback) error NewWriteBatch() index.WriteBatch @@ -60,7 +74,7 @@ type Config struct { IndexGatewayClientConfig IndexGatewayClientConfig `yaml:"index_gateway_client"` BuildPerTenantIndex bool `yaml:"build_per_tenant_index"` IngesterName string `yaml:"-"` - Mode int `yaml:"-"` + Mode Mode `yaml:"-"` IngesterDBRetainPeriod time.Duration `yaml:"-"` } @@ -104,7 +118,7 @@ 
func NewShipper(cfg Config, storageClient client.ObjectClient, limits downloads. return nil, err } - level.Info(util_log.Logger).Log("msg", fmt.Sprintf("starting boltdb shipper in %d mode", cfg.Mode)) + level.Info(util_log.Logger).Log("msg", fmt.Sprintf("starting boltdb shipper in %s mode", cfg.Mode)) return &shipper, nil } From 3fa6cc9fded401be1ed1febbea08612042fb243e Mon Sep 17 00:00:00 2001 From: Danny Kopping Date: Thu, 21 Apr 2022 13:51:10 +0200 Subject: [PATCH 010/223] Querier: prevent unnecessary calls to ingesters (#5984) * Restrict gRPC calls to ingesters when requesting series if there is no overlap within the query_ingesters_within value * Add tests for isWithinIngesterMaxLookbackPeriod * Adding tests for calculateIngesterMaxLookbackPeriod * Only send query for range overlapping with ingesters * Restrict gRPC calls to ingesters when requesting series if there is no overlap within the query_ingesters_within value * Add tests for isWithinIngesterMaxLookbackPeriod * Adding tests for calculateIngesterMaxLookbackPeriod * Only send query for range overlapping with ingesters * Add test for when to query ingester and storage * Fix deadlock in Series when just queriying the ingesters * Refactored slightly for better readability * Added CHANGELOG entry * Fix typo * Rephrase test cases names * Move block to outer loop Co-authored-by: Salva Corts --- CHANGELOG.md | 1 + pkg/querier/querier.go | 77 +++++++-- pkg/querier/querier_test.go | 322 ++++++++++++++++++++++++++++++++++++ 3 files changed, 384 insertions(+), 16 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index a1799c097baf2..ae18a678b012e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,4 +1,5 @@ ## Main +* [5984](https://github.com/grafana/loki/pull/5984) **dannykopping** and **salvacorts**: Querier: prevent unnecessary calls to ingesters. * [5899](https://github.com/grafana/loki/pull/5899) **simonswine**: Update go image to 1.17.9. * [5888](https://github.com/grafana/loki/pull/5888) **Papawy** Fix common config net interface name overwritten by ring common config * [5799](https://github.com/grafana/loki/pull/5799) **cyriltovena** Fix deduping issues when multiple entries with the same timestamp exist. diff --git a/pkg/querier/querier.go b/pkg/querier/querier.go index 6cf307120ac18..9207381a30ca8 100644 --- a/pkg/querier/querier.go +++ b/pkg/querier/querier.go @@ -237,19 +237,42 @@ func (q *SingleTenantQuerier) deletesForUser(ctx context.Context, startT, endT t return deletes, nil } +func (q *SingleTenantQuerier) isWithinIngesterMaxLookbackPeriod(maxLookback time.Duration, queryEnd time.Time) bool { + // if no lookback limits are configured, always consider this within the range of the lookback period + if maxLookback <= 0 { + return true + } + + // find the first instance that we would want to query the ingester from... + ingesterOldestStartTime := time.Now().Add(-maxLookback) + + // ...and if the query range ends before that, don't query the ingester + return queryEnd.After(ingesterOldestStartTime) +} + +func (q *SingleTenantQuerier) calculateIngesterMaxLookbackPeriod() time.Duration { + mlb := time.Duration(-1) + if q.cfg.IngesterQueryStoreMaxLookback != 0 { + // IngesterQueryStoreMaxLookback takes the precedence over QueryIngestersWithin while also limiting the store query range. 
+ mlb = q.cfg.IngesterQueryStoreMaxLookback + } else if q.cfg.QueryIngestersWithin != 0 { + mlb = q.cfg.QueryIngestersWithin + } + + return mlb +} + func (q *SingleTenantQuerier) buildQueryIntervals(queryStart, queryEnd time.Time) (*interval, *interval) { // limitQueryInterval is a flag for whether store queries should be limited to start time of ingester queries. limitQueryInterval := false // ingesterMLB having -1 means query ingester for whole duration. - ingesterMLB := time.Duration(-1) if q.cfg.IngesterQueryStoreMaxLookback != 0 { // IngesterQueryStoreMaxLookback takes the precedence over QueryIngestersWithin while also limiting the store query range. limitQueryInterval = true - ingesterMLB = q.cfg.IngesterQueryStoreMaxLookback - } else if q.cfg.QueryIngestersWithin != 0 { - ingesterMLB = q.cfg.QueryIngestersWithin } + ingesterMLB := q.calculateIngesterMaxLookbackPeriod() + // query ingester for whole duration. if ingesterMLB == -1 { i := &interval{ @@ -266,15 +289,18 @@ func (q *SingleTenantQuerier) buildQueryIntervals(queryStart, queryEnd time.Time return i, i } + ingesterQueryWithinRange := q.isWithinIngesterMaxLookbackPeriod(ingesterMLB, queryEnd) + // see if there is an overlap between ingester query interval and actual query interval, if not just do the store query. - ingesterOldestStartTime := time.Now().Add(-ingesterMLB) - if queryEnd.Before(ingesterOldestStartTime) { + if !ingesterQueryWithinRange { return nil, &interval{ start: queryStart, end: queryEnd, } } + ingesterOldestStartTime := time.Now().Add(-ingesterMLB) + // if there is an overlap and we are not limiting the query interval then do both store and ingester query for whole query interval. if !limitQueryInterval { i := &interval{ @@ -327,17 +353,25 @@ func (q *SingleTenantQuerier) Label(ctx context.Context, req *logproto.LabelRequ ctx, cancel := context.WithDeadline(ctx, time.Now().Add(q.cfg.QueryTimeout)) defer cancel() + ingesterQueryInterval, storeQueryInterval := q.buildQueryIntervals(*req.Start, *req.End) + var ingesterValues [][]string - if !q.cfg.QueryStoreOnly { - ingesterValues, err = q.ingesterQuerier.Label(ctx, req) + if !q.cfg.QueryStoreOnly && ingesterQueryInterval != nil { + timeFramedReq := *req + timeFramedReq.Start = &ingesterQueryInterval.start + timeFramedReq.End = &ingesterQueryInterval.end + + ingesterValues, err = q.ingesterQuerier.Label(ctx, &timeFramedReq) if err != nil { return nil, err } } var storeValues []string - if !q.cfg.QueryIngesterOnly { - from, through := model.TimeFromUnixNano(req.Start.UnixNano()), model.TimeFromUnixNano(req.End.UnixNano()) + if !q.cfg.QueryIngesterOnly && storeQueryInterval != nil { + from := model.TimeFromUnixNano(storeQueryInterval.start.UnixNano()) + through := model.TimeFromUnixNano(storeQueryInterval.end.UnixNano()) + if req.Values { storeValues, err = q.store.LabelValuesForMetricName(ctx, userID, from, through, "logs", req.Name) if err != nil { @@ -446,13 +480,17 @@ func (q *SingleTenantQuerier) awaitSeries(ctx context.Context, req *logproto.Ser series := make(chan [][]logproto.SeriesIdentifier, 2) errs := make(chan error, 2) + ingesterQueryInterval, storeQueryInterval := q.buildQueryIntervals(req.Start, req.End) + // fetch series from ingesters and store concurrently - if q.cfg.QueryStoreOnly { - series <- [][]logproto.SeriesIdentifier{} - } else { + if !q.cfg.QueryStoreOnly && ingesterQueryInterval != nil { + timeFramedReq := *req + timeFramedReq.Start = ingesterQueryInterval.start + timeFramedReq.End = ingesterQueryInterval.end + go func() { // fetch 
series identifiers from ingesters - resps, err := q.ingesterQuerier.Series(ctx, req) + resps, err := q.ingesterQuerier.Series(ctx, &timeFramedReq) if err != nil { errs <- err return @@ -460,17 +498,24 @@ func (q *SingleTenantQuerier) awaitSeries(ctx context.Context, req *logproto.Ser series <- resps }() + } else { + // If only queriying the store or the query range does not overlap with the ingester max lookback period (defined by `query_ingesters_within`) + // then don't call out to the ingesters, and send an empty result back to the channel + series <- [][]logproto.SeriesIdentifier{} } - if !q.cfg.QueryIngesterOnly { + if !q.cfg.QueryIngesterOnly && storeQueryInterval != nil { go func() { - storeValues, err := q.seriesForMatchers(ctx, req.Start, req.End, req.GetGroups(), req.Shards) + storeValues, err := q.seriesForMatchers(ctx, storeQueryInterval.start, storeQueryInterval.end, req.GetGroups(), req.Shards) if err != nil { errs <- err return } series <- [][]logproto.SeriesIdentifier{storeValues} }() + } else { + // If we are not querying the store, send an empty result back to the channel + series <- [][]logproto.SeriesIdentifier{} } var sets [][]logproto.SeriesIdentifier diff --git a/pkg/querier/querier_test.go b/pkg/querier/querier_test.go index c9a080ea032ca..3411c5f9b7975 100644 --- a/pkg/querier/querier_test.go +++ b/pkg/querier/querier_test.go @@ -686,6 +686,328 @@ func TestQuerier_buildQueryIntervals(t *testing.T) { } } +func TestQuerier_calculateIngesterMaxLookbackPeriod(t *testing.T) { + for _, tc := range []struct { + name string + ingesterQueryStoreMaxLookback time.Duration + queryIngestersWithin time.Duration + expected time.Duration + }{ + { + name: "defaults are set; infinite lookback period if no values are set", + expected: -1, + }, + { + name: "only setting ingesterQueryStoreMaxLookback", + ingesterQueryStoreMaxLookback: time.Hour, + expected: time.Hour, + }, + { + name: "setting both ingesterQueryStoreMaxLookback and queryIngestersWithin; ingesterQueryStoreMaxLookback takes precedence", + ingesterQueryStoreMaxLookback: time.Hour, + queryIngestersWithin: time.Minute, + expected: time.Hour, + }, + { + name: "only setting queryIngestersWithin", + queryIngestersWithin: time.Minute, + expected: time.Minute, + }, + } { + t.Run(tc.name, func(t *testing.T) { + querier := SingleTenantQuerier{cfg: Config{ + IngesterQueryStoreMaxLookback: tc.ingesterQueryStoreMaxLookback, + QueryIngestersWithin: tc.queryIngestersWithin, + }} + + assert.Equal(t, tc.expected, querier.calculateIngesterMaxLookbackPeriod()) + }) + } +} + +func TestQuerier_isWithinIngesterMaxLookbackPeriod(t *testing.T) { + overlappingQuery := interval{ + start: time.Now().Add(-6 * time.Hour), + end: time.Now(), + } + + nonOverlappingQuery := interval{ + start: time.Now().Add(-24 * time.Hour), + end: time.Now().Add(-12 * time.Hour), + } + + for _, tc := range []struct { + name string + ingesterQueryStoreMaxLookback time.Duration + queryIngestersWithin time.Duration + overlappingWithinRange bool + nonOverlappingWithinRange bool + }{ + { + name: "default values, query ingesters and store for whole duration", + overlappingWithinRange: true, + nonOverlappingWithinRange: true, + }, + { + name: "ingesterQueryStoreMaxLookback set to 1h", + ingesterQueryStoreMaxLookback: time.Hour, + overlappingWithinRange: true, + nonOverlappingWithinRange: false, + }, + { + name: "ingesterQueryStoreMaxLookback set to 10h", + ingesterQueryStoreMaxLookback: 10 * time.Hour, + overlappingWithinRange: true, + nonOverlappingWithinRange: false, + }, 
+ { + name: "ingesterQueryStoreMaxLookback set to 1h and queryIngestersWithin set to 16h, ingesterQueryStoreMaxLookback takes precedence", + ingesterQueryStoreMaxLookback: time.Hour, + queryIngestersWithin: 16 * time.Hour, // if used, this would put the nonOverlapping query in range + overlappingWithinRange: true, + nonOverlappingWithinRange: false, + }, + { + name: "ingesterQueryStoreMaxLookback set to -1, query just ingesters", + ingesterQueryStoreMaxLookback: -1, + overlappingWithinRange: true, + nonOverlappingWithinRange: true, + }, + { + name: "queryIngestersWithin set to 1h", + queryIngestersWithin: time.Hour, + overlappingWithinRange: true, + nonOverlappingWithinRange: false, + }, + { + name: "queryIngestersWithin set to 10h", + queryIngestersWithin: 10 * time.Hour, + overlappingWithinRange: true, + nonOverlappingWithinRange: false, + }, + } { + t.Run(tc.name, func(t *testing.T) { + querier := SingleTenantQuerier{cfg: Config{ + IngesterQueryStoreMaxLookback: tc.ingesterQueryStoreMaxLookback, + QueryIngestersWithin: tc.queryIngestersWithin, + }} + + lookbackPeriod := querier.calculateIngesterMaxLookbackPeriod() + assert.Equal(t, tc.overlappingWithinRange, querier.isWithinIngesterMaxLookbackPeriod(lookbackPeriod, overlappingQuery.end)) + assert.Equal(t, tc.nonOverlappingWithinRange, querier.isWithinIngesterMaxLookbackPeriod(lookbackPeriod, nonOverlappingQuery.end)) + }) + } +} + +func TestQuerier_RequestingIngesters(t *testing.T) { + ctx := user.InjectOrgID(context.Background(), "test") + + requestMapping := map[string]struct { + ingesterMethod string + storeMethod string + }{ + "SelectLogs": { + ingesterMethod: "Query", + storeMethod: "SelectLogs", + }, + "SelectSamples": { + ingesterMethod: "QuerySample", + storeMethod: "SelectSamples", + }, + "LabelValuesForMetricName": { + ingesterMethod: "Label", + storeMethod: "LabelValuesForMetricName", + }, + "LabelNamesForMetricName": { + ingesterMethod: "Label", + storeMethod: "LabelNamesForMetricName", + }, + "Series": { + ingesterMethod: "Series", + storeMethod: "Series", + }, + } + + tests := []struct { + desc string + start, end time.Time + setIngesterQueryStoreMaxLookback bool + expectedCallsStore int + expectedCallsIngesters int + }{ + { + desc: "Data in storage and ingesters", + start: time.Now().Add(-time.Hour * 2), + end: time.Now(), + expectedCallsStore: 1, + expectedCallsIngesters: 1, + }, + { + desc: "Data in ingesters (IngesterQueryStoreMaxLookback not set)", + start: time.Now().Add(-time.Minute * 15), + end: time.Now(), + expectedCallsStore: 1, + expectedCallsIngesters: 1, + }, + { + desc: "Data only in storage", + start: time.Now().Add(-time.Hour * 2), + end: time.Now().Add(-time.Hour * 1), + expectedCallsStore: 1, + expectedCallsIngesters: 0, + }, + { + desc: "Data in ingesters (IngesterQueryStoreMaxLookback set)", + start: time.Now().Add(-time.Minute * 15), + end: time.Now(), + setIngesterQueryStoreMaxLookback: true, + expectedCallsStore: 0, + expectedCallsIngesters: 1, + }, + } + + requests := []struct { + name string + do func(querier *SingleTenantQuerier, start, end time.Time) error + }{ + { + name: "SelectLogs", + do: func(querier *SingleTenantQuerier, start, end time.Time) error { + _, err := querier.SelectLogs(ctx, logql.SelectLogParams{ + QueryRequest: &logproto.QueryRequest{ + Selector: "{type=\"test\", fail=\"yes\"} |= \"foo\"", + Limit: 10, + Start: start, + End: end, + Direction: logproto.FORWARD, + }, + }) + + return err + }, + }, + { + name: "SelectSamples", + do: func(querier *SingleTenantQuerier, start, end 
time.Time) error { + _, err := querier.SelectSamples(ctx, logql.SelectSampleParams{ + SampleQueryRequest: &logproto.SampleQueryRequest{ + Selector: "count_over_time({foo=\"bar\"}[5m])", + Start: start, + End: end, + }, + }) + return err + }, + }, + { + name: "LabelValuesForMetricName", + do: func(querier *SingleTenantQuerier, start, end time.Time) error { + _, err := querier.Label(ctx, &logproto.LabelRequest{ + Name: "type", + Values: true, + Start: &start, + End: &end, + }) + return err + }, + }, + { + name: "LabelNamesForMetricName", + do: func(querier *SingleTenantQuerier, start, end time.Time) error { + _, err := querier.Label(ctx, &logproto.LabelRequest{ + Values: false, + Start: &start, + End: &end, + }) + return err + }, + }, + { + name: "Series", + do: func(querier *SingleTenantQuerier, start, end time.Time) error { + _, err := querier.Series(ctx, &logproto.SeriesRequest{ + Start: start, + End: end, + }) + return err + }, + }, + } + + for _, tc := range tests { + t.Run(tc.desc, func(t *testing.T) { + + conf := mockQuerierConfig() + conf.QueryIngestersWithin = time.Minute * 30 + if tc.setIngesterQueryStoreMaxLookback { + conf.IngesterQueryStoreMaxLookback = conf.QueryIngestersWithin + } + + limits, err := validation.NewOverrides(defaultLimitsTestConfig(), nil) + require.NoError(t, err) + + for _, request := range requests { + t.Run(request.name, func(t *testing.T) { + ingesterClient, store, querier, err := setupIngesterQuerierMocks(conf, limits) + require.NoError(t, err) + + err = request.do(querier, tc.start, tc.end) + require.NoError(t, err) + + callsIngesters := ingesterClient.GetMockedCallsByMethod(requestMapping[request.name].ingesterMethod) + assert.Equal(t, tc.expectedCallsIngesters, len(callsIngesters)) + + callsStore := store.GetMockedCallsByMethod(requestMapping[request.name].storeMethod) + assert.Equal(t, tc.expectedCallsStore, len(callsStore)) + }) + } + }) + } +} + +func setupIngesterQuerierMocks(conf Config, limits *validation.Overrides) (*querierClientMock, *storeMock, *SingleTenantQuerier, error) { + queryClient := newQueryClientMock() + queryClient.On("Recv").Return(mockQueryResponse([]logproto.Stream{mockStream(1, 1)}), nil) + + querySampleClient := newQuerySampleClientMock() + querySampleClient.On("Recv").Return(mockQueryResponse([]logproto.Stream{mockStream(1, 1)}), nil) + + ingesterClient := newQuerierClientMock() + ingesterClient.On("Query", mock.Anything, mock.Anything, mock.Anything).Return(queryClient, nil) + ingesterClient.On("QuerySample", mock.Anything, mock.Anything, mock.Anything).Return(querySampleClient, nil) + ingesterClient.On("Label", mock.Anything, mock.Anything, mock.Anything).Return(mockLabelResponse([]string{"bar"}), nil) + ingesterClient.On("Series", mock.Anything, mock.Anything, mock.Anything).Return(&logproto.SeriesResponse{ + Series: []logproto.SeriesIdentifier{ + { + Labels: map[string]string{"bar": "1"}, + }, + }, + }, nil) + + store := newStoreMock() + store.On("SelectLogs", mock.Anything, mock.Anything).Return(mockStreamIterator(0, 1), nil) + store.On("SelectSamples", mock.Anything, mock.Anything).Return(mockSampleIterator(querySampleClient), nil) + store.On("LabelValuesForMetricName", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return([]string{"1", "2", "3"}, nil) + store.On("LabelNamesForMetricName", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return([]string{"foo"}, nil) + store.On("Series", mock.Anything, 
mock.Anything).Return([]logproto.SeriesIdentifier{ + {Labels: map[string]string{"foo": "1"}}, + }, nil) + + querier, err := newQuerier( + conf, + mockIngesterClientConfig(), + newIngesterClientMockFactory(ingesterClient), + mockReadRingWithOneActiveIngester(), + &mockDeleteGettter{}, + store, limits) + + if err != nil { + return nil, nil, nil, err + } + + return ingesterClient, store, querier, nil +} + type fakeTimeLimits struct { maxQueryLookback time.Duration maxQueryLength time.Duration From 9cef86b162536179180724a0036956fc3572d904 Mon Sep 17 00:00:00 2001 From: Travis Patterson Date: Thu, 21 Apr 2022 11:13:52 -0600 Subject: [PATCH 011/223] Invalidate caches on delete (#5661) * Generate cache invalidation numbers in the delete store * Get cache generation numbers from the store on request * changlog * rename tombstones to something more meaningful * User invisible module * query frontend relies on a compactor to get the cache generation number * fix serialization * source -> name * lint errors * lint errors * log non-200 responses * add jsonnet changes * lint * review feedback * review feedback * client rename --- CHANGELOG.md | 1 + docs/sources/configuration/_index.md | 9 ++ pkg/loki/modules.go | 42 ++++- pkg/lokifrontend/config.go | 3 +- pkg/querier/queryrange/limits_test.go | 4 +- pkg/querier/queryrange/roundtrip.go | 6 +- pkg/querier/queryrange/roundtrip_test.go | 20 +-- .../stores/shipper/compactor/compactor.go | 2 +- .../deletion/delete_requests_manager_test.go | 49 ++---- .../deletion/delete_requests_store.go | 46 ++++++ .../deletion/delete_requests_store_test.go | 24 +++ .../deletion/noop_delete_requests_store.go | 8 + .../compactor/deletion/request_handler.go | 22 +++ .../generationnumber/gennumber_client.go | 81 ++++++++++ .../generationnumber/gennumber_client_test.go | 40 +++++ .../generationnumber/gennumber_loader.go | 153 ++++++++++++++++++ .../generationnumber/gennumber_loader_test.go | 54 +++++++ .../compactor/generationnumber/metrics.go | 36 +++++ .../ksonnet/loki/boltdb_shipper.libsonnet | 4 + production/ksonnet/loki/config.libsonnet | 1 + 20 files changed, 543 insertions(+), 62 deletions(-) create mode 100644 pkg/storage/stores/shipper/compactor/generationnumber/gennumber_client.go create mode 100644 pkg/storage/stores/shipper/compactor/generationnumber/gennumber_client_test.go create mode 100644 pkg/storage/stores/shipper/compactor/generationnumber/gennumber_loader.go create mode 100644 pkg/storage/stores/shipper/compactor/generationnumber/gennumber_loader_test.go create mode 100644 pkg/storage/stores/shipper/compactor/generationnumber/metrics.go diff --git a/CHANGELOG.md b/CHANGELOG.md index ae18a678b012e..3ddf5804c904b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -178,6 +178,7 @@ to include only the most relevant. 
* [5543](https://github.com/grafana/loki/pull/5543) **cyriltovena**: update loki go version to 1.17.8 * [5450](https://github.com/grafana/loki/pull/5450) **BenoitKnecht**: pkg/ruler/base: Add external_labels option * [5484](https://github.com/grafana/loki/pull/5484) **sandeepsukhani**: Add support for per user index query readiness with limits overrides +* [5661](https://github.com/grafana/loki/pull/5661) **masslessparticle**: Invalidate caches on deletes * [5358](https://github.com/grafana/loki/pull/5358) **DylanGuedes**: Add `RingMode` support to `IndexGateway` * [5435](https://github.com/grafana/loki/pull/5435) **slim-bean**: set match_max_concurrent true by default * [5361](https://github.com/grafana/loki/pull/5361) **cyriltovena**: Add usage report into Loki. diff --git a/docs/sources/configuration/_index.md b/docs/sources/configuration/_index.md index 02ba5c1d93d21..8067e753a61c4 100644 --- a/docs/sources/configuration/_index.md +++ b/docs/sources/configuration/_index.md @@ -370,6 +370,10 @@ The `frontend` block configures the Loki query-frontend. # CLI flag: -frontend.downstream-url [downstream_url: <string> | default = ""] +# Address, including port, where the compactor api is served +# CLI flag: -frontend.compactor-address +[compactor_address: <string> | default = ""] + # Log queries that are slower than the specified duration. Set to 0 to disable. # Set to < 0 to enable on all queries. # CLI flag: -frontend.log-queries-longer-than @@ -2041,6 +2045,11 @@ compacts index shards to more performant forms. # CLI flag: -boltdb.shipper.compactor.delete-request-cancel-period [delete_request_cancel_period: <duration> | default = 24h] +# Which deletion mode to use. Supported values are: disabled, +# whole-stream-deletion, filter-only, filter-and-delete +# CLI flag: -boltdb.shipper.compactor.deletion-mode +[deletion_mode: <string> | default = "whole-stream-deletion"] + # Maximum number of tables to compact in parallel. # While increasing this value, please make sure compactor has enough disk space # allocated to be able to store and compact as many tables.
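
The two options documented above work as a pair: `deletion_mode` enables delete-request handling in the compactor, and `compactor_address` tells the query-frontend where to fetch cache generation numbers (in the `all` and `read` targets the address is derived automatically, as the `modules.go` change below shows). A minimal sketch of how these settings might be combined in a microservices deployment; the compactor hostname and port are placeholders, not values taken from this patch:

```yaml
# The query-frontend reaches the compactor over HTTP to learn the
# current cache generation number for each tenant.
frontend:
  compactor_address: http://loki-compactor:3100

# The compactor serves the generation numbers and applies deletes;
# per modules.go below, the delete endpoints also require retention.
compactor:
  retention_enabled: true
  # one of: disabled, whole-stream-deletion, filter-only, filter-and-delete
  deletion_mode: filter-and-delete
```
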
diff --git a/pkg/loki/modules.go b/pkg/loki/modules.go index cb9a80644ea9c..389d97de954ed 100644 --- a/pkg/loki/modules.go +++ b/pkg/loki/modules.go @@ -10,18 +10,17 @@ import ( "os" "time" - "github.com/grafana/dskit/kv" - "github.com/grafana/dskit/tenant" - gerrors "github.com/pkg/errors" - "github.com/NYTimes/gziphandler" "github.com/go-kit/log" "github.com/go-kit/log/level" + "github.com/grafana/dskit/kv" "github.com/grafana/dskit/kv/codec" "github.com/grafana/dskit/kv/memberlist" "github.com/grafana/dskit/ring" "github.com/grafana/dskit/runtimeconfig" "github.com/grafana/dskit/services" + "github.com/grafana/dskit/tenant" + gerrors "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" "github.com/prometheus/common/version" @@ -53,6 +52,7 @@ import ( "github.com/grafana/loki/pkg/storage/stores/shipper" "github.com/grafana/loki/pkg/storage/stores/shipper/compactor" "github.com/grafana/loki/pkg/storage/stores/shipper/compactor/deletion" + "github.com/grafana/loki/pkg/storage/stores/shipper/compactor/generationnumber" "github.com/grafana/loki/pkg/storage/stores/shipper/indexgateway" "github.com/grafana/loki/pkg/storage/stores/shipper/indexgateway/indexgatewaypb" "github.com/grafana/loki/pkg/storage/stores/shipper/uploads" @@ -494,11 +494,17 @@ func (disabledShuffleShardingLimits) MaxQueriersPerUser(userID string) int { ret func (t *Loki) initQueryFrontendTripperware() (_ services.Service, err error) { level.Debug(util_log.Logger).Log("msg", "initializing query frontend tripperware") + cacheGenClient, err := t.cacheGenClient() + if err != nil { + return nil, err + } + tripperware, stopper, err := queryrange.NewTripperware( t.Cfg.QueryRange, util_log.Logger, t.overrides, t.Cfg.SchemaConfig, + generationnumber.NewGenNumberLoader(cacheGenClient, prometheus.DefaultRegisterer), prometheus.DefaultRegisterer, ) if err != nil { @@ -510,6 +516,29 @@ func (t *Loki) initQueryFrontendTripperware() (_ services.Service, err error) { return services.NewIdleService(nil, nil), nil } +func (t *Loki) cacheGenClient() (generationnumber.CacheGenClient, error) { + filteringEnabled, err := deletion.FilteringEnabled(t.Cfg.CompactorConfig.DeletionMode) + if err != nil { + return nil, err + } + + if !filteringEnabled { + return deletion.NewNoOpDeleteRequestsStore(), nil + } + + compactorAddress := t.Cfg.Frontend.CompactorAddress + if t.Cfg.isModuleEnabled(All) || t.Cfg.isModuleEnabled(Read) { + // In single binary or read modes, this module depends on Server + compactorAddress = fmt.Sprintf("http://127.0.0.1:%d", t.Cfg.Server.HTTPListenPort) + } + + if compactorAddress == "" { + return nil, errors.New("query filtering for deletes requires 'compactor_address' to be configured") + } + + return generationnumber.NewGenNumberClient(compactorAddress, &http.Client{Timeout: 5 * time.Second}) +} + func (t *Loki) initQueryFrontend() (_ services.Service, err error) { level.Debug(util_log.Logger).Log("msg", "initializing query frontend", "config", fmt.Sprintf("%+v", t.Cfg.Frontend)) @@ -766,11 +795,12 @@ func (t *Loki) initCompactor() (services.Service, error) { t.Server.HTTP.Path("/compactor/ring").Methods("GET", "POST").Handler(t.compactor) - // TODO: update this when the other deletion modes are available - if t.Cfg.CompactorConfig.RetentionEnabled && t.compactor.DeleteMode() == deletion.WholeStreamDeletion { + if t.Cfg.CompactorConfig.RetentionEnabled && t.compactor.DeleteMode() != deletion.Disabled { t.Server.HTTP.Path("/loki/api/v1/delete").Methods("PUT", 
"POST").Handler(t.HTTPAuthMiddleware.Wrap(http.HandlerFunc(t.compactor.DeleteRequestsHandler.AddDeleteRequestHandler))) t.Server.HTTP.Path("/loki/api/v1/delete").Methods("GET").Handler(t.HTTPAuthMiddleware.Wrap(http.HandlerFunc(t.compactor.DeleteRequestsHandler.GetAllDeleteRequestsHandler))) t.Server.HTTP.Path("/loki/api/v1/delete").Methods("DELETE").Handler(t.HTTPAuthMiddleware.Wrap(http.HandlerFunc(t.compactor.DeleteRequestsHandler.CancelDeleteRequestHandler))) + + t.Server.HTTP.Path("/loki/api/v1/cache/generation_numbers").Methods("GET").Handler(t.HTTPAuthMiddleware.Wrap(http.HandlerFunc(t.compactor.DeleteRequestsHandler.GetCacheGenerationNumberHandler))) } return t.compactor, nil diff --git a/pkg/lokifrontend/config.go b/pkg/lokifrontend/config.go index f4764ff6b4153..b4d18c84cb681 100644 --- a/pkg/lokifrontend/config.go +++ b/pkg/lokifrontend/config.go @@ -15,6 +15,7 @@ type Config struct { CompressResponses bool `yaml:"compress_responses"` DownstreamURL string `yaml:"downstream_url"` + CompactorAddress string `yaml:"compactor_address"` TailProxyURL string `yaml:"tail_proxy_url"` } @@ -27,6 +28,6 @@ func (cfg *Config) RegisterFlags(f *flag.FlagSet) { f.BoolVar(&cfg.CompressResponses, "querier.compress-http-responses", false, "Compress HTTP responses.") f.StringVar(&cfg.DownstreamURL, "frontend.downstream-url", "", "URL of downstream Prometheus.") - + f.StringVar(&cfg.CompactorAddress, "frontend.compactor-address", "", "host and port where the compactor API is listening") f.StringVar(&cfg.TailProxyURL, "frontend.tail-proxy-url", "", "URL of querier for tail proxy.") } diff --git a/pkg/querier/queryrange/limits_test.go b/pkg/querier/queryrange/limits_test.go index b4b23f809f20f..e34865e553b74 100644 --- a/pkg/querier/queryrange/limits_test.go +++ b/pkg/querier/queryrange/limits_test.go @@ -52,7 +52,7 @@ func Test_seriesLimiter(t *testing.T) { cfg.CacheResults = false // split in 7 with 2 in // max. 
l := WithSplitByLimits(fakeLimits{maxSeries: 1, maxQueryParallelism: 2}, time.Hour) - tpw, stopper, err := NewTripperware(cfg, util_log.Logger, l, config.SchemaConfig{}, nil) + tpw, stopper, err := NewTripperware(cfg, util_log.Logger, l, config.SchemaConfig{}, nil, nil) if stopper != nil { defer stopper.Stop() } @@ -237,7 +237,7 @@ func Test_MaxQueryLookBack(t *testing.T) { tpw, stopper, err := NewTripperware(testConfig, util_log.Logger, fakeLimits{ maxQueryLookback: 1 * time.Hour, maxQueryParallelism: 1, - }, config.SchemaConfig{}, nil) + }, config.SchemaConfig{}, nil, nil) if stopper != nil { defer stopper.Stop() } diff --git a/pkg/querier/queryrange/roundtrip.go b/pkg/querier/queryrange/roundtrip.go index 832cef206c217..79bbc706011a0 100644 --- a/pkg/querier/queryrange/roundtrip.go +++ b/pkg/querier/queryrange/roundtrip.go @@ -42,6 +42,7 @@ func NewTripperware( log log.Logger, limits Limits, schema config.SchemaConfig, + cacheGenNumLoader queryrangebase.CacheGenNumberLoader, registerer prometheus.Registerer, ) (queryrangebase.Tripperware, Stopper, error) { metrics := NewMetrics(registerer) @@ -61,7 +62,7 @@ func NewTripperware( } metricsTripperware, err := NewMetricTripperware(cfg, log, limits, schema, LokiCodec, c, - PrometheusExtractor{}, metrics, registerer) + cacheGenNumLoader, PrometheusExtractor{}, metrics, registerer) if err != nil { return nil, nil, err } @@ -387,6 +388,7 @@ func NewMetricTripperware( schema config.SchemaConfig, codec queryrangebase.Codec, c cache.Cache, + cacheGenNumLoader queryrangebase.CacheGenNumberLoader, extractor queryrangebase.Extractor, metrics *Metrics, registerer prometheus.Registerer, @@ -414,7 +416,7 @@ func NewMetricTripperware( limits, codec, extractor, - nil, + cacheGenNumLoader, func(r queryrangebase.Request) bool { return !r.GetCachingOptions().Disabled }, diff --git a/pkg/querier/queryrange/roundtrip_test.go b/pkg/querier/queryrange/roundtrip_test.go index d428ec2a61aab..f5df5c54be724 100644 --- a/pkg/querier/queryrange/roundtrip_test.go +++ b/pkg/querier/queryrange/roundtrip_test.go @@ -109,7 +109,7 @@ var ( // those tests are mostly for testing the glue between all component and make sure they activate correctly. 
func TestMetricsTripperware(t *testing.T) { l := WithSplitByLimits(fakeLimits{maxSeries: math.MaxInt32, maxQueryParallelism: 1}, 4*time.Hour) - tpw, stopper, err := NewTripperware(testConfig, util_log.Logger, l, config.SchemaConfig{}, nil) + tpw, stopper, err := NewTripperware(testConfig, util_log.Logger, l, config.SchemaConfig{}, nil, nil) if stopper != nil { defer stopper.Stop() } @@ -172,7 +172,7 @@ func TestMetricsTripperware(t *testing.T) { } func TestLogFilterTripperware(t *testing.T) { - tpw, stopper, err := NewTripperware(testConfig, util_log.Logger, fakeLimits{maxQueryParallelism: 1}, config.SchemaConfig{}, nil) + tpw, stopper, err := NewTripperware(testConfig, util_log.Logger, fakeLimits{maxQueryParallelism: 1}, config.SchemaConfig{}, nil, nil) if stopper != nil { defer stopper.Stop() } @@ -221,7 +221,7 @@ func TestLogFilterTripperware(t *testing.T) { func TestInstantQueryTripperware(t *testing.T) { testShardingConfig := testConfig testShardingConfig.ShardedQueries = true - tpw, stopper, err := NewTripperware(testShardingConfig, util_log.Logger, fakeLimits{maxQueryParallelism: 1}, config.SchemaConfig{}, nil) + tpw, stopper, err := NewTripperware(testShardingConfig, util_log.Logger, fakeLimits{maxQueryParallelism: 1}, config.SchemaConfig{}, nil, nil) if stopper != nil { defer stopper.Stop() } @@ -257,7 +257,7 @@ func TestInstantQueryTripperware(t *testing.T) { } func TestSeriesTripperware(t *testing.T) { - tpw, stopper, err := NewTripperware(testConfig, util_log.Logger, fakeLimits{maxQueryLength: 48 * time.Hour, maxQueryParallelism: 1}, config.SchemaConfig{}, nil) + tpw, stopper, err := NewTripperware(testConfig, util_log.Logger, fakeLimits{maxQueryLength: 48 * time.Hour, maxQueryParallelism: 1}, config.SchemaConfig{}, nil, nil) if stopper != nil { defer stopper.Stop() } @@ -298,7 +298,7 @@ func TestSeriesTripperware(t *testing.T) { } func TestLabelsTripperware(t *testing.T) { - tpw, stopper, err := NewTripperware(testConfig, util_log.Logger, fakeLimits{maxQueryLength: 48 * time.Hour, maxQueryParallelism: 1}, config.SchemaConfig{}, nil) + tpw, stopper, err := NewTripperware(testConfig, util_log.Logger, fakeLimits{maxQueryLength: 48 * time.Hour, maxQueryParallelism: 1}, config.SchemaConfig{}, nil, nil) if stopper != nil { defer stopper.Stop() } @@ -344,7 +344,7 @@ func TestLabelsTripperware(t *testing.T) { } func TestLogNoRegex(t *testing.T) { - tpw, stopper, err := NewTripperware(testConfig, util_log.Logger, fakeLimits{}, config.SchemaConfig{}, nil) + tpw, stopper, err := NewTripperware(testConfig, util_log.Logger, fakeLimits{}, config.SchemaConfig{}, nil, nil) if stopper != nil { defer stopper.Stop() } @@ -378,7 +378,7 @@ func TestLogNoRegex(t *testing.T) { } func TestUnhandledPath(t *testing.T) { - tpw, stopper, err := NewTripperware(testConfig, util_log.Logger, fakeLimits{}, config.SchemaConfig{}, nil) + tpw, stopper, err := NewTripperware(testConfig, util_log.Logger, fakeLimits{}, config.SchemaConfig{}, nil, nil) if stopper != nil { defer stopper.Stop() } @@ -403,7 +403,7 @@ func TestUnhandledPath(t *testing.T) { func TestRegexpParamsSupport(t *testing.T) { l := WithSplitByLimits(fakeLimits{maxSeries: 1, maxQueryParallelism: 2}, 4*time.Hour) - tpw, stopper, err := NewTripperware(testConfig, util_log.Logger, l, config.SchemaConfig{}, nil) + tpw, stopper, err := NewTripperware(testConfig, util_log.Logger, l, config.SchemaConfig{}, nil, nil) if stopper != nil { defer stopper.Stop() } @@ -486,7 +486,7 @@ func TestPostQueries(t *testing.T) { } func TestEntriesLimitsTripperware(t 
*testing.T) { - tpw, stopper, err := NewTripperware(testConfig, util_log.Logger, fakeLimits{maxEntriesLimitPerQuery: 5000}, config.SchemaConfig{}, nil) + tpw, stopper, err := NewTripperware(testConfig, util_log.Logger, fakeLimits{maxEntriesLimitPerQuery: 5000}, config.SchemaConfig{}, nil, nil) if stopper != nil { defer stopper.Stop() } @@ -517,7 +517,7 @@ func TestEntriesLimitsTripperware(t *testing.T) { } func TestEntriesLimitWithZeroTripperware(t *testing.T) { - tpw, stopper, err := NewTripperware(testConfig, util_log.Logger, fakeLimits{}, config.SchemaConfig{}, nil) + tpw, stopper, err := NewTripperware(testConfig, util_log.Logger, fakeLimits{}, config.SchemaConfig{}, nil, nil) if stopper != nil { defer stopper.Stop() } diff --git a/pkg/storage/stores/shipper/compactor/compactor.go b/pkg/storage/stores/shipper/compactor/compactor.go index 324a8db49babd..fdffe742838f7 100644 --- a/pkg/storage/stores/shipper/compactor/compactor.go +++ b/pkg/storage/stores/shipper/compactor/compactor.go @@ -229,7 +229,7 @@ func (c *Compactor) init(storageConfig storage.Config, schemaConfig config.Schem return err } - if c.deleteMode == deletion.WholeStreamDeletion { + if c.deleteMode != deletion.Disabled { deletionWorkDir := filepath.Join(c.cfg.WorkingDirectory, "deletion") c.deleteRequestsStore, err = deletion.NewDeleteStore(deletionWorkDir, c.indexStorageClient) diff --git a/pkg/storage/stores/shipper/compactor/deletion/delete_requests_manager_test.go b/pkg/storage/stores/shipper/compactor/deletion/delete_requests_manager_test.go index 7e66d9c894c9b..9e3f314580e20 100644 --- a/pkg/storage/stores/shipper/compactor/deletion/delete_requests_manager_test.go +++ b/pkg/storage/stores/shipper/compactor/deletion/delete_requests_manager_test.go @@ -14,46 +14,6 @@ import ( const testUserID = "test-user" -type mockDeleteRequestsStore struct { - deleteRequests []DeleteRequest -} - -func (m mockDeleteRequestsStore) GetDeleteRequestsByStatus(ctx context.Context, status DeleteRequestStatus) ([]DeleteRequest, error) { - return m.deleteRequests, nil -} - -func (m mockDeleteRequestsStore) UpdateStatus(ctx context.Context, userID, requestID string, newStatus DeleteRequestStatus) error { - return nil -} - -func (m mockDeleteRequestsStore) AddDeleteRequest(ctx context.Context, userID string, startTime, endTime model.Time, query string) error { - panic("implement me") -} - -func (m mockDeleteRequestsStore) GetDeleteRequestsForUserByStatus(ctx context.Context, userID string, status DeleteRequestStatus) ([]DeleteRequest, error) { - panic("implement me") -} - -func (m mockDeleteRequestsStore) GetAllDeleteRequestsForUser(ctx context.Context, userID string) ([]DeleteRequest, error) { - panic("implement me") -} - -func (m mockDeleteRequestsStore) GetDeleteRequest(ctx context.Context, userID, requestID string) (*DeleteRequest, error) { - panic("implement me") -} - -func (m mockDeleteRequestsStore) GetPendingDeleteRequestsForUser(ctx context.Context, userID string) ([]DeleteRequest, error) { - panic("implement me") -} - -func (m mockDeleteRequestsStore) RemoveDeleteRequest(ctx context.Context, userID, requestID string, createdAt, startTime, endTime model.Time) error { - panic("implement me") -} - -func (m mockDeleteRequestsStore) Stop() { - panic("implement me") -} - func TestDeleteRequestsManager_Expired(t *testing.T) { type resp struct { isExpired bool @@ -256,3 +216,12 @@ func TestDeleteRequestsManager_Expired(t *testing.T) { }) } } + +type mockDeleteRequestsStore struct { + DeleteRequestsStore + deleteRequests []DeleteRequest +} 
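The rewritten mock above relies on Go interface embedding: embedding DeleteRequestsStore makes the struct satisfy the whole interface, so only the one method the test exercises (GetDeleteRequestsByStatus, just below) needs a real body, and the forty-odd lines of panic("implement me") stubs removed above become unnecessary. Calling any other method now panics through the nil embedded interface instead. A self-contained illustration of the idiom with toy types, not Loki code:

package main

import "fmt"

type Store interface {
	Get(id string) (string, error)
	Put(id, value string) error
}

// mockStore embeds the interface: the zero value already satisfies Store,
// and a test overrides only the methods it actually calls.
type mockStore struct {
	Store
	data map[string]string
}

func (m mockStore) Get(id string) (string, error) {
	return m.data[id], nil
}

func main() {
	m := mockStore{data: map[string]string{"a": "1"}}
	v, _ := m.Get("a")
	fmt.Println(v) // prints "1"
	// m.Put("b", "2") would panic: the embedded Store is nil.
}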
+ +func (m mockDeleteRequestsStore) GetDeleteRequestsByStatus(_ context.Context, _ DeleteRequestStatus) ([]DeleteRequest, error) { + return m.deleteRequests, nil +} diff --git a/pkg/storage/stores/shipper/compactor/deletion/delete_requests_store.go b/pkg/storage/stores/shipper/compactor/deletion/delete_requests_store.go index 36f8e02d2c1d9..bf8aa1407b83a 100644 --- a/pkg/storage/stores/shipper/compactor/deletion/delete_requests_store.go +++ b/pkg/storage/stores/shipper/compactor/deletion/delete_requests_store.go @@ -12,6 +12,8 @@ import ( "time" "unsafe" + "github.com/weaveworks/common/user" + "github.com/prometheus/common/model" "github.com/grafana/loki/pkg/storage/stores/series/index" @@ -29,6 +31,7 @@ const ( deleteRequestID indexType = "1" deleteRequestDetails indexType = "2" + cacheGenNum indexType = "3" tempFileSuffix = ".temp" DeleteRequestsTableName = "delete_requests" @@ -43,7 +46,9 @@ type DeleteRequestsStore interface { UpdateStatus(ctx context.Context, userID, requestID string, newStatus DeleteRequestStatus) error GetDeleteRequest(ctx context.Context, userID, requestID string) (*DeleteRequest, error) RemoveDeleteRequest(ctx context.Context, userID, requestID string, createdAt, startTime, endTime model.Time) error + GetCacheGenerationNumber(ctx context.Context, userID string) (string, error) Stop() + Name() string } // deleteRequestsStore provides all the methods required to manage lifecycle of delete request and things related to it. @@ -106,6 +111,9 @@ func (ds *deleteRequestsStore) addDeleteRequest(ctx context.Context, userID stri writeBatch.Add(DeleteRequestsTableName, fmt.Sprintf("%s:%s", deleteRequestDetails, userIDAndRequestID), []byte(rangeValue), []byte(query)) + // create a gen number for this result + writeBatch.Add(DeleteRequestsTableName, fmt.Sprintf("%s:%s", cacheGenNum, userID), []byte{}, generateCacheGenNumber()) + err := ds.indexClient.BatchWrite(ctx, writeBatch) if err != nil { return nil, err @@ -139,6 +147,11 @@ func (ds *deleteRequestsStore) UpdateStatus(ctx context.Context, userID, request writeBatch := ds.indexClient.NewWriteBatch() writeBatch.Add(DeleteRequestsTableName, string(deleteRequestID), []byte(userIDAndRequestID), []byte(newStatus)) + if newStatus == StatusProcessed { + // remove runtime filtering for deleted data + writeBatch.Add(DeleteRequestsTableName, fmt.Sprintf("%s:%s", cacheGenNum, userID), []byte{}, generateCacheGenNumber()) + } + return ds.indexClient.BatchWrite(ctx, writeBatch) } @@ -162,6 +175,27 @@ func (ds *deleteRequestsStore) GetDeleteRequest(ctx context.Context, userID, req return &deleteRequests[0], nil } +func (ds *deleteRequestsStore) GetCacheGenerationNumber(ctx context.Context, userID string) (string, error) { + query := index.Query{TableName: DeleteRequestsTableName, HashValue: fmt.Sprintf("%s:%s", cacheGenNum, userID)} + ctx = user.InjectOrgID(ctx, userID) + + genNumber := "" + err := ds.indexClient.QueryPages(ctx, []index.Query{query}, func(query index.Query, batch index.ReadBatchResult) (shouldContinue bool) { + itr := batch.Iterator() + for itr.Next() { + genNumber = string(itr.Value()) + break + } + return false + }) + + if err != nil { + return "", err + } + + return genNumber, nil +} + func (ds *deleteRequestsStore) queryDeleteRequests(ctx context.Context, deleteQuery index.Query) ([]DeleteRequest, error) { deleteRequests := []DeleteRequest{} // No need to lock inside the callback since we run a single index query. 
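A note on how these generation numbers drive invalidation: the number is only the current nanosecond timestamp, and the results cache incorporates the tenant's current number into its keys, so each write of a fresh number on add, update, or remove orphans everything cached under the old one. A toy illustration of that scheme; the key layout is an assumption for illustration, not Loki's actual cache code:

package main

import (
	"fmt"
	"strconv"
	"time"
)

// cacheKey embeds the tenant's current generation number, so minting a new
// number makes every key written under the old one unreachable.
func cacheKey(genNumber, tenant, query string) string {
	return fmt.Sprintf("%s:%s:%s", genNumber, tenant, query)
}

// genNow mirrors generateCacheGenNumber from this diff.
func genNow() string {
	return strconv.FormatInt(time.Now().UnixNano(), 10)
}

func main() {
	before := cacheKey(genNow(), "user1", `{app="foo"}`)
	time.Sleep(time.Nanosecond) // ensure the clock advances
	after := cacheKey(genNow(), "user1", `{app="foo"}`) // a processed delete bumps the number
	fmt.Println(before == after)                        // false: stale results are never served again
}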
@@ -234,9 +268,17 @@ func (ds *deleteRequestsStore) RemoveDeleteRequest(ctx context.Context, userID, writeBatch.Delete(DeleteRequestsTableName, fmt.Sprintf("%s:%s", deleteRequestDetails, userIDAndRequestID), []byte(rangeValue)) + // ensure caches are invalidated + writeBatch.Add(DeleteRequestsTableName, fmt.Sprintf("%s:%s", cacheGenNum, userID), + []byte{}, []byte(strconv.FormatInt(time.Now().UnixNano(), 10))) + return ds.indexClient.BatchWrite(ctx, writeBatch) } +func (ds *deleteRequestsStore) Name() string { + return "delete_requests_store" +} + func parseDeleteRequestTimestamps(rangeValue []byte, deleteRequest DeleteRequest) (DeleteRequest, error) { hexParts := strings.Split(string(rangeValue), ":") if len(hexParts) != 3 { @@ -299,3 +341,7 @@ func splitUserIDAndRequestID(rangeValue string) (userID, requestID string) { func unsafeGetString(buf []byte) string { return *((*string)(unsafe.Pointer(&buf))) } + +func generateCacheGenNumber() []byte { + return []byte(strconv.FormatInt(time.Now().UnixNano(), 10)) +} diff --git a/pkg/storage/stores/shipper/compactor/deletion/delete_requests_store_test.go b/pkg/storage/stores/shipper/compactor/deletion/delete_requests_store_test.go index 7c63487ba371e..ffd176ee431d8 100644 --- a/pkg/storage/stores/shipper/compactor/deletion/delete_requests_store_test.go +++ b/pkg/storage/stores/shipper/compactor/deletion/delete_requests_store_test.go @@ -99,6 +99,14 @@ func TestDeleteRequestsStore(t *testing.T) { require.NoError(t, err) compareRequests(t, user2ExpectedRequests, user2Requests) + createGenNumber, err := testDeleteRequestsStore.GetCacheGenerationNumber(context.Background(), user1) + require.NoError(t, err) + require.NotEmpty(t, createGenNumber) + + createGenNumber2, err := testDeleteRequestsStore.GetCacheGenerationNumber(context.Background(), user2) + require.NoError(t, err) + require.NotEmpty(t, createGenNumber2) + // get individual delete requests by id and see if they have expected values for _, expectedRequest := range append(user1Requests, user2Requests...) 
{ actualRequest, err := testDeleteRequestsStore.GetDeleteRequest(context.Background(), expectedRequest.UserID, expectedRequest.RequestID) @@ -133,6 +141,14 @@ func TestDeleteRequestsStore(t *testing.T) { require.NoError(t, err) compareRequests(t, user2ExpectedRequests, user2Requests) + updateGenNumber, err := testDeleteRequestsStore.GetCacheGenerationNumber(context.Background(), user1) + require.NoError(t, err) + require.NotEqual(t, createGenNumber, updateGenNumber) + + updateGenNumber2, err := testDeleteRequestsStore.GetCacheGenerationNumber(context.Background(), user2) + require.NoError(t, err) + require.NotEqual(t, createGenNumber2, updateGenNumber2) + // delete the requests from the store updated previously var remainingRequests []DeleteRequest for i := 0; i < len(user1ExpectedRequests); i++ { @@ -154,6 +170,14 @@ func TestDeleteRequestsStore(t *testing.T) { deleteRequests, err = testDeleteRequestsStore.GetDeleteRequestsByStatus(context.Background(), StatusReceived) require.NoError(t, err) compareRequests(t, remainingRequests, deleteRequests) + + deleteGenNumber, err := testDeleteRequestsStore.GetCacheGenerationNumber(context.Background(), user1) + require.NoError(t, err) + require.NotEqual(t, updateGenNumber, deleteGenNumber) + + deleteGenNumber2, err := testDeleteRequestsStore.GetCacheGenerationNumber(context.Background(), user2) + require.NoError(t, err) + require.NotEqual(t, updateGenNumber2, deleteGenNumber2) } func compareRequests(t *testing.T, expected []DeleteRequest, actual []DeleteRequest) { diff --git a/pkg/storage/stores/shipper/compactor/deletion/noop_delete_requests_store.go b/pkg/storage/stores/shipper/compactor/deletion/noop_delete_requests_store.go index 4089ac715cd9c..ac0d1826455da 100644 --- a/pkg/storage/stores/shipper/compactor/deletion/noop_delete_requests_store.go +++ b/pkg/storage/stores/shipper/compactor/deletion/noop_delete_requests_store.go @@ -36,4 +36,12 @@ func (d *noOpDeleteRequestsStore) RemoveDeleteRequest(ctx context.Context, userI return nil } +func (d *noOpDeleteRequestsStore) GetCacheGenerationNumber(ctx context.Context, userID string) (string, error) { + return "", nil +} + func (d *noOpDeleteRequestsStore) Stop() {} + +func (d *noOpDeleteRequestsStore) Name() string { + return "" +} diff --git a/pkg/storage/stores/shipper/compactor/deletion/request_handler.go b/pkg/storage/stores/shipper/compactor/deletion/request_handler.go index 4c1a6aef8d3cb..4566a7c0f4575 100644 --- a/pkg/storage/stores/shipper/compactor/deletion/request_handler.go +++ b/pkg/storage/stores/shipper/compactor/deletion/request_handler.go @@ -161,3 +161,25 @@ func (dm *DeleteRequestHandler) CancelDeleteRequestHandler(w http.ResponseWriter w.WriteHeader(http.StatusNoContent) } + +// GetCacheGenerationNumberHandler handles requests for a user's cache generation number +func (dm *DeleteRequestHandler) GetCacheGenerationNumberHandler(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + userID, err := tenant.TenantID(ctx) + if err != nil { + http.Error(w, err.Error(), http.StatusBadRequest) + return + } + + cacheGenNumber, err := dm.deleteRequestsStore.GetCacheGenerationNumber(ctx, userID) + if err != nil { + level.Error(util_log.Logger).Log("msg", "error getting cache generation number", "err", err) + http.Error(w, fmt.Sprintf("error getting cache generation number %v", err), http.StatusInternalServerError) + return + } + + if err := json.NewEncoder(w).Encode(cacheGenNumber); err != nil { + level.Error(util_log.Logger).Log("msg", "error marshalling response", "err", err) + 
http.Error(w, fmt.Sprintf("Error marshalling response: %v", err), http.StatusInternalServerError) + } +} diff --git a/pkg/storage/stores/shipper/compactor/generationnumber/gennumber_client.go b/pkg/storage/stores/shipper/compactor/generationnumber/gennumber_client.go new file mode 100644 index 0000000000000..7acb6d9bd6193 --- /dev/null +++ b/pkg/storage/stores/shipper/compactor/generationnumber/gennumber_client.go @@ -0,0 +1,81 @@ +package generationnumber + +import ( + "context" + "encoding/json" + "fmt" + "net/http" + "net/url" + + "github.com/go-kit/log/level" + + "github.com/grafana/loki/pkg/util/log" +) + +const ( + orgHeaderKey = "X-Scope-OrgID" + cacheGenNumPath = "/loki/api/v1/cache/generation_numbers" +) + +type CacheGenClient interface { + GetCacheGenerationNumber(ctx context.Context, userID string) (string, error) + Name() string +} + +type genNumberClient struct { + url string + httpClient doer +} + +type doer interface { + Do(*http.Request) (*http.Response, error) +} + +func NewGenNumberClient(addr string, c doer) (CacheGenClient, error) { + u, err := url.Parse(addr) + if err != nil { + level.Error(log.Logger).Log("msg", "error parsing url", "err", err) + return nil, err + } + u.Path = cacheGenNumPath + + return &genNumberClient{ + url: u.String(), + httpClient: c, + }, nil +} + +func (c *genNumberClient) Name() string { + return "gen_number_client" +} + +func (c *genNumberClient) GetCacheGenerationNumber(ctx context.Context, userID string) (string, error) { + req, err := http.NewRequestWithContext(ctx, http.MethodGet, c.url, nil) + if err != nil { + level.Error(log.Logger).Log("msg", "error getting cache gen numbers from the store", "err", err) + return "", err + } + + req.Header.Set(orgHeaderKey, userID) + + resp, err := c.httpClient.Do(req) + if err != nil { + level.Error(log.Logger).Log("msg", "error getting cache gen numbers from the store", "err", err) + return "", err + } + defer resp.Body.Close() + + if resp.StatusCode < 200 || resp.StatusCode >= 300 { + err := fmt.Errorf("unexpected status code: %d", resp.StatusCode) + level.Error(log.Logger).Log("msg", "error getting cache gen numbers from the store", "err", err) + return "", err + } + + var genNumber string + if err := json.NewDecoder(resp.Body).Decode(&genNumber); err != nil { + level.Error(log.Logger).Log("msg", "error marshalling response", "err", err) + return "", err + } + + return genNumber, err +} diff --git a/pkg/storage/stores/shipper/compactor/generationnumber/gennumber_client_test.go b/pkg/storage/stores/shipper/compactor/generationnumber/gennumber_client_test.go new file mode 100644 index 0000000000000..edd711f06dbfd --- /dev/null +++ b/pkg/storage/stores/shipper/compactor/generationnumber/gennumber_client_test.go @@ -0,0 +1,40 @@ +package generationnumber + +import ( + "context" + "io" + "net/http" + "strings" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestGetCacheGenNumberForUser(t *testing.T) { + httpClient := &mockHTTPClient{ret: `"42"`} + client, err := NewGenNumberClient("http://test-server", httpClient) + require.Nil(t, err) + + cacheGenNumber, err := client.GetCacheGenerationNumber(context.Background(), "userID") + require.Nil(t, err) + + require.Equal(t, "42", cacheGenNumber) + + require.Equal(t, "http://test-server/loki/api/v1/cache/generation_numbers", httpClient.req.URL.String()) + require.Equal(t, http.MethodGet, httpClient.req.Method) + require.Equal(t, "userID", httpClient.req.Header.Get("X-Scope-OrgID")) +} + +type mockHTTPClient struct { + ret string + req 
*http.Request +} + +func (c *mockHTTPClient) Do(req *http.Request) (*http.Response, error) { + c.req = req + + return &http.Response{ + StatusCode: 200, + Body: io.NopCloser(strings.NewReader(c.ret)), + }, nil +} diff --git a/pkg/storage/stores/shipper/compactor/generationnumber/gennumber_loader.go b/pkg/storage/stores/shipper/compactor/generationnumber/gennumber_loader.go new file mode 100644 index 0000000000000..97a92fd6dca8e --- /dev/null +++ b/pkg/storage/stores/shipper/compactor/generationnumber/gennumber_loader.go @@ -0,0 +1,153 @@ +package generationnumber + +import ( + "context" + "fmt" + "strconv" + "sync" + "time" + + "github.com/go-kit/log/level" + + "github.com/grafana/loki/pkg/util/log" + + "github.com/prometheus/client_golang/prometheus" +) + +const reloadDuration = 5 * time.Minute + +type GenNumberLoader struct { + numberGetter CacheGenClient + numbers map[string]string + quit chan struct{} + lock sync.RWMutex + metrics *genLoaderMetrics +} + +func NewGenNumberLoader(g CacheGenClient, registerer prometheus.Registerer) *GenNumberLoader { + if g == nil { + g = &noopNumberGetter{} + } + + l := &GenNumberLoader{ + numberGetter: g, + numbers: make(map[string]string), + metrics: newGenLoaderMetrics(registerer), + } + go l.loop() + + return l +} + +func (l *GenNumberLoader) loop() { + timer := time.NewTicker(reloadDuration) + for { + select { + case <-timer.C: + err := l.reload() + if err != nil { + level.Error(log.Logger).Log("msg", "error reloading generation numbers", "err", err) + } + case <-l.quit: + return + } + } +} + +func (l *GenNumberLoader) reload() error { + updatedGenNumbers, err := l.getUpdatedGenNumbers() + if err != nil { + return err + } + + l.lock.Lock() + defer l.lock.Unlock() + for userID, genNumber := range updatedGenNumbers { + l.numbers[userID] = genNumber + } + + return nil +} + +func (l *GenNumberLoader) getUpdatedGenNumbers() (map[string]string, error) { + l.lock.RLock() + defer l.lock.RUnlock() + + updatedGenNumbers := make(map[string]string) + for userID, oldGenNumber := range l.numbers { + genNumber, err := l.numberGetter.GetCacheGenerationNumber(context.Background(), userID) + if err != nil { + return nil, err + } + + if oldGenNumber != genNumber { + updatedGenNumbers[userID] = genNumber + } + } + + return updatedGenNumbers, nil +} + +func (l *GenNumberLoader) GetResultsCacheGenNumber(tenantIDs []string) string { + return l.getCacheGenNumbersPerTenants(tenantIDs) +} + +func (l *GenNumberLoader) getCacheGenNumbersPerTenants(tenantIDs []string) string { + var max int + for _, tenantID := range tenantIDs { + genNumber := l.getCacheGenNumber(tenantID) + if genNumber == "" { + continue + } + + number, err := strconv.Atoi(genNumber) + if err != nil { + level.Error(log.Logger).Log("msg", "error parsing resultsCacheGenNumber", "user", tenantID, "err", err) + } + + if number > max { + max = number + } + } + + if max == 0 { + return "" + } + return fmt.Sprint(max) +} + +func (l *GenNumberLoader) getCacheGenNumber(userID string) string { + l.lock.RLock() + if genNumber, ok := l.numbers[userID]; ok { + l.lock.RUnlock() + return genNumber + } + l.lock.RUnlock() + + genNumber, err := l.numberGetter.GetCacheGenerationNumber(context.Background(), userID) + if err != nil { + level.Error(log.Logger).Log("msg", "error loading cache generation numbers", "err", err) + l.metrics.cacheGenLoadFailures.WithLabelValues(l.numberGetter.Name()).Inc() + return "" + } + + l.lock.Lock() + defer l.lock.Unlock() + + l.numbers[userID] = genNumber + return genNumber +} + +func (l 
*GenNumberLoader) Stop() { + close(l.quit) +} + +type noopNumberGetter struct{} + +func (g *noopNumberGetter) GetCacheGenerationNumber(_ context.Context, _ string) (string, error) { + return "", nil +} + +func (g *noopNumberGetter) Name() string { + return "" +} diff --git a/pkg/storage/stores/shipper/compactor/generationnumber/gennumber_loader_test.go b/pkg/storage/stores/shipper/compactor/generationnumber/gennumber_loader_test.go new file mode 100644 index 0000000000000..7c5581a8b1811 --- /dev/null +++ b/pkg/storage/stores/shipper/compactor/generationnumber/gennumber_loader_test.go @@ -0,0 +1,54 @@ +package generationnumber + +import ( + "context" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestGetCacheGenNumber(t *testing.T) { + s := &mockGenNumberClient{ + genNumbers: map[string]string{ + "tenant-a": "1000", + "tenant-b": "1050", + }, + } + loader := NewGenNumberLoader(s, nil) + + for _, tc := range []struct { + name string + expectedResultsCacheGenNumber string + tenantIDs []string + }{ + { + name: "single tenant with numeric values", + tenantIDs: []string{"tenant-a"}, + expectedResultsCacheGenNumber: "1000", + }, + { + name: "multiple tenants with numeric values", + tenantIDs: []string{"tenant-a", "tenant-b"}, + expectedResultsCacheGenNumber: "1050", + }, + { + name: "no tenants", // not really an expected call, edge case check to avoid any panics + }, + } { + t.Run(tc.name, func(t *testing.T) { + assert.Equal(t, tc.expectedResultsCacheGenNumber, loader.GetResultsCacheGenNumber(tc.tenantIDs)) + }) + } +} + +type mockGenNumberClient struct { + genNumbers map[string]string +} + +func (g *mockGenNumberClient) GetCacheGenerationNumber(ctx context.Context, userID string) (string, error) { + return g.genNumbers[userID], nil +} + +func (g *mockGenNumberClient) Name() string { + return "" +} diff --git a/pkg/storage/stores/shipper/compactor/generationnumber/metrics.go b/pkg/storage/stores/shipper/compactor/generationnumber/metrics.go new file mode 100644 index 0000000000000..1f302fa08fc74 --- /dev/null +++ b/pkg/storage/stores/shipper/compactor/generationnumber/metrics.go @@ -0,0 +1,36 @@ +package generationnumber + +import ( + "github.com/prometheus/client_golang/prometheus" +) + +// Make this package level because we want several instances of a loader to be able to report metrics +var metrics *genLoaderMetrics + +type genLoaderMetrics struct { + cacheGenLoadFailures *prometheus.CounterVec +} + +func newGenLoaderMetrics(r prometheus.Registerer) *genLoaderMetrics { + if metrics != nil { + return metrics + } + + if r == nil { + return nil + } + + cacheGenLoadFailures := prometheus.NewCounterVec(prometheus.CounterOpts{ + Namespace: "loki", + Name: "delete_cache_gen_load_failures_total", + Help: "Total number of failures while loading cache generation number using gen number loader", + }, []string{"source"}) + + r.MustRegister(cacheGenLoadFailures) + + metrics = &genLoaderMetrics{ + cacheGenLoadFailures: cacheGenLoadFailures, + } + + return metrics +} diff --git a/production/ksonnet/loki/boltdb_shipper.libsonnet b/production/ksonnet/loki/boltdb_shipper.libsonnet index 43946c185c47b..42840f16c00c7 100644 --- a/production/ksonnet/loki/boltdb_shipper.libsonnet +++ b/production/ksonnet/loki/boltdb_shipper.libsonnet @@ -74,4 +74,8 @@ statefulSet.mixin.spec.updateStrategy.withType('RollingUpdate') + statefulSet.mixin.spec.template.spec.securityContext.withFsGroup(10001) // 10001 is the group ID assigned to Loki in the Dockerfile else {}, + + compactor_service: if 
$._config.using_boltdb_shipper then + k.util.serviceFor($.compactor_statefulset) + else {}, } diff --git a/production/ksonnet/loki/config.libsonnet b/production/ksonnet/loki/config.libsonnet index 7d9f5a050b152..9b979fa3b201a 100644 --- a/production/ksonnet/loki/config.libsonnet +++ b/production/ksonnet/loki/config.libsonnet @@ -156,6 +156,7 @@ frontend: { compress_responses: true, log_queries_longer_than: '5s', + compactor_address: 'http://compactor.%s.svc.cluster.local:%d' % [$._config.namespace, $._config.http_listen_port], }, frontend_worker: { match_max_concurrent: true, From b6ce3826b4663098c69a4b5381cc77b9456b239c Mon Sep 17 00:00:00 2001 From: Christian Haudum Date: Fri, 22 Apr 2022 16:12:30 +0200 Subject: [PATCH 012/223] Use new build image 0.20.3 (#5904) --- .drone/drone.yml | 18 +++++++++--------- .golangci.yml | 2 +- Makefile | 2 +- pkg/ingester/ingester.go | 2 +- pkg/storage/stores/tsdb/index/index.go | 4 ++-- 5 files changed, 14 insertions(+), 14 deletions(-) diff --git a/.drone/drone.yml b/.drone/drone.yml index 028aab3f011a5..5629fb999255b 100644 --- a/.drone/drone.yml +++ b/.drone/drone.yml @@ -40,27 +40,27 @@ steps: - make BUILD_IN_CONTAINER=false check-drone-drift depends_on: - clone - image: grafana/loki-build-image:0.20.1 + image: grafana/loki-build-image:0.20.3 name: check-drone-drift - commands: - make BUILD_IN_CONTAINER=false check-generated-files depends_on: - clone - image: grafana/loki-build-image:0.20.1 + image: grafana/loki-build-image:0.20.3 name: check-generated-files - commands: - make BUILD_IN_CONTAINER=false test depends_on: - clone - check-generated-files - image: grafana/loki-build-image:0.20.1 + image: grafana/loki-build-image:0.20.3 name: test - commands: - make BUILD_IN_CONTAINER=false lint depends_on: - clone - check-generated-files - image: grafana/loki-build-image:0.20.1 + image: grafana/loki-build-image:0.20.3 name: lint - commands: - make BUILD_IN_CONTAINER=false check-mod @@ -68,7 +68,7 @@ steps: - clone - test - lint - image: grafana/loki-build-image:0.20.1 + image: grafana/loki-build-image:0.20.3 name: check-mod - commands: - apk add make bash && make lint-scripts @@ -79,19 +79,19 @@ steps: depends_on: - clone - check-generated-files - image: grafana/loki-build-image:0.20.1 + image: grafana/loki-build-image:0.20.3 name: loki - commands: - make BUILD_IN_CONTAINER=false validate-example-configs depends_on: - loki - image: grafana/loki-build-image:0.20.1 + image: grafana/loki-build-image:0.20.3 name: validate-example-configs - commands: - make BUILD_IN_CONTAINER=false check-example-config-doc depends_on: - clone - image: grafana/loki-build-image:0.20.1 + image: grafana/loki-build-image:0.20.3 name: check-example-config-doc trigger: event: @@ -1118,6 +1118,6 @@ kind: secret name: deploy_config --- kind: signature -hmac: 64871d70248fbe43acab816fe107dc657442ded224b48779ac15f4559143a2c6 +hmac: 6e2865a871f73b7bec7caad285b3999bfdab4e85e29606061663f4c9160cc35c ... diff --git a/.golangci.yml b/.golangci.yml index b7ef2ca712a41..a9b535c2504d0 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -16,7 +16,7 @@ run: tests: true # list of build tags, all linters use it. Default is empty list. 
-  build-tags:
+  build-tags: []

 # which dirs to skip: they won't be analyzed;
 # can use regexp here: generated.*, regexp is applied on full path;
diff --git a/Makefile b/Makefile
index 31b3ca51870f8..7b17411cff567 100644
--- a/Makefile
+++ b/Makefile
@@ -27,7 +27,7 @@ DOCKER_IMAGE_DIRS := $(patsubst %/Dockerfile,%,$(DOCKERFILES))
 BUILD_IN_CONTAINER ?= true
 # ensure you run `make drone` after changing this
-BUILD_IMAGE_VERSION := 0.20.1
+BUILD_IMAGE_VERSION := 0.20.3
 # Docker image info
 IMAGE_PREFIX ?= grafana
diff --git a/pkg/ingester/ingester.go b/pkg/ingester/ingester.go
index 70aca05764fe8..e12b9548bffb9 100644
--- a/pkg/ingester/ingester.go
+++ b/pkg/ingester/ingester.go
@@ -551,7 +551,7 @@ func (i *Ingester) Push(ctx context.Context, req *logproto.PushRequest) (*logpro
 	return &logproto.PushResponse{}, err
 }

-func (i *Ingester) GetOrCreateInstance(instanceID string) *instance {
+func (i *Ingester) GetOrCreateInstance(instanceID string) *instance { //nolint:revive
 	inst, ok := i.getInstanceByID(instanceID)
 	if ok {
 		return inst
diff --git a/pkg/storage/stores/tsdb/index/index.go b/pkg/storage/stores/tsdb/index/index.go
index 0d34f96aaf95a..af69a5b64a126 100644
--- a/pkg/storage/stores/tsdb/index/index.go
+++ b/pkg/storage/stores/tsdb/index/index.go
@@ -1307,7 +1307,7 @@ func newReader(b ByteSlice, c io.Closer) (*Reader, error) {
 		r.nameSymbols[off] = k
 	}

-	r.fingerprintOffsets, err = ReadFingerprintOffsetsTable(r.b, r.toc.FingerprintOffsets)
+	r.fingerprintOffsets, err = readFingerprintOffsetsTable(r.b, r.toc.FingerprintOffsets)
 	if err != nil {
 		return nil, errors.Wrap(err, "loading fingerprint offsets")
 	}
@@ -1522,7 +1522,7 @@ func ReadOffsetTable(bs ByteSlice, off uint64, f func([]string, uint64, int) err
 	return d.Err()
 }

-func ReadFingerprintOffsetsTable(bs ByteSlice, off uint64) (FingerprintOffsets, error) {
+func readFingerprintOffsetsTable(bs ByteSlice, off uint64) (FingerprintOffsets, error) {
 	d := encoding.DecWrap(tsdb_enc.NewDecbufAt(bs, int(off), castagnoliTable))
 	cnt := d.Be32()
 	res := make(FingerprintOffsets, 0, int(cnt))

From 7c462caa6407771957035433f74c1a95233c60f1 Mon Sep 17 00:00:00 2001
From: Christian Simon
Date: Fri, 22 Apr 2022 18:07:43 +0100
Subject: [PATCH 013/223] Add buf to the build image (#5785)

This is part 1 of 2 PRs; it updates the build image to version 0.20.4 and
adds the buf utility, which will later be used for formatting *.proto files.
---
 .drone/drone.jsonnet        | 2 +-
 .drone/drone.yml            | 4 ++--
 loki-build-image/Dockerfile | 7 +++++++
 3 files changed, 10 insertions(+), 3 deletions(-)

diff --git a/.drone/drone.jsonnet b/.drone/drone.jsonnet
index beec27d5cec4a..32cd9bf017b93 100644
--- a/.drone/drone.jsonnet
+++ b/.drone/drone.jsonnet
@@ -355,7 +355,7 @@ local manifest(apps) = pipeline('manifest') {
       dockerfile: 'loki-build-image/Dockerfile',
       username: { from_secret: docker_username_secret.name },
       password: { from_secret: docker_password_secret.name },
-      tags: ['0.20.3'],
+      tags: ['0.20.4'],
       dry_run: false,
     },
   },
diff --git a/.drone/drone.yml b/.drone/drone.yml
index 5629fb999255b..bef97d742cca1 100644
--- a/.drone/drone.yml
+++ b/.drone/drone.yml
@@ -12,7 +12,7 @@ steps:
       from_secret: docker_password
     repo: grafana/loki-build-image
     tags:
-    - 0.20.3
+    - 0.20.4
    username:
      from_secret: docker_username
   when:
@@ -1118,6 +1118,6 @@ kind: secret
 name: deploy_config
 ---
 kind: signature
-hmac: 6e2865a871f73b7bec7caad285b3999bfdab4e85e29606061663f4c9160cc35c
+hmac: 3e10123a8afd1837124250ba4eb4e6db1f9a324fd47e01371a33e23cf7df0a8a

...
diff --git a/loki-build-image/Dockerfile b/loki-build-image/Dockerfile index 3af809fc19535..99a7d05ac4529 100644 --- a/loki-build-image/Dockerfile +++ b/loki-build-image/Dockerfile @@ -26,6 +26,12 @@ RUN apk add --no-cache curl && \ cd / && \ curl -sfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s v1.45.2 +FROM alpine:3.15.4 as buf + +RUN apk add --no-cache curl && \ + curl -sSL "https://github.com/bufbuild/buf/releases/download/v1.4.0/buf-$(uname -s)-$(uname -m)" -o "/usr/bin/buf" && \ + chmod +x "/usr/bin/buf" + FROM alpine:3.15.4 as docker RUN apk add --no-cache docker-cli @@ -69,6 +75,7 @@ COPY --from=docker /usr/bin/docker /usr/bin/docker COPY --from=helm /usr/bin/helm /usr/bin/helm COPY --from=lychee /usr/bin/lychee /usr/bin/lychee COPY --from=golangci /bin/golangci-lint /usr/local/bin +COPY --from=buf /usr/bin/buf /usr/bin/buf COPY --from=drone /usr/local/bin/drone /usr/bin/drone COPY --from=faillint /go/bin/faillint /usr/bin/faillint COPY --from=delve /go/bin/dlv /usr/bin/dlv From 4a91fdcc7ffe8eb9ee08e13ce8909f778444a127 Mon Sep 17 00:00:00 2001 From: Vladyslav Diachenko <82767850+vlad-diachenko@users.noreply.github.com> Date: Mon, 25 Apr 2022 10:28:58 +0300 Subject: [PATCH 014/223] migrated gelfTarget to go-gelf/v2 library that contains fix for chunked messages (#5992) --- .../pkg/promtail/targets/gelf/gelftarget.go | 2 +- .../promtail/targets/gelf/gelftarget_test.go | 110 +++++++++- go.mod | 7 +- go.sum | 7 +- .../grafana/go-gelf/v2}/LICENSE | 0 .../grafana/go-gelf/v2/gelf/defragmentator.go | 53 +++++ .../grafana/go-gelf/v2}/gelf/message.go | 0 .../grafana/go-gelf/v2/gelf/reader.go | 198 ++++++++++++++++++ .../grafana/go-gelf/v2}/gelf/tcpreader.go | 0 .../grafana/go-gelf/v2}/gelf/tcpwriter.go | 0 .../grafana/go-gelf/v2}/gelf/udpwriter.go | 7 +- .../grafana/go-gelf/v2}/gelf/utils.go | 0 .../grafana/go-gelf/v2}/gelf/writer.go | 0 .../testify/assert/assertion_compare.go | 54 ++++- .../assert/assertion_compare_can_convert.go | 16 ++ .../assert/assertion_compare_legacy.go | 16 ++ .../testify/assert/assertion_format.go | 12 ++ .../testify/assert/assertion_forward.go | 24 +++ .../testify/assert/assertion_order.go | 8 +- .../stretchr/testify/assert/assertions.go | 112 ++++++---- .../github.com/stretchr/testify/mock/mock.go | 10 +- .../stretchr/testify/require/require.go | 30 +++ .../testify/require/require_forward.go | 24 +++ .../Graylog2/go-gelf.v2/gelf/reader.go | 144 ------------- vendor/modules.txt | 9 +- 25 files changed, 626 insertions(+), 217 deletions(-) rename vendor/{gopkg.in/Graylog2/go-gelf.v2 => github.com/grafana/go-gelf/v2}/LICENSE (100%) create mode 100644 vendor/github.com/grafana/go-gelf/v2/gelf/defragmentator.go rename vendor/{gopkg.in/Graylog2/go-gelf.v2 => github.com/grafana/go-gelf/v2}/gelf/message.go (100%) create mode 100644 vendor/github.com/grafana/go-gelf/v2/gelf/reader.go rename vendor/{gopkg.in/Graylog2/go-gelf.v2 => github.com/grafana/go-gelf/v2}/gelf/tcpreader.go (100%) rename vendor/{gopkg.in/Graylog2/go-gelf.v2 => github.com/grafana/go-gelf/v2}/gelf/tcpwriter.go (100%) rename vendor/{gopkg.in/Graylog2/go-gelf.v2 => github.com/grafana/go-gelf/v2}/gelf/udpwriter.go (94%) rename vendor/{gopkg.in/Graylog2/go-gelf.v2 => github.com/grafana/go-gelf/v2}/gelf/utils.go (100%) rename vendor/{gopkg.in/Graylog2/go-gelf.v2 => github.com/grafana/go-gelf/v2}/gelf/writer.go (100%) create mode 100644 vendor/github.com/stretchr/testify/assert/assertion_compare_can_convert.go create mode 100644 
vendor/github.com/stretchr/testify/assert/assertion_compare_legacy.go delete mode 100644 vendor/gopkg.in/Graylog2/go-gelf.v2/gelf/reader.go diff --git a/clients/pkg/promtail/targets/gelf/gelftarget.go b/clients/pkg/promtail/targets/gelf/gelftarget.go index e0032531c7fd9..d27ce7d0eb5e8 100644 --- a/clients/pkg/promtail/targets/gelf/gelftarget.go +++ b/clients/pkg/promtail/targets/gelf/gelftarget.go @@ -9,10 +9,10 @@ import ( "github.com/go-kit/log" "github.com/go-kit/log/level" + "github.com/grafana/go-gelf/v2/gelf" "github.com/prometheus/common/model" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/relabel" - "gopkg.in/Graylog2/go-gelf.v2/gelf" "github.com/grafana/loki/clients/pkg/promtail/api" "github.com/grafana/loki/clients/pkg/promtail/scrapeconfig" diff --git a/clients/pkg/promtail/targets/gelf/gelftarget_test.go b/clients/pkg/promtail/targets/gelf/gelftarget_test.go index fdbb4f04f984a..86a304ef9a7a0 100644 --- a/clients/pkg/promtail/targets/gelf/gelftarget_test.go +++ b/clients/pkg/promtail/targets/gelf/gelftarget_test.go @@ -1,14 +1,19 @@ package gelf import ( + "crypto/rand" + "fmt" + "io" + "net" + "strings" "testing" "time" "github.com/go-kit/log" + "github.com/grafana/go-gelf/v2/gelf" "github.com/prometheus/common/model" "github.com/prometheus/prometheus/model/relabel" "github.com/stretchr/testify/require" - "gopkg.in/Graylog2/go-gelf.v2/gelf" "github.com/grafana/loki/clients/pkg/promtail/client/fake" "github.com/grafana/loki/clients/pkg/promtail/scrapeconfig" @@ -16,12 +21,11 @@ import ( func Test_Gelf(t *testing.T) { client := fake.New(func() {}) - tm, err := NewTargetManager(NewMetrics(nil), log.NewNopLogger(), client, []scrapeconfig.Config{ { JobName: "gelf", GelfConfig: &scrapeconfig.GelfTargetConfig{ - ListenAddress: ":12201", + ListenAddress: ":0", UseIncomingTimestamp: true, Labels: model.LabelSet{"cfg": "true"}, }, @@ -51,9 +55,14 @@ func Test_Gelf(t *testing.T) { }, }) require.NoError(t, err) - - w, err := gelf.NewUDPWriter(":12201") + defer tm.Stop() + target := tm.targets["gelf"] + require.NotNil(t, target) + w, err := gelf.NewUDPWriter(target.gelfReader.Addr()) require.NoError(t, err) + defer func() { + require.NoError(t, w.Close()) + }() baseTs := float64(time.Unix(10, 0).Unix()) + 0.250 ts := baseTs @@ -75,7 +84,7 @@ func Test_Gelf(t *testing.T) { require.Eventually(t, func() bool { return len(client.Received()) == 10 - }, 200*time.Millisecond, 20*time.Millisecond) + }, 1*time.Second, 20*time.Millisecond) for i, actual := range client.Received() { require.Equal(t, "error", string(actual.Labels["level"])) @@ -98,10 +107,95 @@ func Test_Gelf(t *testing.T) { require.Equal(t, "gelftest", gelfMsg.Facility) } - - tm.Stop() } func TestConvertTime(t *testing.T) { require.Equal(t, time.Unix(0, int64(time.Second+(time.Duration(250)*time.Millisecond))), secondsToUnixTimestamp(float64(time.Unix(1, 0).Unix())+0.250)) } + +func Test_GelfChunksUnordered(t *testing.T) { + client := fake.New(func() {}) + + tm, err := NewTargetManager(NewMetrics(nil), log.NewNopLogger(), client, []scrapeconfig.Config{ + { + JobName: "gelf", + GelfConfig: &scrapeconfig.GelfTargetConfig{ + ListenAddress: ":0", + }, + }, + }) + require.NoError(t, err) + defer tm.Stop() + + target := tm.targets["gelf"] + require.NotNil(t, target) + connection, err := net.Dial("udp", target.gelfReader.Addr()) + require.NoError(t, err) + defer func() { + require.NoError(t, connection.Close()) + }() + + chunksA := createChunks(t, "a") + chunksB := createChunks(t, "b") + // send 
messages(a, b) chunks in order: chunk-0a, chunk-0b, chunk-1a, chunk-1b + for i := 0; i < len(chunksB); i++ { + writeA, err := connection.Write(chunksA[i]) + require.NoError(t, err) + require.Equal(t, len(chunksA[i]), writeA) + + writeB, err := connection.Write(chunksB[i]) + require.NoError(t, err) + require.Equal(t, len(chunksB[i]), writeB) + } + + require.Eventually(t, func() bool { + return len(client.Received()) == 2 + }, 2*time.Second, 100*time.Millisecond, "expected 2 messages to be received") +} + +func createChunks(t *testing.T, char string) [][]byte { + chunksA, err := splitToChunks([]byte(fmt.Sprintf("{\"short_message\":\"%v\"}", strings.Repeat(char, gelf.ChunkSize*2)))) + require.NoError(t, err) + return chunksA +} + +// static value that indicated that GELF message is chunked +var magicChunked = []byte{0x1e, 0x0f} + +const ( + chunkedHeaderLen = 12 + chunkedDataLen = gelf.ChunkSize - chunkedHeaderLen +) + +func splitToChunks(messageBytes []byte) ([][]byte, error) { + chunksCount := uint8(len(messageBytes)/chunkedDataLen + 1) + messageID := make([]byte, 8) + n, err := io.ReadFull(rand.Reader, messageID) + if err != nil || n != 8 { + return nil, fmt.Errorf("rand.Reader: %d/%s", n, err) + } + chunks := make([][]byte, 0, chunksCount) + bytesLeft := len(messageBytes) + for i := uint8(0); i < chunksCount; i++ { + buf := make([]byte, 0, gelf.ChunkSize) + buf = append(buf, magicChunked...) + buf = append(buf, messageID...) + buf = append(buf, i) + buf = append(buf, chunksCount) + chunkLen := chunkedDataLen + if chunkLen > bytesLeft { + chunkLen = bytesLeft + } + off := int(i) * chunkedDataLen + chunkData := messageBytes[off : off+chunkLen] + buf = append(buf, chunkData...) + + chunks = append(chunks, buf) + bytesLeft -= chunkLen + } + + if bytesLeft != 0 { + return nil, fmt.Errorf("error: %d bytes left after splitting", bytesLeft) + } + return chunks, nil +} diff --git a/go.mod b/go.mod index ec04c64b10ca4..f1ca8b19072ec 100644 --- a/go.mod +++ b/go.mod @@ -48,6 +48,7 @@ require ( github.com/gorilla/mux v1.8.0 github.com/gorilla/websocket v1.4.2 github.com/grafana/dskit v0.0.0-20220331160727-49faf69f72ca + github.com/grafana/go-gelf/v2 v2.0.1 github.com/grafana/regexp v0.0.0-20220304100321-149c8afcd6cb github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645 @@ -85,7 +86,7 @@ require ( github.com/shurcooL/vfsgen v0.0.0-20200824052919-0d455de96546 github.com/sony/gobreaker v0.4.1 github.com/spf13/afero v1.6.0 - github.com/stretchr/testify v1.7.0 + github.com/stretchr/testify v1.7.1 github.com/thanos-io/thanos v0.22.0 github.com/tonistiigi/fifo v0.0.0-20190226154929-a9fb20d87448 github.com/uber/jaeger-client-go v2.30.0+incompatible @@ -101,7 +102,6 @@ require ( golang.org/x/time v0.0.0-20220210224613-90d013bbcef8 google.golang.org/api v0.70.0 google.golang.org/grpc v1.44.0 - gopkg.in/Graylog2/go-gelf.v2 v2.0.0-20191017102106-1550ee647df0 gopkg.in/alecthomas/kingpin.v2 v2.2.6 gopkg.in/fsnotify.v1 v1.4.7 gopkg.in/yaml.v2 v2.4.0 @@ -311,7 +311,4 @@ replace github.com/bradfitz/gomemcache => github.com/themihai/gomemcache v0.0.0- // is v0.19.1. We pin version from late september here. Feel free to remove when updating to later version. replace github.com/thanos-io/thanos v0.22.0 => github.com/thanos-io/thanos v0.19.1-0.20211126105533-c5505f5eaa7d -// We use a fork of Graylog to avoid leaking goroutine when closing the Promtail target. 
-replace gopkg.in/Graylog2/go-gelf.v2 => github.com/grafana/go-gelf v0.0.0-20211112153804-126646b86de8 - replace github.com/cloudflare/cloudflare-go => github.com/cyriltovena/cloudflare-go v0.27.1-0.20211118103540-ff77400bcb93 diff --git a/go.sum b/go.sum index 0224659d21b11..5e258309f8b49 100644 --- a/go.sum +++ b/go.sum @@ -1037,8 +1037,8 @@ github.com/gotestyourself/gotestyourself v2.2.0+incompatible/go.mod h1:zZKM6oeNM github.com/grafana/dskit v0.0.0-20211021180445-3bd016e9d7f1/go.mod h1:uPG2nyK4CtgNDmWv7qyzYcdI+S90kHHRWvHnBtEMBXM= github.com/grafana/dskit v0.0.0-20220331160727-49faf69f72ca h1:0qHzm6VS0bCsSWKHuyfpt+pdpyScdZbzY/IFIyKSYOk= github.com/grafana/dskit v0.0.0-20220331160727-49faf69f72ca/go.mod h1:q51XdMLLHNZJSG6KOGujC20ed2OoLFdx0hBmOEVfRs0= -github.com/grafana/go-gelf v0.0.0-20211112153804-126646b86de8 h1:aEOagXOTqtN9gd4jiDuP/5a81HdoJBqkVfn8WaxbsK4= -github.com/grafana/go-gelf v0.0.0-20211112153804-126646b86de8/go.mod h1:QAvS2C7TtQRhhv9Uf/sxD+BUhpkrPFm5jK/9MzUiDCY= +github.com/grafana/go-gelf/v2 v2.0.1 h1:BOChP0h/jLeD+7F9mL7tq10xVkDG15he3T1zHuQaWak= +github.com/grafana/go-gelf/v2 v2.0.1/go.mod h1:lexHie0xzYGwCgiRGcvZ723bSNyNI8ZRD4s0CLobh90= github.com/grafana/gocql v0.0.0-20200605141915-ba5dc39ece85 h1:xLuzPoOzdfNb/RF/IENCw+oLVdZB4G21VPhkHBgwSHY= github.com/grafana/gocql v0.0.0-20200605141915-ba5dc39ece85/go.mod h1:crI9WX6p0IhrqB+DqIUHulRW853PaNFf7o4UprV//3I= github.com/grafana/regexp v0.0.0-20220202152315-e74e38789280/go.mod h1:M5qHK+eWfAv8VR/265dIuEpL3fNfeC21tXXp9itM24A= @@ -1820,8 +1820,9 @@ github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UV github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.1 h1:5TQK59W5E3v0r2duFAb7P95B6hEeOyEnHRa8MjYSMTY= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= diff --git a/vendor/gopkg.in/Graylog2/go-gelf.v2/LICENSE b/vendor/github.com/grafana/go-gelf/v2/LICENSE similarity index 100% rename from vendor/gopkg.in/Graylog2/go-gelf.v2/LICENSE rename to vendor/github.com/grafana/go-gelf/v2/LICENSE diff --git a/vendor/github.com/grafana/go-gelf/v2/gelf/defragmentator.go b/vendor/github.com/grafana/go-gelf/v2/gelf/defragmentator.go new file mode 100644 index 0000000000000..f19c722f1cd87 --- /dev/null +++ b/vendor/github.com/grafana/go-gelf/v2/gelf/defragmentator.go @@ -0,0 +1,53 @@ +package gelf + +import ( + "bytes" + "time" +) + +// defragmentator provides GELF message de-chunking. +type defragmentator struct { + deadline time.Time + chunksData [][]byte + processed int + totalBytes int +} + +// newDefragmentator returns empty defragmentator with maximum message size and duration +// specified. 
+func newDefragmentator(timeout time.Duration) (res *defragmentator) { + res = new(defragmentator) + res.deadline = time.Now().Add(timeout) + return +} + +// bytes returns message bytes, not nessesarily fully defragmentated. +func (a *defragmentator) bytes() []byte { + return bytes.Join(a.chunksData, nil) +} + +// expired returns true if first chunk is too old. +func (a *defragmentator) expired() bool { + return time.Now().After(a.deadline) +} + +// update feeds the byte chunk to defragmentator, returns true when the message is +// complete. +func (a *defragmentator) update(chunk []byte) bool { + // each message contains index of current chunk and total count of chunks + chunkIndex, count := int(chunk[10]), int(chunk[11]) + if a.chunksData == nil { + a.chunksData = make([][]byte, count) + } + if count != len(a.chunksData) || chunkIndex >= count { + return false + } + body := chunk[chunkedHeaderLen:] + if a.chunksData[chunkIndex] == nil { + chunkData := make([]byte, 0, len(body)) + a.totalBytes += len(body) + a.chunksData[chunkIndex] = append(chunkData, body...) + a.processed++ + } + return a.processed == len(a.chunksData) +} diff --git a/vendor/gopkg.in/Graylog2/go-gelf.v2/gelf/message.go b/vendor/github.com/grafana/go-gelf/v2/gelf/message.go similarity index 100% rename from vendor/gopkg.in/Graylog2/go-gelf.v2/gelf/message.go rename to vendor/github.com/grafana/go-gelf/v2/gelf/message.go diff --git a/vendor/github.com/grafana/go-gelf/v2/gelf/reader.go b/vendor/github.com/grafana/go-gelf/v2/gelf/reader.go new file mode 100644 index 0000000000000..ca4c9b203f39b --- /dev/null +++ b/vendor/github.com/grafana/go-gelf/v2/gelf/reader.go @@ -0,0 +1,198 @@ +// Copyright 2012 SocialCode. All rights reserved. +// Use of this source code is governed by the MIT +// license that can be found in the LICENSE file. + +package gelf + +import ( + "bytes" + "compress/gzip" + "compress/zlib" + "encoding/json" + "fmt" + "io" + "net" + "strings" + "sync" + "time" +) + +const ( + //according to Gelf specification, all chunks must be received within 5 seconds. see:https://docs.graylog.org/docs/gelf + maxMessageTimeout = 5 * time.Second + defarmentatorsCleanUpInterval = 5 * time.Second +) + +type Reader struct { + mu sync.Mutex + conn net.Conn + messageDefragmentators map[string]*defragmentator + done chan struct{} + maxMessageTimeout time.Duration +} + +// NewReader creates a reader for specified addr +func NewReader(addr string) (*Reader, error) { + return newReader(addr, maxMessageTimeout, defarmentatorsCleanUpInterval) +} + +func newReader(addr string, maxMessageTimeout time.Duration, defarmentatorsCleanUpInterval time.Duration) (*Reader, error) { + var err error + udpAddr, err := net.ResolveUDPAddr("udp", addr) + if err != nil { + return nil, fmt.Errorf("ResolveUDPAddr('%s'): %s", addr, err) + } + + conn, err := net.ListenUDP("udp", udpAddr) + if err != nil { + return nil, fmt.Errorf("ListenUDP: %s", err) + } + + r := new(Reader) + r.maxMessageTimeout = maxMessageTimeout + r.conn = conn + r.messageDefragmentators = make(map[string]*defragmentator) + r.done = make(chan struct{}) + r.initDefragmentatorsCleanup(defarmentatorsCleanUpInterval) + return r, nil +} + +func (r *Reader) Addr() string { + return r.conn.LocalAddr().String() +} + +// FIXME: this will discard data if p isn't big enough to hold the +// full message. 
+func (r *Reader) Read(p []byte) (int, error) { + msg, err := r.ReadMessage() + if err != nil { + return -1, err + } + + var data string + + if msg.Full == "" { + data = msg.Short + } else { + data = msg.Full + } + + return strings.NewReader(data).Read(p) +} + +// ReadMessage reads the message from connection. +// +// If reader receives a message that is not chunked, the reader returns it immediately. +// +// If reader receives a message that is chunked, the reader finds or creates defragmentator by messageID and adds the message to the defragmentator +// and continue reading the message from the connection until it receives the last chunk of any previously received messages or if the reader receives not chunked message. +// In this case, not chunked message will be returned immediately. +func (r *Reader) ReadMessage() (*Message, error) { + cBuf := make([]byte, ChunkSize) + var ( + err error + n int + chunkHead []byte + cReader io.Reader + ) + var message []byte + + for { + // we need to reset buffer length because we change the length of the buffer to `n` after the reading data from connection + cBuf = cBuf[:ChunkSize] + if n, err = r.conn.Read(cBuf); err != nil { + return nil, fmt.Errorf("Read: %s", err) + } + // first two bytes contains hardcoded values [0x1e, 0x0f] if message is chunked + chunkHead, cBuf = cBuf[:2], cBuf[:n] + if bytes.Equal(chunkHead, magicChunked) { + if len(cBuf) <= chunkedHeaderLen { + return nil, fmt.Errorf("chunked message size must be greather than %v", chunkedHeaderLen) + } + // in chunked message, message id is 8 bytes after the message head + messageID := string(cBuf[2 : 2+8]) + deframentator := getDeframentator(r, messageID) + if deframentator.processed >= maxChunksCount { + return nil, fmt.Errorf("message must not be split into more than %v chunks", maxChunksCount) + } + if deframentator.expired() { + return nil, fmt.Errorf("message with ID: %v is expired", messageID) + } + if messageCompleted := deframentator.update(cBuf); !messageCompleted { + continue + } + message = deframentator.bytes() + chunkHead = message[:2] + r.mu.Lock() + delete(r.messageDefragmentators, messageID) + r.mu.Unlock() + } else { + message = cBuf + } + break + } + + // the data we get from the wire is compressed + if bytes.Equal(chunkHead, magicGzip) { + cReader, err = gzip.NewReader(bytes.NewReader(message)) + } else if chunkHead[0] == magicZlib[0] && + (int(chunkHead[0])*256+int(chunkHead[1]))%31 == 0 { + // zlib is slightly more complicated, but correct + cReader, err = zlib.NewReader(bytes.NewReader(message)) + } else { + // compliance with https://github.com/Graylog2/graylog2-server + // treating all messages as uncompressed if they are not gzip, zlib or + // chunked + cReader = bytes.NewReader(message) + } + + if err != nil { + return nil, fmt.Errorf("NewReader: %s", err) + } + + msg := new(Message) + if err := json.NewDecoder(cReader).Decode(&msg); err != nil { + return nil, fmt.Errorf("json.Unmarshal: %s", err) + } + + return msg, nil +} + +func getDeframentator(r *Reader, messageID string) *defragmentator { + r.mu.Lock() + defer r.mu.Unlock() + defragmentator, ok := r.messageDefragmentators[messageID] + if !ok { + defragmentator = newDefragmentator(r.maxMessageTimeout) + r.messageDefragmentators[messageID] = defragmentator + } + return defragmentator +} + +func (r *Reader) Close() error { + close(r.done) + return r.conn.Close() +} + +func (r *Reader) initDefragmentatorsCleanup(defarmentatorsCleanUpInterval time.Duration) { + go func() { + for { + select { + case <-r.done: + 
return + case <-time.After(defarmentatorsCleanUpInterval): + r.cleanUpExpiredDefragmentators() + } + } + }() +} + +func (r *Reader) cleanUpExpiredDefragmentators() { + r.mu.Lock() + defer r.mu.Unlock() + for messageID, defragmentator := range r.messageDefragmentators { + if defragmentator.expired() { + delete(r.messageDefragmentators, messageID) + } + } +} diff --git a/vendor/gopkg.in/Graylog2/go-gelf.v2/gelf/tcpreader.go b/vendor/github.com/grafana/go-gelf/v2/gelf/tcpreader.go similarity index 100% rename from vendor/gopkg.in/Graylog2/go-gelf.v2/gelf/tcpreader.go rename to vendor/github.com/grafana/go-gelf/v2/gelf/tcpreader.go diff --git a/vendor/gopkg.in/Graylog2/go-gelf.v2/gelf/tcpwriter.go b/vendor/github.com/grafana/go-gelf/v2/gelf/tcpwriter.go similarity index 100% rename from vendor/gopkg.in/Graylog2/go-gelf.v2/gelf/tcpwriter.go rename to vendor/github.com/grafana/go-gelf/v2/gelf/tcpwriter.go diff --git a/vendor/gopkg.in/Graylog2/go-gelf.v2/gelf/udpwriter.go b/vendor/github.com/grafana/go-gelf/v2/gelf/udpwriter.go similarity index 94% rename from vendor/gopkg.in/Graylog2/go-gelf.v2/gelf/udpwriter.go rename to vendor/github.com/grafana/go-gelf/v2/gelf/udpwriter.go index 23bbd5e510eb0..95061344f2626 100644 --- a/vendor/gopkg.in/Graylog2/go-gelf.v2/gelf/udpwriter.go +++ b/vendor/github.com/grafana/go-gelf/v2/gelf/udpwriter.go @@ -42,6 +42,9 @@ const ( ChunkSize = 1420 chunkedHeaderLen = 12 chunkedDataLen = ChunkSize - chunkedHeaderLen + // maxChunksCount is limited by the protocol to a maximum of 128 + // https://docs.graylog.org/docs/gelf#gelf-via-udp + maxChunksCount = 128 ) var ( @@ -90,8 +93,8 @@ func (w *GelfWriter) writeChunked(zBytes []byte) (err error) { b := make([]byte, 0, ChunkSize) buf := bytes.NewBuffer(b) nChunksI := numChunks(zBytes) - if nChunksI > 128 { - return fmt.Errorf("msg too large, would need %d chunks", nChunksI) + if nChunksI > maxChunksCount { + return fmt.Errorf("msg too large, would need %d chunks which execeeds the limit of %d chunks", nChunksI, maxChunksCount) } nChunks := uint8(nChunksI) // use urandom to get a unique message id diff --git a/vendor/gopkg.in/Graylog2/go-gelf.v2/gelf/utils.go b/vendor/github.com/grafana/go-gelf/v2/gelf/utils.go similarity index 100% rename from vendor/gopkg.in/Graylog2/go-gelf.v2/gelf/utils.go rename to vendor/github.com/grafana/go-gelf/v2/gelf/utils.go diff --git a/vendor/gopkg.in/Graylog2/go-gelf.v2/gelf/writer.go b/vendor/github.com/grafana/go-gelf/v2/gelf/writer.go similarity index 100% rename from vendor/gopkg.in/Graylog2/go-gelf.v2/gelf/writer.go rename to vendor/github.com/grafana/go-gelf/v2/gelf/writer.go diff --git a/vendor/github.com/stretchr/testify/assert/assertion_compare.go b/vendor/github.com/stretchr/testify/assert/assertion_compare.go index 41649d2679246..3bb22a9718eb8 100644 --- a/vendor/github.com/stretchr/testify/assert/assertion_compare.go +++ b/vendor/github.com/stretchr/testify/assert/assertion_compare.go @@ -3,6 +3,7 @@ package assert import ( "fmt" "reflect" + "time" ) type CompareType int @@ -30,6 +31,8 @@ var ( float64Type = reflect.TypeOf(float64(1)) stringType = reflect.TypeOf("") + + timeType = reflect.TypeOf(time.Time{}) ) func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) { @@ -299,6 +302,27 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) { return compareLess, true } } + // Check for known struct types we can check for compare results. + case reflect.Struct: + { + // All structs enter here. We're not interested in most types. 
+ if !canConvert(obj1Value, timeType) { + break + } + + // time.Time can compared! + timeObj1, ok := obj1.(time.Time) + if !ok { + timeObj1 = obj1Value.Convert(timeType).Interface().(time.Time) + } + + timeObj2, ok := obj2.(time.Time) + if !ok { + timeObj2 = obj2Value.Convert(timeType).Interface().(time.Time) + } + + return compare(timeObj1.UnixNano(), timeObj2.UnixNano(), reflect.Int64) + } } return compareEqual, false @@ -310,7 +334,10 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) { // assert.Greater(t, float64(2), float64(1)) // assert.Greater(t, "b", "a") func Greater(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool { - return compareTwoValues(t, e1, e2, []CompareType{compareGreater}, "\"%v\" is not greater than \"%v\"", msgAndArgs) + if h, ok := t.(tHelper); ok { + h.Helper() + } + return compareTwoValues(t, e1, e2, []CompareType{compareGreater}, "\"%v\" is not greater than \"%v\"", msgAndArgs...) } // GreaterOrEqual asserts that the first element is greater than or equal to the second @@ -320,7 +347,10 @@ func Greater(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface // assert.GreaterOrEqual(t, "b", "a") // assert.GreaterOrEqual(t, "b", "b") func GreaterOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool { - return compareTwoValues(t, e1, e2, []CompareType{compareGreater, compareEqual}, "\"%v\" is not greater than or equal to \"%v\"", msgAndArgs) + if h, ok := t.(tHelper); ok { + h.Helper() + } + return compareTwoValues(t, e1, e2, []CompareType{compareGreater, compareEqual}, "\"%v\" is not greater than or equal to \"%v\"", msgAndArgs...) } // Less asserts that the first element is less than the second @@ -329,7 +359,10 @@ func GreaterOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...in // assert.Less(t, float64(1), float64(2)) // assert.Less(t, "a", "b") func Less(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool { - return compareTwoValues(t, e1, e2, []CompareType{compareLess}, "\"%v\" is not less than \"%v\"", msgAndArgs) + if h, ok := t.(tHelper); ok { + h.Helper() + } + return compareTwoValues(t, e1, e2, []CompareType{compareLess}, "\"%v\" is not less than \"%v\"", msgAndArgs...) } // LessOrEqual asserts that the first element is less than or equal to the second @@ -339,7 +372,10 @@ func Less(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) // assert.LessOrEqual(t, "a", "b") // assert.LessOrEqual(t, "b", "b") func LessOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool { - return compareTwoValues(t, e1, e2, []CompareType{compareLess, compareEqual}, "\"%v\" is not less than or equal to \"%v\"", msgAndArgs) + if h, ok := t.(tHelper); ok { + h.Helper() + } + return compareTwoValues(t, e1, e2, []CompareType{compareLess, compareEqual}, "\"%v\" is not less than or equal to \"%v\"", msgAndArgs...) 
 
 // Positive asserts that the specified element is positive
@@ -347,8 +383,11 @@ func LessOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...inter
 // assert.Positive(t, 1)
 // assert.Positive(t, 1.23)
 func Positive(t TestingT, e interface{}, msgAndArgs ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
 	zero := reflect.Zero(reflect.TypeOf(e))
-	return compareTwoValues(t, e, zero.Interface(), []CompareType{compareGreater}, "\"%v\" is not positive", msgAndArgs)
+	return compareTwoValues(t, e, zero.Interface(), []CompareType{compareGreater}, "\"%v\" is not positive", msgAndArgs...)
 }
 
 // Negative asserts that the specified element is negative
@@ -356,8 +395,11 @@ func Positive(t TestingT, e interface{}, msgAndArgs ...interface{}) bool {
 // assert.Negative(t, -1)
 // assert.Negative(t, -1.23)
 func Negative(t TestingT, e interface{}, msgAndArgs ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
 	zero := reflect.Zero(reflect.TypeOf(e))
-	return compareTwoValues(t, e, zero.Interface(), []CompareType{compareLess}, "\"%v\" is not negative", msgAndArgs)
+	return compareTwoValues(t, e, zero.Interface(), []CompareType{compareLess}, "\"%v\" is not negative", msgAndArgs...)
 }
 
 func compareTwoValues(t TestingT, e1 interface{}, e2 interface{}, allowedComparesResults []CompareType, failMessage string, msgAndArgs ...interface{}) bool {
diff --git a/vendor/github.com/stretchr/testify/assert/assertion_compare_can_convert.go b/vendor/github.com/stretchr/testify/assert/assertion_compare_can_convert.go
new file mode 100644
index 0000000000000..df22c47fc50aa
--- /dev/null
+++ b/vendor/github.com/stretchr/testify/assert/assertion_compare_can_convert.go
@@ -0,0 +1,16 @@
+//go:build go1.17
+// +build go1.17
+
+// TODO: once support for Go 1.16 is dropped, this file can be
+// merged/removed with assertion_compare_go1.17_test.go and
+// assertion_compare_legacy.go
+
+package assert
+
+import "reflect"
+
+// Wrapper around reflect.Value.CanConvert, for compatibility
+// reasons.
+func canConvert(value reflect.Value, to reflect.Type) bool {
+	return value.CanConvert(to)
+}
diff --git a/vendor/github.com/stretchr/testify/assert/assertion_compare_legacy.go b/vendor/github.com/stretchr/testify/assert/assertion_compare_legacy.go
new file mode 100644
index 0000000000000..1701af2a3c89c
--- /dev/null
+++ b/vendor/github.com/stretchr/testify/assert/assertion_compare_legacy.go
@@ -0,0 +1,16 @@
+//go:build !go1.17
+// +build !go1.17
+
+// TODO: once support for Go 1.16 is dropped, this file can be
+// merged/removed with assertion_compare_go1.17_test.go and
+// assertion_compare_can_convert.go
+
+package assert
+
+import "reflect"
+
+// Older versions of Go do not have the reflect.Value.CanConvert
+// method.
+func canConvert(value reflect.Value, to reflect.Type) bool {
+	return false
+}
diff --git a/vendor/github.com/stretchr/testify/assert/assertion_format.go b/vendor/github.com/stretchr/testify/assert/assertion_format.go
index 4dfd1229a8617..27e2420ed2e76 100644
--- a/vendor/github.com/stretchr/testify/assert/assertion_format.go
+++ b/vendor/github.com/stretchr/testify/assert/assertion_format.go
@@ -123,6 +123,18 @@ func ErrorAsf(t TestingT, err error, target interface{}, msg string, args ...int
 	return ErrorAs(t, err, target, append([]interface{}{msg}, args...)...)
 }
 
+// ErrorContainsf asserts that a function returned an error (i.e. not `nil`)
+// and that the error contains the specified substring.
+// +// actualObj, err := SomeFunction() +// assert.ErrorContainsf(t, err, expectedErrorSubString, "error message %s", "formatted") +func ErrorContainsf(t TestingT, theError error, contains string, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return ErrorContains(t, theError, contains, append([]interface{}{msg}, args...)...) +} + // ErrorIsf asserts that at least one of the errors in err's chain matches target. // This is a wrapper for errors.Is. func ErrorIsf(t TestingT, err error, target error, msg string, args ...interface{}) bool { diff --git a/vendor/github.com/stretchr/testify/assert/assertion_forward.go b/vendor/github.com/stretchr/testify/assert/assertion_forward.go index 25337a6f07e6e..d9ea368d0a355 100644 --- a/vendor/github.com/stretchr/testify/assert/assertion_forward.go +++ b/vendor/github.com/stretchr/testify/assert/assertion_forward.go @@ -222,6 +222,30 @@ func (a *Assertions) ErrorAsf(err error, target interface{}, msg string, args .. return ErrorAsf(a.t, err, target, msg, args...) } +// ErrorContains asserts that a function returned an error (i.e. not `nil`) +// and that the error contains the specified substring. +// +// actualObj, err := SomeFunction() +// a.ErrorContains(err, expectedErrorSubString) +func (a *Assertions) ErrorContains(theError error, contains string, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return ErrorContains(a.t, theError, contains, msgAndArgs...) +} + +// ErrorContainsf asserts that a function returned an error (i.e. not `nil`) +// and that the error contains the specified substring. +// +// actualObj, err := SomeFunction() +// a.ErrorContainsf(err, expectedErrorSubString, "error message %s", "formatted") +func (a *Assertions) ErrorContainsf(theError error, contains string, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return ErrorContainsf(a.t, theError, contains, msg, args...) +} + // ErrorIs asserts that at least one of the errors in err's chain matches target. // This is a wrapper for errors.Is. func (a *Assertions) ErrorIs(err error, target error, msgAndArgs ...interface{}) bool { diff --git a/vendor/github.com/stretchr/testify/assert/assertion_order.go b/vendor/github.com/stretchr/testify/assert/assertion_order.go index 1c3b47182a726..7594487835856 100644 --- a/vendor/github.com/stretchr/testify/assert/assertion_order.go +++ b/vendor/github.com/stretchr/testify/assert/assertion_order.go @@ -50,7 +50,7 @@ func isOrdered(t TestingT, object interface{}, allowedComparesResults []CompareT // assert.IsIncreasing(t, []float{1, 2}) // assert.IsIncreasing(t, []string{"a", "b"}) func IsIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { - return isOrdered(t, object, []CompareType{compareLess}, "\"%v\" is not less than \"%v\"", msgAndArgs) + return isOrdered(t, object, []CompareType{compareLess}, "\"%v\" is not less than \"%v\"", msgAndArgs...) 
} // IsNonIncreasing asserts that the collection is not increasing @@ -59,7 +59,7 @@ func IsIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) boo // assert.IsNonIncreasing(t, []float{2, 1}) // assert.IsNonIncreasing(t, []string{"b", "a"}) func IsNonIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { - return isOrdered(t, object, []CompareType{compareEqual, compareGreater}, "\"%v\" is not greater than or equal to \"%v\"", msgAndArgs) + return isOrdered(t, object, []CompareType{compareEqual, compareGreater}, "\"%v\" is not greater than or equal to \"%v\"", msgAndArgs...) } // IsDecreasing asserts that the collection is decreasing @@ -68,7 +68,7 @@ func IsNonIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) // assert.IsDecreasing(t, []float{2, 1}) // assert.IsDecreasing(t, []string{"b", "a"}) func IsDecreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { - return isOrdered(t, object, []CompareType{compareGreater}, "\"%v\" is not greater than \"%v\"", msgAndArgs) + return isOrdered(t, object, []CompareType{compareGreater}, "\"%v\" is not greater than \"%v\"", msgAndArgs...) } // IsNonDecreasing asserts that the collection is not decreasing @@ -77,5 +77,5 @@ func IsDecreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) boo // assert.IsNonDecreasing(t, []float{1, 2}) // assert.IsNonDecreasing(t, []string{"a", "b"}) func IsNonDecreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { - return isOrdered(t, object, []CompareType{compareLess, compareEqual}, "\"%v\" is not less than or equal to \"%v\"", msgAndArgs) + return isOrdered(t, object, []CompareType{compareLess, compareEqual}, "\"%v\" is not less than or equal to \"%v\"", msgAndArgs...) } diff --git a/vendor/github.com/stretchr/testify/assert/assertions.go b/vendor/github.com/stretchr/testify/assert/assertions.go index bcac4401f57fb..0357b2231a2cd 100644 --- a/vendor/github.com/stretchr/testify/assert/assertions.go +++ b/vendor/github.com/stretchr/testify/assert/assertions.go @@ -718,10 +718,14 @@ func NotEqualValues(t TestingT, expected, actual interface{}, msgAndArgs ...inte // return (false, false) if impossible. // return (true, false) if element was not found. // return (true, true) if element was found. -func includeElement(list interface{}, element interface{}) (ok, found bool) { +func containsElement(list interface{}, element interface{}) (ok, found bool) { listValue := reflect.ValueOf(list) - listKind := reflect.TypeOf(list).Kind() + listType := reflect.TypeOf(list) + if listType == nil { + return false, false + } + listKind := listType.Kind() defer func() { if e := recover(); e != nil { ok = false @@ -764,7 +768,7 @@ func Contains(t TestingT, s, contains interface{}, msgAndArgs ...interface{}) bo h.Helper() } - ok, found := includeElement(s, contains) + ok, found := containsElement(s, contains) if !ok { return Fail(t, fmt.Sprintf("%#v could not be applied builtin len()", s), msgAndArgs...) } @@ -787,7 +791,7 @@ func NotContains(t TestingT, s, contains interface{}, msgAndArgs ...interface{}) h.Helper() } - ok, found := includeElement(s, contains) + ok, found := containsElement(s, contains) if !ok { return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", s), msgAndArgs...) 
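The containsElement guard above also changes observable behavior: reflect.TypeOf(nil) returns a nil reflect.Type, so Contains on a nil haystack now fails cleanly instead of panicking. A hedged sketch against the vendored API (mockT is a hypothetical failure recorder, not part of the patch):

    package example

    import (
        "testing"

        "github.com/stretchr/testify/assert"
    )

    // mockT satisfies assert.TestingT and records the failure instead of
    // failing the enclosing test.
    type mockT struct{ failed bool }

    func (m *mockT) Errorf(format string, args ...interface{}) { m.failed = true }

    func TestNilHaystackSketch(t *testing.T) {
        m := &mockT{}
        // Previously this panicked inside includeElement; now it reports
        // "could not be applied builtin len()" and returns false.
        assert.Contains(m, nil, "needle")
        assert.True(t, m.failed)
    }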
} @@ -831,7 +835,7 @@ func Subset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok for i := 0; i < subsetValue.Len(); i++ { element := subsetValue.Index(i).Interface() - ok, found := includeElement(list, element) + ok, found := containsElement(list, element) if !ok { return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", list), msgAndArgs...) } @@ -852,7 +856,7 @@ func NotSubset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) h.Helper() } if subset == nil { - return Fail(t, fmt.Sprintf("nil is the empty set which is a subset of every set"), msgAndArgs...) + return Fail(t, "nil is the empty set which is a subset of every set", msgAndArgs...) } subsetValue := reflect.ValueOf(subset) @@ -875,7 +879,7 @@ func NotSubset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) for i := 0; i < subsetValue.Len(); i++ { element := subsetValue.Index(i).Interface() - ok, found := includeElement(list, element) + ok, found := containsElement(list, element) if !ok { return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", list), msgAndArgs...) } @@ -1000,27 +1004,21 @@ func Condition(t TestingT, comp Comparison, msgAndArgs ...interface{}) bool { type PanicTestFunc func() // didPanic returns true if the function passed to it panics. Otherwise, it returns false. -func didPanic(f PanicTestFunc) (bool, interface{}, string) { - - didPanic := false - var message interface{} - var stack string - func() { - - defer func() { - if message = recover(); message != nil { - didPanic = true - stack = string(debug.Stack()) - } - }() - - // call the target function - f() +func didPanic(f PanicTestFunc) (didPanic bool, message interface{}, stack string) { + didPanic = true + defer func() { + message = recover() + if didPanic { + stack = string(debug.Stack()) + } }() - return didPanic, message, stack + // call the target function + f() + didPanic = false + return } // Panics asserts that the code inside the specified PanicTestFunc panics. @@ -1161,11 +1159,15 @@ func InDelta(t TestingT, expected, actual interface{}, delta float64, msgAndArgs bf, bok := toFloat(actual) if !aok || !bok { - return Fail(t, fmt.Sprintf("Parameters must be numerical"), msgAndArgs...) + return Fail(t, "Parameters must be numerical", msgAndArgs...) + } + + if math.IsNaN(af) && math.IsNaN(bf) { + return true } if math.IsNaN(af) { - return Fail(t, fmt.Sprintf("Expected must not be NaN"), msgAndArgs...) + return Fail(t, "Expected must not be NaN", msgAndArgs...) } if math.IsNaN(bf) { @@ -1188,7 +1190,7 @@ func InDeltaSlice(t TestingT, expected, actual interface{}, delta float64, msgAn if expected == nil || actual == nil || reflect.TypeOf(actual).Kind() != reflect.Slice || reflect.TypeOf(expected).Kind() != reflect.Slice { - return Fail(t, fmt.Sprintf("Parameters must be slice"), msgAndArgs...) + return Fail(t, "Parameters must be slice", msgAndArgs...) 
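The InDelta change above in a one-line illustration (again a sketch against the vendored API, not patch content): two NaN values now compare as within any delta, while a single NaN on either side still fails the assertion.

    package example

    import (
        "math"
        "testing"

        "github.com/stretchr/testify/assert"
    )

    func TestNaNDeltaSketch(t *testing.T) {
        // Passes after this change; previously it failed with
        // "Expected must not be NaN".
        assert.InDelta(t, math.NaN(), math.NaN(), 0.001)
    }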
} actualSlice := reflect.ValueOf(actual) @@ -1250,8 +1252,12 @@ func InDeltaMapValues(t TestingT, expected, actual interface{}, delta float64, m func calcRelativeError(expected, actual interface{}) (float64, error) { af, aok := toFloat(expected) - if !aok { - return 0, fmt.Errorf("expected value %q cannot be converted to float", expected) + bf, bok := toFloat(actual) + if !aok || !bok { + return 0, fmt.Errorf("Parameters must be numerical") + } + if math.IsNaN(af) && math.IsNaN(bf) { + return 0, nil } if math.IsNaN(af) { return 0, errors.New("expected value must not be NaN") @@ -1259,10 +1265,6 @@ func calcRelativeError(expected, actual interface{}) (float64, error) { if af == 0 { return 0, fmt.Errorf("expected value must have a value other than zero to calculate the relative error") } - bf, bok := toFloat(actual) - if !bok { - return 0, fmt.Errorf("actual value %q cannot be converted to float", actual) - } if math.IsNaN(bf) { return 0, errors.New("actual value must not be NaN") } @@ -1298,7 +1300,7 @@ func InEpsilonSlice(t TestingT, expected, actual interface{}, epsilon float64, m if expected == nil || actual == nil || reflect.TypeOf(actual).Kind() != reflect.Slice || reflect.TypeOf(expected).Kind() != reflect.Slice { - return Fail(t, fmt.Sprintf("Parameters must be slice"), msgAndArgs...) + return Fail(t, "Parameters must be slice", msgAndArgs...) } actualSlice := reflect.ValueOf(actual) @@ -1375,6 +1377,27 @@ func EqualError(t TestingT, theError error, errString string, msgAndArgs ...inte return true } +// ErrorContains asserts that a function returned an error (i.e. not `nil`) +// and that the error contains the specified substring. +// +// actualObj, err := SomeFunction() +// assert.ErrorContains(t, err, expectedErrorSubString) +func ErrorContains(t TestingT, theError error, contains string, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if !Error(t, theError, msgAndArgs...) { + return false + } + + actual := theError.Error() + if !strings.Contains(actual, contains) { + return Fail(t, fmt.Sprintf("Error %#v does not contain %#v", actual, contains), msgAndArgs...) + } + + return true +} + // matchRegexp return true if a specified regexp matches a string. func matchRegexp(rx interface{}, str interface{}) bool { @@ -1588,12 +1611,17 @@ func diff(expected interface{}, actual interface{}) string { } var e, a string - if et != reflect.TypeOf("") { - e = spewConfig.Sdump(expected) - a = spewConfig.Sdump(actual) - } else { + + switch et { + case reflect.TypeOf(""): e = reflect.ValueOf(expected).String() a = reflect.ValueOf(actual).String() + case reflect.TypeOf(time.Time{}): + e = spewConfigStringerEnabled.Sdump(expected) + a = spewConfigStringerEnabled.Sdump(actual) + default: + e = spewConfig.Sdump(expected) + a = spewConfig.Sdump(actual) } diff, _ := difflib.GetUnifiedDiffString(difflib.UnifiedDiff{ @@ -1625,6 +1653,14 @@ var spewConfig = spew.ConfigState{ MaxDepth: 10, } +var spewConfigStringerEnabled = spew.ConfigState{ + Indent: " ", + DisablePointerAddresses: true, + DisableCapacities: true, + SortKeys: true, + MaxDepth: 10, +} + type tHelper interface { Helper() } diff --git a/vendor/github.com/stretchr/testify/mock/mock.go b/vendor/github.com/stretchr/testify/mock/mock.go index e2e6a2d237df0..853da6cce2de9 100644 --- a/vendor/github.com/stretchr/testify/mock/mock.go +++ b/vendor/github.com/stretchr/testify/mock/mock.go @@ -221,6 +221,14 @@ type Mock struct { mutex sync.Mutex } +// String provides a %v format string for Mock. 
+// Note: this is used implicitly by Arguments.Diff if a Mock is passed. +// It exists because go's default %v formatting traverses the struct +// without acquiring the mutex, which is detected by go test -race. +func (m *Mock) String() string { + return fmt.Sprintf("%[1]T<%[1]p>", m) +} + // TestData holds any data that might be useful for testing. Testify ignores // this data completely allowing you to do whatever you like with it. func (m *Mock) TestData() objx.Map { @@ -720,7 +728,7 @@ func (f argumentMatcher) Matches(argument interface{}) bool { } func (f argumentMatcher) String() string { - return fmt.Sprintf("func(%s) bool", f.fn.Type().In(0).Name()) + return fmt.Sprintf("func(%s) bool", f.fn.Type().In(0).String()) } // MatchedBy can be used to match a mock call based on only certain properties diff --git a/vendor/github.com/stretchr/testify/require/require.go b/vendor/github.com/stretchr/testify/require/require.go index 51820df2e6726..59c48277ac6d3 100644 --- a/vendor/github.com/stretchr/testify/require/require.go +++ b/vendor/github.com/stretchr/testify/require/require.go @@ -280,6 +280,36 @@ func ErrorAsf(t TestingT, err error, target interface{}, msg string, args ...int t.FailNow() } +// ErrorContains asserts that a function returned an error (i.e. not `nil`) +// and that the error contains the specified substring. +// +// actualObj, err := SomeFunction() +// assert.ErrorContains(t, err, expectedErrorSubString) +func ErrorContains(t TestingT, theError error, contains string, msgAndArgs ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.ErrorContains(t, theError, contains, msgAndArgs...) { + return + } + t.FailNow() +} + +// ErrorContainsf asserts that a function returned an error (i.e. not `nil`) +// and that the error contains the specified substring. +// +// actualObj, err := SomeFunction() +// assert.ErrorContainsf(t, err, expectedErrorSubString, "error message %s", "formatted") +func ErrorContainsf(t TestingT, theError error, contains string, msg string, args ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.ErrorContainsf(t, theError, contains, msg, args...) { + return + } + t.FailNow() +} + // ErrorIs asserts that at least one of the errors in err's chain matches target. // This is a wrapper for errors.Is. func ErrorIs(t TestingT, err error, target error, msgAndArgs ...interface{}) { diff --git a/vendor/github.com/stretchr/testify/require/require_forward.go b/vendor/github.com/stretchr/testify/require/require_forward.go index ed54a9d83f35e..5bb07c89c68b7 100644 --- a/vendor/github.com/stretchr/testify/require/require_forward.go +++ b/vendor/github.com/stretchr/testify/require/require_forward.go @@ -223,6 +223,30 @@ func (a *Assertions) ErrorAsf(err error, target interface{}, msg string, args .. ErrorAsf(a.t, err, target, msg, args...) } +// ErrorContains asserts that a function returned an error (i.e. not `nil`) +// and that the error contains the specified substring. +// +// actualObj, err := SomeFunction() +// a.ErrorContains(err, expectedErrorSubString) +func (a *Assertions) ErrorContains(theError error, contains string, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + ErrorContains(a.t, theError, contains, msgAndArgs...) +} + +// ErrorContainsf asserts that a function returned an error (i.e. not `nil`) +// and that the error contains the specified substring. 
+// +// actualObj, err := SomeFunction() +// a.ErrorContainsf(err, expectedErrorSubString, "error message %s", "formatted") +func (a *Assertions) ErrorContainsf(theError error, contains string, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + ErrorContainsf(a.t, theError, contains, msg, args...) +} + // ErrorIs asserts that at least one of the errors in err's chain matches target. // This is a wrapper for errors.Is. func (a *Assertions) ErrorIs(err error, target error, msgAndArgs ...interface{}) { diff --git a/vendor/gopkg.in/Graylog2/go-gelf.v2/gelf/reader.go b/vendor/gopkg.in/Graylog2/go-gelf.v2/gelf/reader.go deleted file mode 100644 index 83df304e83aa6..0000000000000 --- a/vendor/gopkg.in/Graylog2/go-gelf.v2/gelf/reader.go +++ /dev/null @@ -1,144 +0,0 @@ -// Copyright 2012 SocialCode. All rights reserved. -// Use of this source code is governed by the MIT -// license that can be found in the LICENSE file. - -package gelf - -import ( - "bytes" - "compress/gzip" - "compress/zlib" - "encoding/json" - "fmt" - "io" - "net" - "strings" - "sync" -) - -type Reader struct { - mu sync.Mutex - conn net.Conn -} - -func NewReader(addr string) (*Reader, error) { - var err error - udpAddr, err := net.ResolveUDPAddr("udp", addr) - if err != nil { - return nil, fmt.Errorf("ResolveUDPAddr('%s'): %s", addr, err) - } - - conn, err := net.ListenUDP("udp", udpAddr) - if err != nil { - return nil, fmt.Errorf("ListenUDP: %s", err) - } - - r := new(Reader) - r.conn = conn - return r, nil -} - -func (r *Reader) Addr() string { - return r.conn.LocalAddr().String() -} - -// FIXME: this will discard data if p isn't big enough to hold the -// full message. -func (r *Reader) Read(p []byte) (int, error) { - msg, err := r.ReadMessage() - if err != nil { - return -1, err - } - - var data string - - if msg.Full == "" { - data = msg.Short - } else { - data = msg.Full - } - - return strings.NewReader(data).Read(p) -} - -func (r *Reader) ReadMessage() (*Message, error) { - cBuf := make([]byte, ChunkSize) - var ( - err error - n, length int - cid, ocid []byte - seq, total uint8 - cHead []byte - cReader io.Reader - chunks [][]byte - ) - - for got := 0; got < 128 && (total == 0 || got < int(total)); got++ { - if n, err = r.conn.Read(cBuf); err != nil { - return nil, fmt.Errorf("Read: %s", err) - } - cHead, cBuf = cBuf[:2], cBuf[:n] - - if bytes.Equal(cHead, magicChunked) { - // fmt.Printf("chunked %v\n", cBuf[:14]) - cid, seq, total = cBuf[2:2+8], cBuf[2+8], cBuf[2+8+1] - if ocid != nil && !bytes.Equal(cid, ocid) { - return nil, fmt.Errorf("out-of-band message %v (awaited %v)", cid, ocid) - } else if ocid == nil { - ocid = cid - chunks = make([][]byte, total) - } - n = len(cBuf) - chunkedHeaderLen - // fmt.Printf("setting chunks[%d]: %d\n", seq, n) - chunks[seq] = append(make([]byte, 0, n), cBuf[chunkedHeaderLen:]...) - length += n - } else { // not chunked - if total > 0 { - return nil, fmt.Errorf("out-of-band message (not chunked)") - } - break - } - } - // fmt.Printf("\nchunks: %v\n", chunks) - - if length > 0 { - if cap(cBuf) < length { - cBuf = append(cBuf, make([]byte, 0, length-cap(cBuf))...) - } - cBuf = cBuf[:0] - for i := range chunks { - // fmt.Printf("appending %d %v\n", i, chunks[i]) - cBuf = append(cBuf, chunks[i]...) 
- } - cHead = cBuf[:2] - } - - // the data we get from the wire is compressed - if bytes.Equal(cHead, magicGzip) { - cReader, err = gzip.NewReader(bytes.NewReader(cBuf)) - } else if cHead[0] == magicZlib[0] && - (int(cHead[0])*256+int(cHead[1]))%31 == 0 { - // zlib is slightly more complicated, but correct - cReader, err = zlib.NewReader(bytes.NewReader(cBuf)) - } else { - // compliance with https://github.com/Graylog2/graylog2-server - // treating all messages as uncompressed if they are not gzip, zlib or - // chunked - cReader = bytes.NewReader(cBuf) - } - - if err != nil { - return nil, fmt.Errorf("NewReader: %s", err) - } - - msg := new(Message) - if err := json.NewDecoder(cReader).Decode(&msg); err != nil { - return nil, fmt.Errorf("json.Unmarshal: %s", err) - } - - return msg, nil -} - -func (r *Reader) Close() error { - return r.conn.Close() -} diff --git a/vendor/modules.txt b/vendor/modules.txt index 201df80004db4..90e2c6ab9364c 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -546,6 +546,9 @@ github.com/grafana/dskit/runtimeconfig github.com/grafana/dskit/services github.com/grafana/dskit/spanlogger github.com/grafana/dskit/tenant +# github.com/grafana/go-gelf/v2 v2.0.1 +## explicit; go 1.17 +github.com/grafana/go-gelf/v2/gelf # github.com/grafana/regexp v0.0.0-20220304100321-149c8afcd6cb ## explicit; go 1.17 github.com/grafana/regexp @@ -967,7 +970,7 @@ github.com/spf13/pflag # github.com/stretchr/objx v0.2.0 ## explicit; go 1.12 github.com/stretchr/objx -# github.com/stretchr/testify v1.7.0 +# github.com/stretchr/testify v1.7.1 ## explicit; go 1.13 github.com/stretchr/testify/assert github.com/stretchr/testify/mock @@ -1396,9 +1399,6 @@ google.golang.org/protobuf/types/known/emptypb google.golang.org/protobuf/types/known/fieldmaskpb google.golang.org/protobuf/types/known/timestamppb google.golang.org/protobuf/types/known/wrapperspb -# gopkg.in/Graylog2/go-gelf.v2 v2.0.0-20191017102106-1550ee647df0 => github.com/grafana/go-gelf v0.0.0-20211112153804-126646b86de8 -## explicit -gopkg.in/Graylog2/go-gelf.v2/gelf # gopkg.in/alecthomas/kingpin.v2 v2.2.6 ## explicit gopkg.in/alecthomas/kingpin.v2 @@ -1660,5 +1660,4 @@ sigs.k8s.io/yaml # github.com/hashicorp/consul => github.com/hashicorp/consul v1.5.1 # github.com/gocql/gocql => github.com/grafana/gocql v0.0.0-20200605141915-ba5dc39ece85 # github.com/bradfitz/gomemcache => github.com/themihai/gomemcache v0.0.0-20180902122335-24332e2d58ab -# gopkg.in/Graylog2/go-gelf.v2 => github.com/grafana/go-gelf v0.0.0-20211112153804-126646b86de8 # github.com/cloudflare/cloudflare-go => github.com/cyriltovena/cloudflare-go v0.27.1-0.20211118103540-ff77400bcb93 From 2a04ac9af709b1199f9b2e442b34945b70065298 Mon Sep 17 00:00:00 2001 From: Trevor Whitney Date: Mon, 25 Apr 2022 16:06:47 -0500 Subject: [PATCH 015/223] Remove references to cortex (#6015) --- docs/sources/configuration/_index.md | 34 ++----------------- .../chunk/client/azure/blob_storage_client.go | 2 +- 2 files changed, 3 insertions(+), 33 deletions(-) diff --git a/docs/sources/configuration/_index.md b/docs/sources/configuration/_index.md index 8067e753a61c4..4861c24537c44 100644 --- a/docs/sources/configuration/_index.md +++ b/docs/sources/configuration/_index.md @@ -742,7 +742,7 @@ The `azure_storage_config` configures Azure as a general storage for different d # Name of the blob container used to store chunks. This container must be # created before running cortex. 
# CLI flag: -.azure.container-name -[container_name: | default = "cortex"] +[container_name: | default = "loki"] # The Microsoft Azure account name to be used # CLI flag: -.azure.account-name @@ -938,7 +938,7 @@ The `swift_storage_config` configures Swift as a general storage for different d # Name of the Swift container to put chunks in. # CLI flag: -.swift.container-name -[container_name: | default = "cortex"] +[container_name: | default = ""] ``` ## hedging @@ -1489,36 +1489,6 @@ aws: # CLI flag: -metrics.ignore-throttle-below [ignore_throttle_below: | default = 1] - # Query to fetch ingester queue length - # CLI flag: -metrics.queue-length-query - [queue_length_query: | - default = "sum(avg_over_time(cortex_ingester_flush_queue_length{job="cortex/ingester"}[2m]))"] - - # Query to fetch throttle rates per table - # CLI flag: -metrics.write-throttle-query - [write_throttle_query: | - default = "sum(rate(cortex_dynamo_throttled_total{operation="DynamoDB.BatchWriteItem"}[1m])) - by (table) > 0"] - - # Query to fetch write capacity usage per table - # CLI flag: -metrics.usage-query - [write_usage_query: | - default = - "sum(rate(cortex_dynamo_consumed_capacity_total{operation="DynamoDB.BatchWriteItem"}[15m])) - by (table) > 0"] - - # Query to fetch read capacity usage per table - # CLI flag: -metrics.read-usage-query - [read_usage_query: | - default = "sum(rate(cortex_dynamo_consumed_capacity_total{operation="DynamoDB.QueryPages"}[1h])) - by (table) > 0"] - - # Query to fetch read errors per table - # CLI flag: -metrics.read-error-query - [read_error_query: | - default = "sum(increase(cortex_dynamo_failures_total{operation="DynamoDB.QueryPages", - error="ProvisionedThroughputExceededException"}[1m])) by (table) > 0"] - # Number of chunks to group together to parallelise fetches (0 to disable) # CLI flag: -dynamodb.chunk-gang-size [chunk_gang_size: | default = 10] diff --git a/pkg/storage/chunk/client/azure/blob_storage_client.go b/pkg/storage/chunk/client/azure/blob_storage_client.go index 163b4a28cd8cd..f74ade5f6811f 100644 --- a/pkg/storage/chunk/client/azure/blob_storage_client.go +++ b/pkg/storage/chunk/client/azure/blob_storage_client.go @@ -105,7 +105,7 @@ func (c *BlobStorageConfig) RegisterFlags(f *flag.FlagSet) { // RegisterFlagsWithPrefix adds the flags required to config this to the given FlagSet func (c *BlobStorageConfig) RegisterFlagsWithPrefix(prefix string, f *flag.FlagSet) { f.StringVar(&c.Environment, prefix+"azure.environment", azureGlobal, fmt.Sprintf("Azure Cloud environment. Supported values are: %s.", strings.Join(supportedEnvironments, ", "))) - f.StringVar(&c.ContainerName, prefix+"azure.container-name", "cortex", "Name of the blob container used to store chunks. This container must be created before running cortex.") + f.StringVar(&c.ContainerName, prefix+"azure.container-name", "loki", "Name of the blob container used to store chunks. 
This container must be created before running cortex.") f.StringVar(&c.AccountName, prefix+"azure.account-name", "", "The Microsoft Azure account name to be used") f.StringVar(&c.ChunkDelimiter, prefix+"azure.chunk-delimiter", "-", "Chunk delimiter for blob ID to be used") f.Var(&c.AccountKey, prefix+"azure.account-key", "The Microsoft Azure account key to use.") From bcb800967904ad627371e0067008db8f500b2cce Mon Sep 17 00:00:00 2001 From: Gerard Vanloo Date: Tue, 26 Apr 2022 00:46:13 -0400 Subject: [PATCH 016/223] operator: logerr v2 update (#5987) --- operator/CHANGELOG.md | 1 + operator/cmd/loki-broker/main.go | 2 +- operator/cmd/size-calculator/main.go | 6 +- .../internal/management/state/state.go | 9 +- .../internal/management/state/state_test.go | 9 +- operator/controllers/lokistack_controller.go | 2 +- .../controllers/lokistack_controller_test.go | 11 +-- operator/go.mod | 3 +- operator/go.sum | 4 +- .../handlers/internal/gateway/base_domain.go | 2 +- .../handlers/internal/gateway/modes.go | 2 +- .../internal/gateway/tenant_configmap.go | 13 +-- .../internal/gateway/tenant_configmap_test.go | 9 +- .../internal/gateway/tenant_secrets.go | 2 +- .../handlers/internal/secrets/secrets.go | 2 +- .../handlers/lokistack_create_or_update.go | 9 +- .../lokistack_create_or_update_test.go | 11 +-- operator/internal/manifests/build.go | 2 +- operator/internal/manifests/gateway.go | 2 +- .../internal/manifests/gateway_tenants.go | 2 +- .../manifests/internal/alerts/build.go | 2 +- .../manifests/internal/config/build.go | 2 +- .../manifests/internal/gateway/build.go | 2 +- operator/internal/manifests/mutate.go | 99 +++++++++++-------- operator/internal/manifests/mutate_test.go | 31 +++--- .../internal/manifests/openshift/configure.go | 2 +- .../internal/manifests/service_monitor.go | 2 +- operator/internal/sizes/predict.go | 2 +- operator/internal/status/components.go | 2 +- operator/internal/status/lokistack.go | 2 +- operator/internal/status/status.go | 2 +- operator/main.go | 4 +- 32 files changed, 129 insertions(+), 126 deletions(-) diff --git a/operator/CHANGELOG.md b/operator/CHANGELOG.md index 6d33c918dcaa1..56343774f3868 100644 --- a/operator/CHANGELOG.md +++ b/operator/CHANGELOG.md @@ -1,5 +1,6 @@ ## Main +- [5987](https://github.com/grafana/loki/pull/5987) **Red-GV**: Update logerr to v2.0.0 - [5907](https://github.com/grafana/loki/pull/5907) **xperimental**: Do not include non-static labels in pod selectors - [5893](https://github.com/grafana/loki/pull/5893) **periklis**: Align PVC storage size requests for all lokistack t-shirt sizes - [5884](https://github.com/grafana/loki/pull/5884) **periklis**: Update Loki operand to v2.5.0 diff --git a/operator/cmd/loki-broker/main.go b/operator/cmd/loki-broker/main.go index cc7ed61df87ca..318ea113d11f5 100644 --- a/operator/cmd/loki-broker/main.go +++ b/operator/cmd/loki-broker/main.go @@ -8,7 +8,7 @@ import ( "path" "strings" - "github.com/ViaQ/logerr/log" + "github.com/ViaQ/logerr/v2/log" "github.com/go-logr/logr" "github.com/grafana/loki/operator/api/v1beta1" "github.com/grafana/loki/operator/internal/manifests" diff --git a/operator/cmd/size-calculator/main.go b/operator/cmd/size-calculator/main.go index 09878d05d884e..fb5f404348a50 100755 --- a/operator/cmd/size-calculator/main.go +++ b/operator/cmd/size-calculator/main.go @@ -9,7 +9,7 @@ import ( "github.com/grafana/loki/operator/internal/sizes" "github.com/prometheus/common/model" - "github.com/ViaQ/logerr/log" + "github.com/ViaQ/logerr/v2/log" ) const ( @@ -29,9 +29,9 @@ const ( 
sizeOneXMedium string = "1x.medium" ) -var logger = log.NewLogger("size-calculator") - func main() { + logger := log.NewLogger("size-calculator") + logger.Info("starting storage size calculator...") for { diff --git a/operator/controllers/internal/management/state/state.go b/operator/controllers/internal/management/state/state.go index b3ff994ea9de2..bff7d7e1aed0b 100644 --- a/operator/controllers/internal/management/state/state.go +++ b/operator/controllers/internal/management/state/state.go @@ -6,21 +6,16 @@ import ( lokiv1beta1 "github.com/grafana/loki/operator/api/v1beta1" "github.com/grafana/loki/operator/internal/external/k8s" - "github.com/ViaQ/logerr/kverrors" - "github.com/go-logr/logr" + "github.com/ViaQ/logerr/v2/kverrors" apierrors "k8s.io/apimachinery/pkg/api/errors" ctrl "sigs.k8s.io/controller-runtime" ) // IsManaged checks if the custom resource is configured with ManagementState Managed. -func IsManaged(ctx context.Context, log logr.Logger, req ctrl.Request, k k8s.Client) (bool, error) { - ll := log.WithValues("lokistack", req.NamespacedName) - +func IsManaged(ctx context.Context, req ctrl.Request, k k8s.Client) (bool, error) { var stack lokiv1beta1.LokiStack if err := k.Get(ctx, req.NamespacedName, &stack); err != nil { if apierrors.IsNotFound(err) { - // maybe the user deleted it before we could react? Either way this isn't an issue - ll.Error(err, "could not find the requested loki stack", "name", req.NamespacedName) return false, nil } return false, kverrors.Wrap(err, "failed to lookup lokistack", "name", req.NamespacedName) diff --git a/operator/controllers/internal/management/state/state_test.go b/operator/controllers/internal/management/state/state_test.go index b5c7edba2e5c7..3255ab068f31c 100644 --- a/operator/controllers/internal/management/state/state_test.go +++ b/operator/controllers/internal/management/state/state_test.go @@ -8,8 +8,7 @@ import ( "github.com/grafana/loki/operator/controllers/internal/management/state" "github.com/grafana/loki/operator/internal/external/k8s/k8sfakes" - "github.com/ViaQ/logerr/kverrors" - "github.com/ViaQ/logerr/log" + "github.com/ViaQ/logerr/v2/kverrors" "github.com/stretchr/testify/require" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -19,8 +18,6 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" ) -var logger = log.DefaultLogger() - func TestIsManaged(t *testing.T) { type test struct { name string @@ -76,7 +73,7 @@ func TestIsManaged(t *testing.T) { k.SetClientObject(object, &tst.stack) return nil } - ok, err := state.IsManaged(context.TODO(), logger, r, k) + ok, err := state.IsManaged(context.TODO(), r, k) require.NoError(t, err) require.Equal(t, ok, tst.wantOk) }) @@ -112,7 +109,7 @@ func TestIsManaged_WhenError_ReturnNotManagedWithError(t *testing.T) { for _, tst := range table { t.Run(tst.name, func(t *testing.T) { k.GetReturns(tst.apierror) - ok, err := state.IsManaged(context.TODO(), logger, r, k) + ok, err := state.IsManaged(context.TODO(), r, k) require.Equal(t, tst.wantErr, err) require.False(t, ok) }) diff --git a/operator/controllers/lokistack_controller.go b/operator/controllers/lokistack_controller.go index da66c6e26f0c4..3e46047dd8ec4 100644 --- a/operator/controllers/lokistack_controller.go +++ b/operator/controllers/lokistack_controller.go @@ -93,7 +93,7 @@ type LokiStackReconciler struct { // For more details, check Reconcile and its Result here: // - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.7.0/pkg/reconcile func (r *LokiStackReconciler) 
Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { - ok, err := state.IsManaged(ctx, r.Log, req, r.Client) + ok, err := state.IsManaged(ctx, req, r.Client) if err != nil { return ctrl.Result{ Requeue: true, diff --git a/operator/controllers/lokistack_controller_test.go b/operator/controllers/lokistack_controller_test.go index 801c5d8137ca4..17a24ab6e9782 100644 --- a/operator/controllers/lokistack_controller_test.go +++ b/operator/controllers/lokistack_controller_test.go @@ -10,7 +10,8 @@ import ( "github.com/grafana/loki/operator/internal/external/k8s/k8sfakes" "github.com/grafana/loki/operator/internal/manifests" - "github.com/ViaQ/logerr/log" + "github.com/ViaQ/logerr/v2/log" + "github.com/go-logr/logr" routev1 "github.com/openshift/api/route/v1" "github.com/stretchr/testify/require" appsv1 "k8s.io/api/apps/v1" @@ -25,7 +26,7 @@ import ( ) var ( - logger = log.NewLogger("testing") + logger logr.Logger scheme = runtime.NewScheme() ) @@ -34,12 +35,10 @@ func TestMain(m *testing.M) { testing.Init() flag.Parse() - sink := log.MustGetSink(logger) if testing.Verbose() { - // set to the highest for verbose testing - sink.SetVerbosity(5) + logger = log.NewLogger("testing", log.WithVerbosity(5)) } else { - sink.SetOutput(ioutil.Discard) + logger = log.NewLogger("testing", log.WithOutput(ioutil.Discard)) } // Register the clientgo and CRD schemes diff --git a/operator/go.mod b/operator/go.mod index 7bcad1a9f1f4b..be2f59c8e1702 100644 --- a/operator/go.mod +++ b/operator/go.mod @@ -3,7 +3,6 @@ module github.com/grafana/loki/operator go 1.17 require ( - github.com/ViaQ/logerr v1.1.0 github.com/go-logr/logr v1.2.3 github.com/google/uuid v1.1.2 github.com/imdario/mergo v0.3.12 @@ -21,6 +20,8 @@ require ( sigs.k8s.io/yaml v1.3.0 ) +require github.com/ViaQ/logerr/v2 v2.0.0 + require ( cloud.google.com/go v0.81.0 // indirect github.com/Azure/go-autorest v14.2.0+incompatible // indirect diff --git a/operator/go.sum b/operator/go.sum index f650bbbab3c6b..236024558d4fa 100644 --- a/operator/go.sum +++ b/operator/go.sum @@ -72,8 +72,8 @@ github.com/PuerkitoBio/purell v1.1.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbt github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= -github.com/ViaQ/logerr v1.1.0 h1:Jm+WBMbKUcwiDV/aJOIB5Rv5mg0UnyULRs1Jbz5Qq8U= -github.com/ViaQ/logerr v1.1.0/go.mod h1:D0eovRXC5iBP/jW8Nb3mF25JVxg16eAEmwHBsrGCXlI= +github.com/ViaQ/logerr/v2 v2.0.0 h1:5NOOexPjkhaga6E13JJfPa0vRtyW+nIvTYUDrheZETM= +github.com/ViaQ/logerr/v2 v2.0.0/go.mod h1:/qoWLm3YG40Sv5u75s4fvzjZ5p36xINzaxU2L+DJ9uw= github.com/agnivade/levenshtein v1.0.1/go.mod h1:CURSv5d9Uaml+FovSIICkLbAUZ9S4RqaHDIsdSBg7lM= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= diff --git a/operator/internal/handlers/internal/gateway/base_domain.go b/operator/internal/handlers/internal/gateway/base_domain.go index f5c84fc6871dd..5bdf6739ca471 100644 --- a/operator/internal/handlers/internal/gateway/base_domain.go +++ b/operator/internal/handlers/internal/gateway/base_domain.go @@ -3,7 +3,7 @@ package gateway import ( "context" - "github.com/ViaQ/logerr/kverrors" 
+ "github.com/ViaQ/logerr/v2/kverrors" lokiv1beta1 "github.com/grafana/loki/operator/api/v1beta1" "github.com/grafana/loki/operator/internal/external/k8s" "github.com/grafana/loki/operator/internal/status" diff --git a/operator/internal/handlers/internal/gateway/modes.go b/operator/internal/handlers/internal/gateway/modes.go index 53a8b95461f93..cef3f5f7c9369 100644 --- a/operator/internal/handlers/internal/gateway/modes.go +++ b/operator/internal/handlers/internal/gateway/modes.go @@ -1,7 +1,7 @@ package gateway import ( - "github.com/ViaQ/logerr/kverrors" + "github.com/ViaQ/logerr/v2/kverrors" lokiv1beta1 "github.com/grafana/loki/operator/api/v1beta1" ) diff --git a/operator/internal/handlers/internal/gateway/tenant_configmap.go b/operator/internal/handlers/internal/gateway/tenant_configmap.go index 76088f5ea91be..3b83f47b0b1a4 100644 --- a/operator/internal/handlers/internal/gateway/tenant_configmap.go +++ b/operator/internal/handlers/internal/gateway/tenant_configmap.go @@ -7,8 +7,7 @@ import ( "github.com/grafana/loki/operator/internal/manifests" "github.com/grafana/loki/operator/internal/manifests/openshift" - "github.com/ViaQ/logerr/kverrors" - "github.com/go-logr/logr" + "github.com/ViaQ/logerr/v2/kverrors" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/util/json" ctrl "sigs.k8s.io/controller-runtime" @@ -39,18 +38,16 @@ type openShiftSpec struct { // GetTenantConfigMapData returns the tenantName, tenantId, cookieSecret // clusters to auto-create redirect URLs for OpenShift Auth or an error. -func GetTenantConfigMapData(ctx context.Context, log logr.Logger, k k8s.Client, req ctrl.Request) map[string]openshift.TenantData { +func GetTenantConfigMapData(ctx context.Context, k k8s.Client, req ctrl.Request) (map[string]openshift.TenantData, error) { var tenantConfigMap corev1.ConfigMap key := client.ObjectKey{Name: manifests.GatewayName(req.Name), Namespace: req.Namespace} if err := k.Get(ctx, key, &tenantConfigMap); err != nil { - log.Error(err, "couldn't find") - return nil + return nil, kverrors.Wrap(err, "couldn't find tenant configMap.") } tcm, err := extractTenantConfigMap(&tenantConfigMap) if err != nil { - log.Error(err, "error occurred in extracting tenants.yaml configMap.") - return nil + return nil, kverrors.Wrap(err, "error occurred in extracting tenants.yaml configMap.") } tcmMap := make(map[string]openshift.TenantData) @@ -60,7 +57,7 @@ func GetTenantConfigMapData(ctx context.Context, log logr.Logger, k k8s.Client, } } - return tcmMap + return tcmMap, nil } // extractTenantConfigMap extracts tenants.yaml data if valid. 
diff --git a/operator/internal/handlers/internal/gateway/tenant_configmap_test.go b/operator/internal/handlers/internal/gateway/tenant_configmap_test.go index de50259829de9..3aa57e5766326 100644 --- a/operator/internal/handlers/internal/gateway/tenant_configmap_test.go +++ b/operator/internal/handlers/internal/gateway/tenant_configmap_test.go @@ -7,7 +7,6 @@ import ( "github.com/grafana/loki/operator/internal/external/k8s/k8sfakes" "github.com/grafana/loki/operator/internal/manifests/openshift" - "github.com/ViaQ/logerr/log" "github.com/stretchr/testify/require" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -35,8 +34,6 @@ tenants: cookieSecret: test789 `) -var logger = log.DefaultLogger() - func TestGetTenantConfigMapData_ConfigMapExist(t *testing.T) { k := &k8sfakes.FakeClient{} r := ctrl.Request{ @@ -61,8 +58,9 @@ func TestGetTenantConfigMapData_ConfigMapExist(t *testing.T) { return nil } - ts := GetTenantConfigMapData(context.TODO(), logger, k, r) + ts, err := GetTenantConfigMapData(context.TODO(), k, r) require.NotNil(t, ts) + require.NoError(t, err) expected := map[string]openshift.TenantData{ "application": { @@ -91,6 +89,7 @@ func TestGetTenantConfigMapData_ConfigMapNotExist(t *testing.T) { return nil } - ts := GetTenantConfigMapData(context.TODO(), logger, k, r) + ts, err := GetTenantConfigMapData(context.TODO(), k, r) require.Nil(t, ts) + require.Error(t, err) } diff --git a/operator/internal/handlers/internal/gateway/tenant_secrets.go b/operator/internal/handlers/internal/gateway/tenant_secrets.go index 45273a7ec55f8..4cc677b4fc824 100644 --- a/operator/internal/handlers/internal/gateway/tenant_secrets.go +++ b/operator/internal/handlers/internal/gateway/tenant_secrets.go @@ -4,7 +4,7 @@ import ( "context" "fmt" - "github.com/ViaQ/logerr/kverrors" + "github.com/ViaQ/logerr/v2/kverrors" lokiv1beta1 "github.com/grafana/loki/operator/api/v1beta1" "github.com/grafana/loki/operator/internal/external/k8s" diff --git a/operator/internal/handlers/internal/secrets/secrets.go b/operator/internal/handlers/internal/secrets/secrets.go index 6167785469214..4460b6dc0e9d4 100644 --- a/operator/internal/handlers/internal/secrets/secrets.go +++ b/operator/internal/handlers/internal/secrets/secrets.go @@ -1,7 +1,7 @@ package secrets import ( - "github.com/ViaQ/logerr/kverrors" + "github.com/ViaQ/logerr/v2/kverrors" lokiv1beta1 "github.com/grafana/loki/operator/api/v1beta1" "github.com/grafana/loki/operator/internal/manifests" "github.com/grafana/loki/operator/internal/manifests/storage" diff --git a/operator/internal/handlers/lokistack_create_or_update.go b/operator/internal/handlers/lokistack_create_or_update.go index b5eb20f447022..d2bb85b576779 100644 --- a/operator/internal/handlers/lokistack_create_or_update.go +++ b/operator/internal/handlers/lokistack_create_or_update.go @@ -14,7 +14,7 @@ import ( "github.com/grafana/loki/operator/internal/metrics" "github.com/grafana/loki/operator/internal/status" - "github.com/ViaQ/logerr/kverrors" + "github.com/ViaQ/logerr/v2/kverrors" "github.com/go-logr/logr" corev1 "k8s.io/api/core/v1" rbacv1 "k8s.io/api/rbac/v1" @@ -111,7 +111,10 @@ func CreateOrUpdateLokiStack( } // extract the existing tenant's id, cookieSecret if exists, otherwise create new. 
- tenantConfigMap = gateway.GetTenantConfigMapData(ctx, log, k, req) + tenantConfigMap, err = gateway.GetTenantConfigMapData(ctx, k, req) + if err != nil { + ll.Error(err, "error in getting tenant config map data") + } } } @@ -169,7 +172,7 @@ func CreateOrUpdateLokiStack( } desired := obj.DeepCopyObject().(client.Object) - mutateFn := manifests.MutateFuncFor(log, obj, desired) + mutateFn := manifests.MutateFuncFor(obj, desired) op, err := ctrl.CreateOrUpdate(ctx, k, obj, mutateFn) if err != nil { diff --git a/operator/internal/handlers/lokistack_create_or_update_test.go b/operator/internal/handlers/lokistack_create_or_update_test.go index 43ae8cee17884..d6d24df1b4f49 100644 --- a/operator/internal/handlers/lokistack_create_or_update_test.go +++ b/operator/internal/handlers/lokistack_create_or_update_test.go @@ -14,7 +14,8 @@ import ( "github.com/grafana/loki/operator/internal/manifests" "github.com/grafana/loki/operator/internal/status" - "github.com/ViaQ/logerr/log" + "github.com/ViaQ/logerr/v2/log" + "github.com/go-logr/logr" routev1 "github.com/openshift/api/route/v1" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -32,7 +33,7 @@ import ( ) var ( - logger = log.NewLogger("testing") + logger logr.Logger scheme = runtime.NewScheme() flags = manifests.FeatureFlags{ @@ -80,12 +81,10 @@ func TestMain(m *testing.M) { testing.Init() flag.Parse() - sink := log.MustGetSink(logger) if testing.Verbose() { - // set to the highest for verbose testing - sink.SetVerbosity(5) + logger = log.NewLogger("testing", log.WithVerbosity(5)) } else { - sink.SetOutput(ioutil.Discard) + logger = log.NewLogger("testing", log.WithOutput(ioutil.Discard)) } // Register the clientgo and CRD schemes diff --git a/operator/internal/manifests/build.go b/operator/internal/manifests/build.go index 6e811aeacc97b..8352a90e6387c 100644 --- a/operator/internal/manifests/build.go +++ b/operator/internal/manifests/build.go @@ -1,7 +1,7 @@ package manifests import ( - "github.com/ViaQ/logerr/kverrors" + "github.com/ViaQ/logerr/v2/kverrors" lokiv1beta1 "github.com/grafana/loki/operator/api/v1beta1" "github.com/grafana/loki/operator/internal/manifests/internal" diff --git a/operator/internal/manifests/gateway.go b/operator/internal/manifests/gateway.go index d176c576a911b..20361ff47a04a 100644 --- a/operator/internal/manifests/gateway.go +++ b/operator/internal/manifests/gateway.go @@ -5,7 +5,7 @@ import ( "fmt" "path" - "github.com/ViaQ/logerr/kverrors" + "github.com/ViaQ/logerr/v2/kverrors" "github.com/imdario/mergo" "github.com/grafana/loki/operator/internal/manifests/internal/gateway" diff --git a/operator/internal/manifests/gateway_tenants.go b/operator/internal/manifests/gateway_tenants.go index c4a693dc80623..f049adaaf9626 100644 --- a/operator/internal/manifests/gateway_tenants.go +++ b/operator/internal/manifests/gateway_tenants.go @@ -1,7 +1,7 @@ package manifests import ( - "github.com/ViaQ/logerr/kverrors" + "github.com/ViaQ/logerr/v2/kverrors" lokiv1beta1 "github.com/grafana/loki/operator/api/v1beta1" "github.com/grafana/loki/operator/internal/manifests/internal/gateway" "github.com/grafana/loki/operator/internal/manifests/openshift" diff --git a/operator/internal/manifests/internal/alerts/build.go b/operator/internal/manifests/internal/alerts/build.go index 12e8c35bf8acb..b1bf76558af16 100644 --- a/operator/internal/manifests/internal/alerts/build.go +++ b/operator/internal/manifests/internal/alerts/build.go @@ -6,7 +6,7 @@ import ( "io" "text/template" - "github.com/ViaQ/logerr/kverrors" 
+ "github.com/ViaQ/logerr/v2/kverrors" monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" "k8s.io/apimachinery/pkg/util/yaml" ) diff --git a/operator/internal/manifests/internal/config/build.go b/operator/internal/manifests/internal/config/build.go index 3bb49021cf8c4..b5fad138abd73 100644 --- a/operator/internal/manifests/internal/config/build.go +++ b/operator/internal/manifests/internal/config/build.go @@ -6,7 +6,7 @@ import ( "io/ioutil" "text/template" - "github.com/ViaQ/logerr/kverrors" + "github.com/ViaQ/logerr/v2/kverrors" ) const ( diff --git a/operator/internal/manifests/internal/gateway/build.go b/operator/internal/manifests/internal/gateway/build.go index caeca7046f4c0..67e434220f9a7 100644 --- a/operator/internal/manifests/internal/gateway/build.go +++ b/operator/internal/manifests/internal/gateway/build.go @@ -8,7 +8,7 @@ import ( lokiv1beta1 "github.com/grafana/loki/operator/api/v1beta1" - "github.com/ViaQ/logerr/kverrors" + "github.com/ViaQ/logerr/v2/kverrors" ) const ( diff --git a/operator/internal/manifests/mutate.go b/operator/internal/manifests/mutate.go index 6b887528a4120..e13a46c88c1da 100644 --- a/operator/internal/manifests/mutate.go +++ b/operator/internal/manifests/mutate.go @@ -3,8 +3,7 @@ package manifests import ( "reflect" - "github.com/ViaQ/logerr/kverrors" - "github.com/go-logr/logr" + "github.com/ViaQ/logerr/v2/kverrors" "github.com/imdario/mergo" routev1 "github.com/openshift/api/route/v1" monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" @@ -24,14 +23,18 @@ import ( // - Deployment // - StatefulSet // - ServiceMonitor -func MutateFuncFor(log logr.Logger, existing, desired client.Object) controllerutil.MutateFn { +func MutateFuncFor(existing, desired client.Object) controllerutil.MutateFn { return func() error { existingAnnotations := existing.GetAnnotations() - mergeWithOverride(log, &existingAnnotations, desired.GetAnnotations()) + if err := mergeWithOverride(&existingAnnotations, desired.GetAnnotations()); err != nil { + return err + } existing.SetAnnotations(existingAnnotations) existingLabels := existing.GetLabels() - mergeWithOverride(log, &existingLabels, desired.GetLabels()) + if err := mergeWithOverride(&existingLabels, desired.GetLabels()); err != nil { + return err + } existing.SetLabels(existingLabels) switch existing.(type) { @@ -43,7 +46,7 @@ func MutateFuncFor(log logr.Logger, existing, desired client.Object) controlleru case *corev1.Service: svc := existing.(*corev1.Service) wantSvc := desired.(*corev1.Service) - mutateService(log, svc, wantSvc) + return mutateService(svc, wantSvc) case *corev1.ServiceAccount: sa := existing.(*corev1.ServiceAccount) @@ -73,12 +76,12 @@ func MutateFuncFor(log logr.Logger, existing, desired client.Object) controlleru case *appsv1.Deployment: dpl := existing.(*appsv1.Deployment) wantDpl := desired.(*appsv1.Deployment) - mutateDeployment(log, dpl, wantDpl) + return mutateDeployment(dpl, wantDpl) case *appsv1.StatefulSet: sts := existing.(*appsv1.StatefulSet) wantSts := desired.(*appsv1.StatefulSet) - mutateStatefulSet(log, sts, wantSts) + return mutateStatefulSet(sts, wantSts) case *monitoringv1.ServiceMonitor: svcMonitor := existing.(*monitoringv1.ServiceMonitor) @@ -108,22 +111,18 @@ func MutateFuncFor(log logr.Logger, existing, desired client.Object) controlleru } } -func mergeWithOverride(log logr.Logger, dst, src interface{}) { +func mergeWithOverride(dst, src interface{}) error { err := mergo.Merge(dst, src, mergo.WithOverride) 
if err != nil { - log.Error(err, "unable to mergeWithOverride", "dst", dst, "src", src) + return kverrors.Wrap(err, "unable to mergeWithOverride", "dst", dst, "src", src) } + return nil } func mutateConfigMap(existing, desired *corev1.ConfigMap) { existing.BinaryData = desired.BinaryData } -func mutateService(log logr.Logger, existing, desired *corev1.Service) { - existing.Spec.Ports = desired.Spec.Ports - mergeWithOverride(log, &existing.Spec.Selector, desired.Spec.Selector) -} - func mutateServiceAccount(existing, desired *corev1.ServiceAccount) { existing.Annotations = desired.Annotations existing.Labels = desired.Labels @@ -153,33 +152,6 @@ func mutateRoleBinding(existing, desired *rbacv1.RoleBinding) { existing.Subjects = desired.Subjects } -func mutateDeployment(log logr.Logger, existing, desired *appsv1.Deployment) { - // Deployment selector is immutable so we set this value only if - // a new object is going to be created - if existing.CreationTimestamp.IsZero() { - mergeWithOverride(log, existing.Spec.Selector, desired.Spec.Selector) - } - existing.Spec.Replicas = desired.Spec.Replicas - mergeWithOverride(log, &existing.Spec.Template, desired.Spec.Template) - mergeWithOverride(log, &existing.Spec.Strategy, desired.Spec.Strategy) -} - -func mutateStatefulSet(log logr.Logger, existing, desired *appsv1.StatefulSet) { - // StatefulSet selector is immutable so we set this value only if - // a new object is going to be created - if existing.CreationTimestamp.IsZero() { - existing.Spec.Selector = desired.Spec.Selector - } - existing.Spec.PodManagementPolicy = desired.Spec.PodManagementPolicy - existing.Spec.Replicas = desired.Spec.Replicas - mergeWithOverride(log, &existing.Spec.Template, desired.Spec.Template) - for i := range existing.Spec.VolumeClaimTemplates { - existing.Spec.VolumeClaimTemplates[i].TypeMeta = desired.Spec.VolumeClaimTemplates[i].TypeMeta - existing.Spec.VolumeClaimTemplates[i].ObjectMeta = desired.Spec.VolumeClaimTemplates[i].ObjectMeta - existing.Spec.VolumeClaimTemplates[i].Spec = desired.Spec.VolumeClaimTemplates[i].Spec - } -} - func mutateServiceMonitor(existing, desired *monitoringv1.ServiceMonitor) { // ServiceMonitor selector is immutable so we set this value only if // a new object is going to be created @@ -204,3 +176,46 @@ func mutatePrometheusRule(existing, desired *monitoringv1.PrometheusRule) { existing.Labels = desired.Labels existing.Spec = desired.Spec } + +func mutateService(existing, desired *corev1.Service) error { + existing.Spec.Ports = desired.Spec.Ports + if err := mergeWithOverride(&existing.Spec.Selector, desired.Spec.Selector); err != nil { + return err + } + return nil +} + +func mutateDeployment(existing, desired *appsv1.Deployment) error { + // Deployment selector is immutable so we set this value only if + // a new object is going to be created + if existing.CreationTimestamp.IsZero() { + existing.Spec.Selector = desired.Spec.Selector + } + existing.Spec.Replicas = desired.Spec.Replicas + if err := mergeWithOverride(&existing.Spec.Template, desired.Spec.Template); err != nil { + return err + } + if err := mergeWithOverride(&existing.Spec.Strategy, desired.Spec.Strategy); err != nil { + return err + } + return nil +} + +func mutateStatefulSet(existing, desired *appsv1.StatefulSet) error { + // StatefulSet selector is immutable so we set this value only if + // a new object is going to be created + if existing.CreationTimestamp.IsZero() { + existing.Spec.Selector = desired.Spec.Selector + } + existing.Spec.PodManagementPolicy = 
desired.Spec.PodManagementPolicy + existing.Spec.Replicas = desired.Spec.Replicas + for i := range existing.Spec.VolumeClaimTemplates { + existing.Spec.VolumeClaimTemplates[i].TypeMeta = desired.Spec.VolumeClaimTemplates[i].TypeMeta + existing.Spec.VolumeClaimTemplates[i].ObjectMeta = desired.Spec.VolumeClaimTemplates[i].ObjectMeta + existing.Spec.VolumeClaimTemplates[i].Spec = desired.Spec.VolumeClaimTemplates[i].Spec + } + if err := mergeWithOverride(&existing.Spec.Template, desired.Spec.Template); err != nil { + return err + } + return nil +} diff --git a/operator/internal/manifests/mutate_test.go b/operator/internal/manifests/mutate_test.go index b0e1511c822f3..4699780080572 100644 --- a/operator/internal/manifests/mutate_test.go +++ b/operator/internal/manifests/mutate_test.go @@ -5,7 +5,6 @@ import ( "github.com/grafana/loki/operator/internal/manifests" - "github.com/ViaQ/logerr/log" routev1 "github.com/openshift/api/route/v1" monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" "github.com/stretchr/testify/require" @@ -18,8 +17,6 @@ import ( "k8s.io/utils/pointer" ) -var logger = log.DefaultLogger() - func TestGetMutateFunc_MutateObjectMeta(t *testing.T) { got := &corev1.ConfigMap{ ObjectMeta: metav1.ObjectMeta{ @@ -43,7 +40,7 @@ func TestGetMutateFunc_MutateObjectMeta(t *testing.T) { }, } - f := manifests.MutateFuncFor(logger, got, want) + f := manifests.MutateFuncFor(got, want) err := f() require.NoError(t, err) @@ -55,7 +52,7 @@ func TestGetMutateFunc_MutateObjectMeta(t *testing.T) { func TestGetMutateFunc_ReturnErrOnNotSupportedType(t *testing.T) { got := &corev1.Endpoints{} want := &corev1.Endpoints{} - f := manifests.MutateFuncFor(logger, got, want) + f := manifests.MutateFuncFor(got, want) require.Error(t, f()) } @@ -71,7 +68,7 @@ func TestGetMutateFunc_MutateConfigMap(t *testing.T) { BinaryData: map[string][]byte{"btest": []byte("btestss")}, } - f := manifests.MutateFuncFor(logger, got, want) + f := manifests.MutateFuncFor(got, want) err := f() require.NoError(t, err) @@ -120,7 +117,7 @@ func TestGetMutateFunc_MutateServiceSpec(t *testing.T) { }, } - f := manifests.MutateFuncFor(logger, got, want) + f := manifests.MutateFuncFor(got, want) err := f() require.NoError(t, err) @@ -235,7 +232,7 @@ func TestGetMutateFunc_MutateServiceAccountObjectMeta(t *testing.T) { tt := tt t.Run(tt.name, func(t *testing.T) { t.Parallel() - f := manifests.MutateFuncFor(logger, tt.got, tt.want) + f := manifests.MutateFuncFor(tt.got, tt.want) err := f() require.NoError(t, err) @@ -297,7 +294,7 @@ func TestGetMutateFunc_MutateClusterRole(t *testing.T) { }, } - f := manifests.MutateFuncFor(logger, got, want) + f := manifests.MutateFuncFor(got, want) err := f() require.NoError(t, err) @@ -362,7 +359,7 @@ func TestGetMutateFunc_MutateClusterRoleBinding(t *testing.T) { }, } - f := manifests.MutateFuncFor(logger, got, want) + f := manifests.MutateFuncFor(got, want) err := f() require.NoError(t, err) @@ -417,7 +414,7 @@ func TestGetMutateFunc_MutateRole(t *testing.T) { }, } - f := manifests.MutateFuncFor(logger, got, want) + f := manifests.MutateFuncFor(got, want) err := f() require.NoError(t, err) @@ -482,7 +479,7 @@ func TestGetMutateFunc_MutateRoleBinding(t *testing.T) { }, } - f := manifests.MutateFuncFor(logger, got, want) + f := manifests.MutateFuncFor(got, want) err := f() require.NoError(t, err) @@ -601,7 +598,7 @@ func TestGeMutateFunc_MutateDeploymentSpec(t *testing.T) { tst := tst t.Run(tst.name, func(t *testing.T) { t.Parallel() - f := 
manifests.MutateFuncFor(logger, tst.got, tst.want) + f := manifests.MutateFuncFor(tst.got, tst.want) err := f() require.NoError(t, err) @@ -758,7 +755,7 @@ func TestGeMutateFunc_MutateStatefulSetSpec(t *testing.T) { tst := tst t.Run(tst.name, func(t *testing.T) { t.Parallel() - f := manifests.MutateFuncFor(logger, tst.got, tst.want) + f := manifests.MutateFuncFor(tst.got, tst.want) err := f() require.NoError(t, err) @@ -931,7 +928,7 @@ func TestGetMutateFunc_MutateServiceMonitorSpec(t *testing.T) { tst := tst t.Run(tst.name, func(t *testing.T) { t.Parallel() - f := manifests.MutateFuncFor(logger, tst.got, tst.want) + f := manifests.MutateFuncFor(tst.got, tst.want) err := f() require.NoError(t, err) @@ -999,7 +996,7 @@ func TestGetMutateFunc_MutateIngress(t *testing.T) { }, } - f := manifests.MutateFuncFor(logger, got, want) + f := manifests.MutateFuncFor(got, want) err := f() require.NoError(t, err) @@ -1052,7 +1049,7 @@ func TestGetMutateFunc_MutateRoute(t *testing.T) { }, } - f := manifests.MutateFuncFor(logger, got, want) + f := manifests.MutateFuncFor(got, want) err := f() require.NoError(t, err) diff --git a/operator/internal/manifests/openshift/configure.go b/operator/internal/manifests/openshift/configure.go index dec2f139340c7..0379648fc2b46 100644 --- a/operator/internal/manifests/openshift/configure.go +++ b/operator/internal/manifests/openshift/configure.go @@ -5,7 +5,7 @@ import ( "regexp" "strings" - "github.com/ViaQ/logerr/kverrors" + "github.com/ViaQ/logerr/v2/kverrors" "github.com/imdario/mergo" monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" diff --git a/operator/internal/manifests/service_monitor.go b/operator/internal/manifests/service_monitor.go index 80a4de61c9b82..197b1278f5e19 100644 --- a/operator/internal/manifests/service_monitor.go +++ b/operator/internal/manifests/service_monitor.go @@ -1,7 +1,7 @@ package manifests import ( - "github.com/ViaQ/logerr/kverrors" + "github.com/ViaQ/logerr/v2/kverrors" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" diff --git a/operator/internal/sizes/predict.go b/operator/internal/sizes/predict.go index 02d4ff7b35c1e..c130ab424097e 100644 --- a/operator/internal/sizes/predict.go +++ b/operator/internal/sizes/predict.go @@ -6,7 +6,7 @@ import ( "os" "time" - "github.com/ViaQ/logerr/kverrors" + "github.com/ViaQ/logerr/v2/kverrors" "github.com/prometheus/client_golang/api" promv1 "github.com/prometheus/client_golang/api/prometheus/v1" diff --git a/operator/internal/status/components.go b/operator/internal/status/components.go index 010b6af0bb43a..66d0426e7b9c5 100644 --- a/operator/internal/status/components.go +++ b/operator/internal/status/components.go @@ -3,7 +3,7 @@ package status import ( "context" - "github.com/ViaQ/logerr/kverrors" + "github.com/ViaQ/logerr/v2/kverrors" lokiv1beta1 "github.com/grafana/loki/operator/api/v1beta1" "github.com/grafana/loki/operator/internal/external/k8s" "github.com/grafana/loki/operator/internal/manifests" diff --git a/operator/internal/status/lokistack.go b/operator/internal/status/lokistack.go index b1e8bf46a2b48..ef94966c86bb6 100644 --- a/operator/internal/status/lokistack.go +++ b/operator/internal/status/lokistack.go @@ -4,7 +4,7 @@ import ( "context" "fmt" - "github.com/ViaQ/logerr/kverrors" + "github.com/ViaQ/logerr/v2/kverrors" lokiv1beta1 "github.com/grafana/loki/operator/api/v1beta1" "github.com/grafana/loki/operator/internal/external/k8s" diff --git a/operator/internal/status/status.go 
b/operator/internal/status/status.go index 52bc2d8910f90..9138db3c1ebdc 100644 --- a/operator/internal/status/status.go +++ b/operator/internal/status/status.go @@ -3,7 +3,7 @@ package status import ( "context" - "github.com/ViaQ/logerr/kverrors" + "github.com/ViaQ/logerr/v2/kverrors" lokiv1beta1 "github.com/grafana/loki/operator/api/v1beta1" "github.com/grafana/loki/operator/internal/external/k8s" diff --git a/operator/main.go b/operator/main.go index fdab93314d3ab..02f0a3b4bf4b5 100644 --- a/operator/main.go +++ b/operator/main.go @@ -6,8 +6,8 @@ import ( "net/http/pprof" "os" - "github.com/ViaQ/logerr/kverrors" - "github.com/ViaQ/logerr/log" + "github.com/ViaQ/logerr/v2/kverrors" + "github.com/ViaQ/logerr/v2/log" // Import all Kubernetes client auth plugins (e.g. Azure, GCP, OIDC, etc.) // to ensure that exec-entrypoint and run can make use of them. From 0cc3fe7e541a2753b5b5b7046a1d970b65ef15b3 Mon Sep 17 00:00:00 2001 From: Kaviraj Kanagaraj Date: Tue, 26 Apr 2022 17:02:38 +0200 Subject: [PATCH 017/223] Disable calling new index-gateway client's API. (#6025) * Disable calling new index-gateway client's API. * Remove infinite recursive call * Correct mock setup for test --- .../series/series_index_gateway_store.go | 71 ++----------------- .../series/series_index_gateway_store_test.go | 17 ++++- 2 files changed, 20 insertions(+), 68 deletions(-) diff --git a/pkg/storage/stores/series/series_index_gateway_store.go b/pkg/storage/stores/series/series_index_gateway_store.go index 5243c537a5786..9840dad6f2002 100644 --- a/pkg/storage/stores/series/series_index_gateway_store.go +++ b/pkg/storage/stores/series/series_index_gateway_store.go @@ -3,14 +3,11 @@ package series import ( "context" - "github.com/gogo/status" "github.com/prometheus/common/model" "github.com/prometheus/prometheus/model/labels" "google.golang.org/grpc" - "google.golang.org/grpc/codes" "github.com/grafana/loki/pkg/logproto" - "github.com/grafana/loki/pkg/logql/syntax" "github.com/grafana/loki/pkg/storage/stores/shipper/indexgateway/indexgatewaypb" ) @@ -33,78 +30,18 @@ func NewIndexGatewayClientStore(client IndexGatewayClient, index *IndexStore) *I } func (c *IndexGatewayClientStore) GetChunkRefs(ctx context.Context, userID string, from, through model.Time, allMatchers ...*labels.Matcher) ([]logproto.ChunkRef, error) { - response, err := c.client.GetChunkRef(ctx, &indexgatewaypb.GetChunkRefRequest{ - From: from, - Through: through, - Matchers: (&syntax.MatchersExpr{Mts: allMatchers}).String(), - }) - if err != nil { - if isUnimplementedCallError(err) { - // Handle communication with older index gateways gracefully, by falling back to the index store calls. - return c.IndexStore.GetChunkRefs(ctx, userID, from, through, allMatchers...) - } - return nil, err - } - result := make([]logproto.ChunkRef, len(response.Refs)) - for i, ref := range response.Refs { - result[i] = *ref - } - - return result, nil + return c.IndexStore.GetChunkRefs(ctx, userID, from, through, allMatchers...) } func (c *IndexGatewayClientStore) GetSeries(ctx context.Context, userID string, from, through model.Time, matchers ...*labels.Matcher) ([]labels.Labels, error) { - refs, err := c.GetChunkRefs(ctx, userID, from, through, matchers...) - if err != nil { - return nil, err - } - return c.chunksToSeries(ctx, refs, matchers) + return c.IndexStore.GetSeries(ctx, userID, from, through, matchers...) } // LabelNamesForMetricName retrieves all label names for a metric name. 
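// It delegates directly to the embedded IndexStore.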
func (c *IndexGatewayClientStore) LabelNamesForMetricName(ctx context.Context, userID string, from, through model.Time, metricName string) ([]string, error) { - resp, err := c.client.LabelNamesForMetricName(ctx, &indexgatewaypb.LabelNamesForMetricNameRequest{ - MetricName: metricName, - From: from, - Through: through, - }) - if isUnimplementedCallError(err) { - // Handle communication with older index gateways gracefully, by falling back to the index store calls. - return c.IndexStore.LabelNamesForMetricName(ctx, userID, from, through, metricName) - } - if err != nil { - return nil, err - } - return resp.Values, nil + return c.IndexStore.LabelNamesForMetricName(ctx, userID, from, through, metricName) } func (c *IndexGatewayClientStore) LabelValuesForMetricName(ctx context.Context, userID string, from, through model.Time, metricName string, labelName string, matchers ...*labels.Matcher) ([]string, error) { - resp, err := c.client.LabelValuesForMetricName(ctx, &indexgatewaypb.LabelValuesForMetricNameRequest{ - MetricName: metricName, - LabelName: labelName, - From: from, - Through: through, - Matchers: (&syntax.MatchersExpr{Mts: matchers}).String(), - }) - if isUnimplementedCallError(err) { - // Handle communication with older index gateways gracefully, by falling back to the index store calls. - return c.IndexStore.LabelValuesForMetricName(ctx, userID, from, through, metricName, labelName, matchers...) - } - if err != nil { - return nil, err - } - return resp.Values, nil -} - -// isUnimplementedCallError tells if the GRPC error is a gRPC error with code Unimplemented. -func isUnimplementedCallError(err error) bool { - if err == nil { - return false - } - - s, ok := status.FromError(err) - if !ok { - return false - } - return (s.Code() == codes.Unimplemented) + return c.IndexStore.LabelValuesForMetricName(ctx, userID, from, through, metricName, labelName) } diff --git a/pkg/storage/stores/series/series_index_gateway_store_test.go b/pkg/storage/stores/series/series_index_gateway_store_test.go index c1bbd65868edc..239f962ef0173 100644 --- a/pkg/storage/stores/series/series_index_gateway_store_test.go +++ b/pkg/storage/stores/series/series_index_gateway_store_test.go @@ -28,13 +28,28 @@ func (fakeClient) GetChunkRef(ctx context.Context, in *indexgatewaypb.GetChunkRe } func Test_IndexGatewayClient(t *testing.T) { + schemaCfg := config.SchemaConfig{ + Configs: []config.PeriodConfig{ + {From: config.DayTime{Time: model.Now().Add(-24 * time.Hour)}, Schema: "v12", RowShards: 16}, + }, + } + schema, err := index.CreateSchema(schemaCfg.Configs[0]) + require.NoError(t, err) + testutils.ResetMockStorage() + tm, err := index.NewTableManager(index.TableManagerConfig{}, schemaCfg, 2*time.Hour, testutils.NewMockStorage(), nil, nil, nil) + require.NoError(t, err) + require.NoError(t, tm.SyncTables(context.Background())) + idx := IndexGatewayClientStore{ client: fakeClient{}, IndexStore: &IndexStore{ chunkBatchSize: 1, + schema: schema, + schemaCfg: schemaCfg, + index: testutils.NewMockStorage(), }, } - _, err := idx.GetSeries(context.Background(), "foo", model.Earliest, model.Latest) + _, err = idx.GetSeries(context.Background(), "foo", model.Now(), model.Now().Add(1*time.Hour), labels.MustNewMatcher(labels.MatchEqual, "__name__", "logs")) require.NoError(t, err) } From 986fe5cf05324709e0c821e25d2169459f252bf3 Mon Sep 17 00:00:00 2001 From: Cyril Tovena Date: Tue, 26 Apr 2022 17:13:41 +0200 Subject: [PATCH 018/223] Remove empty match within concat of regex (#6026) * Remove empty match within concat of regex 
* add back tests
---
 pkg/logql/log/filter.go      | 10 ++++++++++
 pkg/logql/log/filter_test.go |  2 ++
 2 files changed, 12 insertions(+)

diff --git a/pkg/logql/log/filter.go b/pkg/logql/log/filter.go
index e1530fec2c49e..764584b5889e0 100644
--- a/pkg/logql/log/filter.go
+++ b/pkg/logql/log/filter.go
@@ -471,6 +471,16 @@ func simplifyAlternate(reg *syntax.Regexp) (Filterer, bool) {
 // Anything else is rejected.
 func simplifyConcat(reg *syntax.Regexp, baseLiteral []byte) (Filterer, bool) {
 	clearCapture(reg.Sub...)
+	// remove empty matches as we don't need them for filtering
+	i := 0
+	for _, r := range reg.Sub {
+		if r.Op == syntax.OpEmptyMatch {
+			continue
+		}
+		reg.Sub[i] = r
+		i++
+	}
+	reg.Sub = reg.Sub[:i]
 	// we support only simplification of concat operation with 3 sub expressions.
 	// for instance .*foo.*bar contains 4 subs (.*+foo+.*+bar) and can't be simplified.
 	if len(reg.Sub) > 3 {
diff --git a/pkg/logql/log/filter_test.go b/pkg/logql/log/filter_test.go
index bd28f26868a9d..0e3d112e24c9d 100644
--- a/pkg/logql/log/filter_test.go
+++ b/pkg/logql/log/filter_test.go
@@ -56,6 +56,7 @@ func Test_SimplifiedRegex(t *testing.T) {
 		{"(?i)foo", true, newContainsFilter([]byte("foo"), true), true},
 		{"(?i)界", true, newContainsFilter([]byte("界"), true), true},
 		{"(?i)ïB", true, newContainsFilter([]byte("ïB"), true), true},
+		{"(?:)foo|fatal|exception", true, newOrFilter(newOrFilter(newContainsFilter([]byte("foo"), false), newContainsFilter([]byte("fatal"), false)), newContainsFilter([]byte("exception"), false)), true},
 		// regex we are not supporting.
 		{"[a-z]+foo", true, nil, false},
@@ -160,6 +161,7 @@ func Benchmark_LineFilter(b *testing.B) {
 		{"(node:24) buzz*"},
 		{"(HTTP/.*\\\"|HEAD|GET) (2..|5..)"},
 		{"\"@l\":\"(Warning|Error|Fatal)\""},
+		{"(?:)foo|fatal|exception"},
 	} {
 		benchmarkRegex(b, test.re, logline, true)
 		benchmarkRegex(b, test.re, logline, false)
From 88c4781dc72b410a30e054f82d2c4aad0558afbb Mon Sep 17 00:00:00 2001
From: Dylan Guedes
Date: Wed, 27 Apr 2022 14:51:06 -0300
Subject: [PATCH 019/223] Revendor `grafana/tail` to fix promtail symlink behavior (#6034)

* Revendor grafana/tail.
- This revendoring brings the fix to relative symlinks, as described in https://github.com/grafana/loki/issues/3374.

* Prune package from `go mod tidy`.

* noop
---
 go.mod                                       | 2 +-
 go.sum                                       | 4 ++--
 vendor/github.com/hpcloud/tail/tail_posix.go | 3 ++-
 vendor/modules.txt                           | 4 ++--
 4 files changed, 7 insertions(+), 6 deletions(-)

diff --git a/go.mod b/go.mod
index f1ca8b19072ec..4dcca277a97ac 100644
--- a/go.mod
+++ b/go.mod
@@ -284,7 +284,7 @@ require (
 // Upgrade to run with gRPC 1.3.0 and above.
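// Each replace directive below swaps the module on the left for the fork or pinned version on the right when resolving dependencies.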
replace github.com/sercand/kuberesolver => github.com/sercand/kuberesolver v2.4.0+incompatible -replace github.com/hpcloud/tail => github.com/grafana/tail v0.0.0-20201004203643-7aa4e4a91f03 +replace github.com/hpcloud/tail => github.com/grafana/tail v0.0.0-20220426200921-98e8eb28ea4c replace github.com/Azure/azure-sdk-for-go => github.com/Azure/azure-sdk-for-go v36.2.0+incompatible diff --git a/go.sum b/go.sum index 5e258309f8b49..75f66499f570c 100644 --- a/go.sum +++ b/go.sum @@ -1044,8 +1044,8 @@ github.com/grafana/gocql v0.0.0-20200605141915-ba5dc39ece85/go.mod h1:crI9WX6p0I github.com/grafana/regexp v0.0.0-20220202152315-e74e38789280/go.mod h1:M5qHK+eWfAv8VR/265dIuEpL3fNfeC21tXXp9itM24A= github.com/grafana/regexp v0.0.0-20220304100321-149c8afcd6cb h1:wwzNkyaQwcXCzQuKoWz3lwngetmcyg+EhW0fF5lz73M= github.com/grafana/regexp v0.0.0-20220304100321-149c8afcd6cb/go.mod h1:M5qHK+eWfAv8VR/265dIuEpL3fNfeC21tXXp9itM24A= -github.com/grafana/tail v0.0.0-20201004203643-7aa4e4a91f03 h1:fGgFrAraMB0BaPfYumu+iulfDXwHm+GFyHA4xEtBqI8= -github.com/grafana/tail v0.0.0-20201004203643-7aa4e4a91f03/go.mod h1:GIMXMPB/lRAllP5rVDvcGif87ryO2hgD7tCtHMdHrho= +github.com/grafana/tail v0.0.0-20220426200921-98e8eb28ea4c h1:qIsCzNln5YzuXfXbJgXhpfM+4gY7qi3mED3eYQS4Fls= +github.com/grafana/tail v0.0.0-20220426200921-98e8eb28ea4c/go.mod h1:GIMXMPB/lRAllP5rVDvcGif87ryO2hgD7tCtHMdHrho= github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= diff --git a/vendor/github.com/hpcloud/tail/tail_posix.go b/vendor/github.com/hpcloud/tail/tail_posix.go index a21f3ded46c6a..8149d9d4ce840 100644 --- a/vendor/github.com/hpcloud/tail/tail_posix.go +++ b/vendor/github.com/hpcloud/tail/tail_posix.go @@ -4,6 +4,7 @@ package tail import ( "os" + "path/filepath" ) func OpenFile(name string) (file *os.File, err error) { @@ -14,7 +15,7 @@ func OpenFile(name string) (file *os.File, err error) { return nil, err } if fi.Mode()&os.ModeSymlink == os.ModeSymlink { - filename, err = os.Readlink(name) + filename, err = filepath.EvalSymlinks(name) if err != nil { return nil, err } diff --git a/vendor/modules.txt b/vendor/modules.txt index 90e2c6ab9364c..5bb151bfe5e2b 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -610,7 +610,7 @@ github.com/hashicorp/memberlist # github.com/hashicorp/serf v0.9.6 ## explicit; go 1.12 github.com/hashicorp/serf/coordinate -# github.com/hpcloud/tail v1.0.0 => github.com/grafana/tail v0.0.0-20201004203643-7aa4e4a91f03 +# github.com/hpcloud/tail v1.0.0 => github.com/grafana/tail v0.0.0-20220426200921-98e8eb28ea4c ## explicit; go 1.13 github.com/hpcloud/tail github.com/hpcloud/tail/ratelimiter @@ -1651,7 +1651,7 @@ sigs.k8s.io/structured-merge-diff/v4/value ## explicit; go 1.12 sigs.k8s.io/yaml # github.com/sercand/kuberesolver => github.com/sercand/kuberesolver v2.4.0+incompatible -# github.com/hpcloud/tail => github.com/grafana/tail v0.0.0-20201004203643-7aa4e4a91f03 +# github.com/hpcloud/tail => github.com/grafana/tail v0.0.0-20220426200921-98e8eb28ea4c # github.com/Azure/azure-sdk-for-go => github.com/Azure/azure-sdk-for-go v36.2.0+incompatible # github.com/Azure/azure-storage-blob-go => github.com/MasslessParticle/azure-storage-blob-go v0.14.1-0.20220216145902-b5e698eff68e # k8s.io/client-go => 
k8s.io/client-go v0.21.0 From fc8c4f05923150c7e68d35b873af5e9f1191ac03 Mon Sep 17 00:00:00 2001 From: Ed Welch Date: Thu, 28 Apr 2022 07:58:49 -0400 Subject: [PATCH 020/223] Loki: Add a configurable ability to fudge incoming timestamps for guaranteed query sort order when receiving entries for the same stream that have duplicate timestamps. (#6042) * Add a per tenant ability to fudge duplicate timestamps in the distributor Signed-off-by: Edward Welch * update docs Signed-off-by: Edward Welch * lint Signed-off-by: Edward Welch * update changelog Signed-off-by: Edward Welch --- CHANGELOG.md | 1 + docs/sources/configuration/_index.md | 10 ++ pkg/distributor/distributor.go | 14 ++ pkg/distributor/distributor_test.go | 208 +++++++++++++++++++++++++++ pkg/distributor/limits.go | 2 + pkg/distributor/validator.go | 21 +-- pkg/validation/limits.go | 30 ++-- 7 files changed, 265 insertions(+), 21 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 3ddf5804c904b..5d1565f4e8d5a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -68,6 +68,7 @@ ##### Fixes * [5685](https://github.com/grafana/loki/pull/5685) **chaudum**: Assert that push values tuples consist of string values ##### Changes +* [6042](https://github.com/grafana/loki/pull/6042) **slim-bean**: Add a new configuration to allow fudging of ingested timestamps to guarantee sort order of duplicate timestamps at query time. * [5777](https://github.com/grafana/loki/pull/5777) **tatchiuleung**: storage: make Azure blobID chunk delimiter configurable * [5650](https://github.com/grafana/loki/pull/5650) **cyriltovena**: Remove more chunkstore and schema version below v9 * [5643](https://github.com/grafana/loki/pull/5643) **simonswine**: Introduce a ChunkRef type as part of logproto diff --git a/docs/sources/configuration/_index.md b/docs/sources/configuration/_index.md index 4861c24537c44..9a1c3c0dd6520 100644 --- a/docs/sources/configuration/_index.md +++ b/docs/sources/configuration/_index.md @@ -2100,6 +2100,16 @@ The `limits_config` block configures global and per-tenant limits in Loki. # CLI flag: -distributor.max-line-size-truncate [max_line_size_truncate: | default = false ] +# Fudge the log line timestamp during ingestion when it's the same as the previous entry for the same stream +# When enabled, if a log line in a push request has the same timestamp as the previous line +# for the same stream, one nanosecond is added to the log line. This will preserve the received +# order of log lines with the exact same timestamp when they are queried by slightly altering +# their stored timestamp. NOTE: this is imperfect because Loki accepts out of order writes +# and another push request for the same stream could contain duplicate timestamps to existing +# entries and they will not be fudged. +# CLI flag: -validation.fudge-duplicate-timestamps +[fudge_duplicate_timestamp: | default = false ] + # Maximum number of log entries that will be returned for a query. # CLI flag: -validation.max-entries-limit [max_entries_limit_per_query: | default = 5000 ] diff --git a/pkg/distributor/distributor.go b/pkg/distributor/distributor.go index 932c9f9b76f63..d302d24534d81 100644 --- a/pkg/distributor/distributor.go +++ b/pkg/distributor/distributor.go @@ -271,7 +271,21 @@ func (d *Distributor) Push(ctx context.Context, req *logproto.PushRequest) (*log validationErr = err continue } + stream.Entries[n] = entry + + // If configured for this tenant, fudge duplicate timestamps. 
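+		// For example: entries received for one stream as (t, "a"), (t, "b"), (t+1ns, "c")
+		// are stored as (t, "a"), (t+1ns, "b"), (t+2ns, "c").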
Note, this is imperfect + // since Loki will accept out of order writes it doesn't account for separate + // pushes with overlapping time ranges having entries with duplicate timestamps + if validationContext.fudgeDuplicateTimestamps && n != 0 && stream.Entries[n-1].Timestamp.Equal(entry.Timestamp) { + // Traditional logic for Loki is that 2 lines with the same timestamp and + // exact same content will be de-duplicated, (i.e. only one will be stored, others dropped) + // To maintain this behavior, only fudge the timestamp if the log content is different + if stream.Entries[n-1].Line != entry.Line { + stream.Entries[n].Timestamp = entry.Timestamp.Add(1 * time.Nanosecond) + } + } + n++ validatedSamplesSize += len(entry.Line) validatedSamplesCount++ diff --git a/pkg/distributor/distributor_test.go b/pkg/distributor/distributor_test.go index f39da9d93541f..2da8986a3f145 100644 --- a/pkg/distributor/distributor_test.go +++ b/pkg/distributor/distributor_test.go @@ -100,6 +100,214 @@ func TestDistributor(t *testing.T) { } } +func Test_FudgeTimestamp(t *testing.T) { + fudgingDisabled := &validation.Limits{} + flagext.DefaultValues(fudgingDisabled) + fudgingDisabled.RejectOldSamples = false + + fudgingEnabled := &validation.Limits{} + flagext.DefaultValues(fudgingEnabled) + fudgingEnabled.RejectOldSamples = false + fudgingEnabled.FudgeDuplicateTimestamp = true + + tests := map[string]struct { + limits *validation.Limits + push *logproto.PushRequest + expectedPush *logproto.PushRequest + }{ + "fudging disabled, no dupes": { + limits: fudgingDisabled, + push: &logproto.PushRequest{ + Streams: []logproto.Stream{ + { + Labels: "{job=\"foo\"}", + Entries: []logproto.Entry{ + {Timestamp: time.Unix(123456, 0), Line: "heyooooooo"}, + {Timestamp: time.Unix(123457, 0), Line: "heyiiiiiii"}, + }, + }, + }, + }, + expectedPush: &logproto.PushRequest{ + Streams: []logproto.Stream{ + { + Labels: "{job=\"foo\"}", + Entries: []logproto.Entry{ + {Timestamp: time.Unix(123456, 0), Line: "heyooooooo"}, + {Timestamp: time.Unix(123457, 0), Line: "heyiiiiiii"}, + }, + }, + }, + }, + }, + "fudging disabled, with dupe timestamp different entry": { + limits: fudgingDisabled, + push: &logproto.PushRequest{ + Streams: []logproto.Stream{ + { + Labels: "{job=\"foo\"}", + Entries: []logproto.Entry{ + {Timestamp: time.Unix(123456, 0), Line: "heyooooooo"}, + {Timestamp: time.Unix(123456, 0), Line: "heyiiiiiii"}, + }, + }, + }, + }, + expectedPush: &logproto.PushRequest{ + Streams: []logproto.Stream{ + { + Labels: "{job=\"foo\"}", + Entries: []logproto.Entry{ + {Timestamp: time.Unix(123456, 0), Line: "heyooooooo"}, + {Timestamp: time.Unix(123456, 0), Line: "heyiiiiiii"}, + }, + }, + }, + }, + }, + "fudging disabled, with dupe timestamp same entry": { + limits: fudgingDisabled, + push: &logproto.PushRequest{ + Streams: []logproto.Stream{ + { + Labels: "{job=\"foo\"}", + Entries: []logproto.Entry{ + {Timestamp: time.Unix(123456, 0), Line: "heyooooooo"}, + {Timestamp: time.Unix(123456, 0), Line: "heyooooooo"}, + }, + }, + }, + }, + expectedPush: &logproto.PushRequest{ + Streams: []logproto.Stream{ + { + Labels: "{job=\"foo\"}", + Entries: []logproto.Entry{ + {Timestamp: time.Unix(123456, 0), Line: "heyooooooo"}, + {Timestamp: time.Unix(123456, 0), Line: "heyooooooo"}, + }, + }, + }, + }, + }, + "fudging enabled, no dupes": { + limits: fudgingEnabled, + push: &logproto.PushRequest{ + Streams: []logproto.Stream{ + { + Labels: "{job=\"foo\"}", + Entries: []logproto.Entry{ + {Timestamp: time.Unix(123456, 0), Line: "heyooooooo"}, + 
{Timestamp: time.Unix(123457, 0), Line: "heyiiiiiii"}, + }, + }, + }, + }, + expectedPush: &logproto.PushRequest{ + Streams: []logproto.Stream{ + { + Labels: "{job=\"foo\"}", + Entries: []logproto.Entry{ + {Timestamp: time.Unix(123456, 0), Line: "heyooooooo"}, + {Timestamp: time.Unix(123457, 0), Line: "heyiiiiiii"}, + }, + }, + }, + }, + }, + "fudging enabled, with dupe timestamp different entry": { + limits: fudgingEnabled, + push: &logproto.PushRequest{ + Streams: []logproto.Stream{ + { + Labels: "{job=\"foo\"}", + Entries: []logproto.Entry{ + {Timestamp: time.Unix(123456, 0), Line: "heyooooooo"}, + {Timestamp: time.Unix(123456, 0), Line: "heyiiiiiii"}, + }, + }, + }, + }, + expectedPush: &logproto.PushRequest{ + Streams: []logproto.Stream{ + { + Labels: "{job=\"foo\"}", + Entries: []logproto.Entry{ + {Timestamp: time.Unix(123456, 0), Line: "heyooooooo"}, + {Timestamp: time.Unix(123456, 1), Line: "heyiiiiiii"}, + }, + }, + }, + }, + }, + "fudging enabled, with dupe timestamp same entry": { + limits: fudgingEnabled, + push: &logproto.PushRequest{ + Streams: []logproto.Stream{ + { + Labels: "{job=\"foo\"}", + Entries: []logproto.Entry{ + {Timestamp: time.Unix(123456, 0), Line: "heyooooooo"}, + {Timestamp: time.Unix(123456, 0), Line: "heyooooooo"}, + }, + }, + }, + }, + expectedPush: &logproto.PushRequest{ + Streams: []logproto.Stream{ + { + Labels: "{job=\"foo\"}", + Entries: []logproto.Entry{ + {Timestamp: time.Unix(123456, 0), Line: "heyooooooo"}, + {Timestamp: time.Unix(123456, 0), Line: "heyooooooo"}, + }, + }, + }, + }, + }, + "fudging enabled, multiple subsequent fudges": { + limits: fudgingEnabled, + push: &logproto.PushRequest{ + Streams: []logproto.Stream{ + { + Labels: "{job=\"foo\"}", + Entries: []logproto.Entry{ + {Timestamp: time.Unix(123456, 0), Line: "heyooooooo"}, + {Timestamp: time.Unix(123456, 0), Line: "hi"}, + {Timestamp: time.Unix(123456, 1), Line: "hey there"}, + }, + }, + }, + }, + expectedPush: &logproto.PushRequest{ + Streams: []logproto.Stream{ + { + Labels: "{job=\"foo\"}", + Entries: []logproto.Entry{ + {Timestamp: time.Unix(123456, 0), Line: "heyooooooo"}, + {Timestamp: time.Unix(123456, 1), Line: "hi"}, + {Timestamp: time.Unix(123456, 2), Line: "hey there"}, + }, + }, + }, + }, + }, + } + + for testName, testData := range tests { + testData := testData + + t.Run(testName, func(t *testing.T) { + ingester := &mockIngester{} + d := prepare(t, testData.limits, nil, func(addr string) (ring_client.PoolClient, error) { return ingester, nil }) + defer services.StopAndAwaitTerminated(context.Background(), d) //nolint:errcheck + _, err := d.Push(ctx, testData.push) + assert.NoError(t, err) + assert.Equal(t, testData.expectedPush, ingester.pushed[0]) + }) + } +} + func Test_SortLabelsOnPush(t *testing.T) { limits := &validation.Limits{} flagext.DefaultValues(limits) diff --git a/pkg/distributor/limits.go b/pkg/distributor/limits.go index 559f4b55051c3..cade28eb093da 100644 --- a/pkg/distributor/limits.go +++ b/pkg/distributor/limits.go @@ -14,4 +14,6 @@ type Limits interface { CreationGracePeriod(userID string) time.Duration RejectOldSamples(userID string) bool RejectOldSamplesMaxAge(userID string) time.Duration + + FudgeDuplicateTimestamps(userID string) bool } diff --git a/pkg/distributor/validator.go b/pkg/distributor/validator.go index f69dc5c18e132..2aca4f42e029c 100644 --- a/pkg/distributor/validator.go +++ b/pkg/distributor/validator.go @@ -40,20 +40,23 @@ type validationContext struct { maxLabelNameLength int maxLabelValueLength int + fudgeDuplicateTimestamps 
bool + userID string } func (v Validator) getValidationContextForTime(now time.Time, userID string) validationContext { return validationContext{ - userID: userID, - rejectOldSample: v.RejectOldSamples(userID), - rejectOldSampleMaxAge: now.Add(-v.RejectOldSamplesMaxAge(userID)).UnixNano(), - creationGracePeriod: now.Add(v.CreationGracePeriod(userID)).UnixNano(), - maxLineSize: v.MaxLineSize(userID), - maxLineSizeTruncate: v.MaxLineSizeTruncate(userID), - maxLabelNamesPerSeries: v.MaxLabelNamesPerSeries(userID), - maxLabelNameLength: v.MaxLabelNameLength(userID), - maxLabelValueLength: v.MaxLabelValueLength(userID), + userID: userID, + rejectOldSample: v.RejectOldSamples(userID), + rejectOldSampleMaxAge: now.Add(-v.RejectOldSamplesMaxAge(userID)).UnixNano(), + creationGracePeriod: now.Add(v.CreationGracePeriod(userID)).UnixNano(), + maxLineSize: v.MaxLineSize(userID), + maxLineSizeTruncate: v.MaxLineSizeTruncate(userID), + maxLabelNamesPerSeries: v.MaxLabelNamesPerSeries(userID), + maxLabelNameLength: v.MaxLabelNameLength(userID), + maxLabelValueLength: v.MaxLabelValueLength(userID), + fudgeDuplicateTimestamps: v.FudgeDuplicateTimestamps(userID), } } diff --git a/pkg/validation/limits.go b/pkg/validation/limits.go index cca7e23203c0d..b7b13398d0095 100644 --- a/pkg/validation/limits.go +++ b/pkg/validation/limits.go @@ -46,18 +46,19 @@ const ( // to support user-friendly duration format (e.g: "1h30m45s") in JSON value. type Limits struct { // Distributor enforced limits. - IngestionRateStrategy string `yaml:"ingestion_rate_strategy" json:"ingestion_rate_strategy"` - IngestionRateMB float64 `yaml:"ingestion_rate_mb" json:"ingestion_rate_mb"` - IngestionBurstSizeMB float64 `yaml:"ingestion_burst_size_mb" json:"ingestion_burst_size_mb"` - MaxLabelNameLength int `yaml:"max_label_name_length" json:"max_label_name_length"` - MaxLabelValueLength int `yaml:"max_label_value_length" json:"max_label_value_length"` - MaxLabelNamesPerSeries int `yaml:"max_label_names_per_series" json:"max_label_names_per_series"` - RejectOldSamples bool `yaml:"reject_old_samples" json:"reject_old_samples"` - RejectOldSamplesMaxAge model.Duration `yaml:"reject_old_samples_max_age" json:"reject_old_samples_max_age"` - CreationGracePeriod model.Duration `yaml:"creation_grace_period" json:"creation_grace_period"` - EnforceMetricName bool `yaml:"enforce_metric_name" json:"enforce_metric_name"` - MaxLineSize flagext.ByteSize `yaml:"max_line_size" json:"max_line_size"` - MaxLineSizeTruncate bool `yaml:"max_line_size_truncate" json:"max_line_size_truncate"` + IngestionRateStrategy string `yaml:"ingestion_rate_strategy" json:"ingestion_rate_strategy"` + IngestionRateMB float64 `yaml:"ingestion_rate_mb" json:"ingestion_rate_mb"` + IngestionBurstSizeMB float64 `yaml:"ingestion_burst_size_mb" json:"ingestion_burst_size_mb"` + MaxLabelNameLength int `yaml:"max_label_name_length" json:"max_label_name_length"` + MaxLabelValueLength int `yaml:"max_label_value_length" json:"max_label_value_length"` + MaxLabelNamesPerSeries int `yaml:"max_label_names_per_series" json:"max_label_names_per_series"` + RejectOldSamples bool `yaml:"reject_old_samples" json:"reject_old_samples"` + RejectOldSamplesMaxAge model.Duration `yaml:"reject_old_samples_max_age" json:"reject_old_samples_max_age"` + CreationGracePeriod model.Duration `yaml:"creation_grace_period" json:"creation_grace_period"` + EnforceMetricName bool `yaml:"enforce_metric_name" json:"enforce_metric_name"` + MaxLineSize flagext.ByteSize `yaml:"max_line_size" json:"max_line_size"` + 
MaxLineSizeTruncate bool `yaml:"max_line_size_truncate" json:"max_line_size_truncate"` + FudgeDuplicateTimestamp bool `yaml:"fudge_duplicate_timestamp" json:"fudge_duplicate_timestamp"` // Ingester enforced limits. MaxLocalStreamsPerUser int `yaml:"max_streams_per_user" json:"max_streams_per_user"` @@ -135,6 +136,7 @@ func (l *Limits) RegisterFlags(f *flag.FlagSet) { f.IntVar(&l.MaxLabelValueLength, "validation.max-length-label-value", 2048, "Maximum length accepted for label value. This setting also applies to the metric name") f.IntVar(&l.MaxLabelNamesPerSeries, "validation.max-label-names-per-series", 30, "Maximum number of label names per series.") f.BoolVar(&l.RejectOldSamples, "validation.reject-old-samples", true, "Reject old samples.") + f.BoolVar(&l.FudgeDuplicateTimestamp, "validation.fudge-duplicate-timestamps", false, "Fudge the timestamp of a log line by one nanosecond in the future from a previous entry for the same stream with the same timestamp, guarantees sort order at query time.") _ = l.RejectOldSamplesMaxAge.Set("7d") f.Var(&l.RejectOldSamplesMaxAge, "validation.reject-old-samples.max-age", "Maximum accepted sample age before rejecting.") @@ -537,6 +539,10 @@ func (o *Overrides) PerStreamRateLimit(userID string) RateLimit { } } +func (o *Overrides) FudgeDuplicateTimestamps(userID string) bool { + return o.getOverridesForUser(userID).FudgeDuplicateTimestamp +} + func (o *Overrides) getOverridesForUser(userID string) *Limits { if o.tenantLimits != nil { l := o.tenantLimits.TenantLimits(userID) From cd02d6a478f3d20e88be7b03ec68b2ca84304816 Mon Sep 17 00:00:00 2001 From: Dylan Guedes Date: Thu, 28 Apr 2022 11:03:36 -0300 Subject: [PATCH 021/223] Add better observability to queryReadiness (#5946) * Add two metrics to the IndexGateway. - Add a new `query_readiness_duration_seconds` metric, that reports query readiness duration of a tablemanager/index gateway instance. We should use it later to report performance against the ring mode - Add a new `usersToBeQueryReadyForTotal` metric, that reports number of users involved in the query readiness operation. We should use it later to correlate number of users with the query readiness duration. * Remove `usersToBeQueryReadyForTotal`. - It will report all users always for now, so it isn't too helpful the way it is. * Rename metric help text to not mislead people. * Log queryReadiness duration. * Fix where log message and duration and triggered. * Join users list in a single string. - This is necessary since go-kit doesn't support array type. * Tweak queryReadiness log messages. - As suggested by Ed on https://github.com/grafana/loki/pull/5972#discussion_r859734129 and https://github.com/grafana/loki/pull/5972#discussion_r859736072 * Ensure queryReadinessDuration metric. - It is redundant with a recently added log line. * noop --- pkg/storage/stores/shipper/downloads/table_manager.go | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/pkg/storage/stores/shipper/downloads/table_manager.go b/pkg/storage/stores/shipper/downloads/table_manager.go index b552cc272351a..04d61d2784c28 100644 --- a/pkg/storage/stores/shipper/downloads/table_manager.go +++ b/pkg/storage/stores/shipper/downloads/table_manager.go @@ -7,6 +7,7 @@ import ( "path/filepath" "regexp" "strconv" + "strings" "sync" "time" @@ -241,6 +242,11 @@ func (tm *TableManager) cleanupCache() error { // ensureQueryReadiness compares tables required for being query ready with the tables we already have and downloads the missing ones. 
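// It logs the duration of the whole pass, as well as of each table's pre-download.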
func (tm *TableManager) ensureQueryReadiness(ctx context.Context) error { + start := time.Now() + defer func() { + level.Info(util_log.Logger).Log("msg", "query readiness setup completed", "duration", time.Since(start)) + }() + activeTableNumber := getActiveTableNumber() // find the largest query readiness number @@ -309,9 +315,12 @@ func (tm *TableManager) ensureQueryReadiness(ctx context.Context) error { return err } + perTableStart := time.Now() if err := table.EnsureQueryReadiness(ctx, usersToBeQueryReadyFor); err != nil { return err } + joinedUsers := strings.Join(usersToBeQueryReadyFor, ",") + level.Info(util_log.Logger).Log("msg", "index pre-download for query readiness completed", "users", joinedUsers, "duration", time.Since(perTableStart), "table", tableName) } return nil From 149227b88984c93a2fdb097eed54eaec5e725e7f Mon Sep 17 00:00:00 2001 From: Alexandre de Verteuil Date: Thu, 28 Apr 2022 11:58:46 -0400 Subject: [PATCH 022/223] Minor fixes in docs/sources/api/_index.md (#5915) --- docs/sources/api/_index.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/sources/api/_index.md b/docs/sources/api/_index.md index fb7ccc29f5dae..ccd245c5f0c75 100644 --- a/docs/sources/api/_index.md +++ b/docs/sources/api/_index.md @@ -103,7 +103,7 @@ query parameters support the following values: - `query`: The [LogQL](../logql/) query to perform - `limit`: The max number of entries to return. It defaults to `100`. Only applies to query types which produce a stream(log lines) response. - `time`: The evaluation time for the query as a nanosecond Unix epoch or another [supported format](#timestamp-formats). Defaults to now. -- `direction`: Determines the sort order of logs. Supported values are `forward` or `backward`. Defaults to `backward.` +- `direction`: Determines the sort order of logs. Supported values are `forward` or `backward`. Defaults to `backward`. In microservices mode, `/loki/api/v1/query` is exposed by the querier and the frontend. @@ -114,7 +114,7 @@ Response: "status": "success", "data": { "resultType": "vector" | "streams", - "result": [] | []. + "result": [] | [], "stats" : [] } } From c4ebfd3020c698a747776a9934deb35bed9c6aed Mon Sep 17 00:00:00 2001 From: Karen Miller <84039272+KMiller-Grafana@users.noreply.github.com> Date: Thu, 28 Apr 2022 13:15:46 -0700 Subject: [PATCH 023/223] Docs: revise getting started guide (#5939) * Revise the getting started section * Docs: revise getting started guide * Intermediate commit of getting started material. * Intermediate revision of the getting started guide. Add instructions to download, configure, and deploy the test environment. * Add sample queries and instructions for breaking down the test env * Revise prose using reviewer comments. * update to use promtail and flog in the docker compose file Signed-off-by: Edward Welch * Revise guide to put all parts into Docker containers. * Add README files for examples directory. Revise getting started instructions. 
* Address DylanGuedes review comments * Getting started guide: incorporate suggestions from ssncferreira Co-authored-by: Edward Welch --- docs/sources/getting-started/_index.md | 138 +++++++++++++++++- .../getting-started/get-logs-into-loki.md | 78 ---------- .../simple-scalable-test-environment.png | Bin 0 -> 37166 bytes .../installation/simple-scalable-docker.md | 57 -------- .../grafana.md | 4 +- .../troubleshooting.md | 4 +- docs/sources/tools/_index.md | 7 + .../{getting-started => tools}/logcli.md | 2 + examples/README.md | 11 ++ examples/getting-started/README.md | 4 + .../getting-started}/docker-compose.yaml | 22 +-- .../getting-started}/loki-config.yaml | 0 .../promtail-local-config.yaml | 22 +++ production/simple-scalable/.dockerignore | 1 - production/simple-scalable/.gitignore | 1 - .../simple-scalable/promtail-config.yaml | 20 --- 16 files changed, 193 insertions(+), 178 deletions(-) delete mode 100644 docs/sources/getting-started/get-logs-into-loki.md create mode 100644 docs/sources/getting-started/simple-scalable-test-environment.png delete mode 100644 docs/sources/installation/simple-scalable-docker.md rename docs/sources/{getting-started => operations}/grafana.md (95%) rename docs/sources/{getting-started => operations}/troubleshooting.md (98%) create mode 100644 docs/sources/tools/_index.md rename docs/sources/{getting-started => tools}/logcli.md (99%) create mode 100644 examples/README.md create mode 100644 examples/getting-started/README.md rename {production/simple-scalable => examples/getting-started}/docker-compose.yaml (90%) rename {production/simple-scalable => examples/getting-started}/loki-config.yaml (100%) create mode 100644 examples/getting-started/promtail-local-config.yaml delete mode 100644 production/simple-scalable/.dockerignore delete mode 100644 production/simple-scalable/.gitignore delete mode 100644 production/simple-scalable/promtail-config.yaml diff --git a/docs/sources/getting-started/_index.md b/docs/sources/getting-started/_index.md index 99679731e5d7a..1f7dad3c12e97 100644 --- a/docs/sources/getting-started/_index.md +++ b/docs/sources/getting-started/_index.md @@ -1,14 +1,142 @@ --- title: Getting started weight: 300 +description: "This guide assists the reader to create and use a simple Loki cluster for testing and evaluation purposes." +aliases: + - /docs/loki/latest/getting-started/get-logs-into-loki/ --- + # Getting started with Grafana Loki > **Note:** You can use [Grafana Cloud](https://grafana.com/products/cloud/features/#cloud-logs) to avoid installing, maintaining, and scaling your own instance of Grafana Loki. The free forever plan includes 50GB of free logs. [Create an account to get started](https://grafana.com/auth/sign-up/create-user?pg=docs-loki&plcmt=in-text). -1. [Getting Logs Into Loki](get-logs-into-loki/) -1. [Grafana](grafana/) -1. [LogCLI](logcli/) -1. [Labels](labels/) -1. [Troubleshooting](troubleshooting/) +This guide assists the reader to create and use a simple Loki cluster. +The cluster is intended for testing, development, and evaluation; +it will not meet most production requirements. + +The test environment runs the [flog](https://github.com/mingrammer/flog) app to generate log lines. +Promtail is the test environment's agent (or client) that captures the log lines and pushes them to the Loki cluster through a gateway. +In a typical environment, the log-generating app and the agent run together, but in locations distinct from the Loki cluster. 
This guide runs each piece of the test environment locally, in Docker containers. + +Grafana provides a way to pose queries against the logs stored in Loki and visualize query results. + +![Simple scalable deployment test environment](simple-scalable-test-environment.png) + +The test environment uses Docker compose to instantiate these parts, each in its own container: + +- One [single scalable deployment](../fundamentals/architecture/deployment-modes/) mode **Loki** instance has: + - One Loki read component + - One Loki write component + - **Minio** is Loki's storage back end in the test environment. +- The **gateway** receives requests and redirects them to the appropriate container based on the request's URL. +- **Flog** generates log lines. +- **Promtail** scrapes the log lines from flog, and pushes them to Loki through the gateway. +- **Grafana** provides visualization of the log lines captured within Loki. + +## Prerequisites + +- [Docker](https://docs.docker.com/install) +- [Docker Compose](https://docs.docker.com/compose/install) + +## Obtain the test environment + +1. Create a directory called `evaluate-loki` for the test environment. Make `evaluate-loki` your current working directory: + ```bash + mkdir evaluate-loki + cd evaluate-loki + ``` +1. Download `loki-config.yaml`, `promtail-local-config.yaml`, and `docker-compose.yaml`: + + ```bash + wget https://raw.githubusercontent.com/grafana/loki/main/examples/getting-started/loki-config.yaml -O loki-config.yaml + wget https://raw.githubusercontent.com/grafana/loki/main/examples/getting-started/promtail-local-config.yaml -O promtail-local-config.yaml + wget https://raw.githubusercontent.com/grafana/loki/main/examples/getting-started/docker-compose.yaml -O docker-compose.yaml + ``` + +## Deploy the test environment + +All shell commands are issued from the `evaluate-loki` directory. + +1. With `evaluate-loki` as the current working directory, deploy the test environment using `docker-compose`: + ```bash + docker-compose up -d + ``` +1. (Optional) Verify that the Loki cluster is up and running. The read component returns `ready` when you point a web browser at http://localhost:3101/ready. The message `Query Frontend not ready: not ready: number of schedulers this worker is connected to is 0` will show prior to the read component being ready. +The write component returns `ready` when you point a web browser at http://localhost:3102/ready. The message `Ingester not ready: waiting for 15s after being ready` will show prior to the write component being ready. + +## Use Grafana and the test environment + +Use [Grafana](https://grafana.com/docs/grafana/latest/) to query and observe the log lines captured in the Loki cluster by navigating a browser to http://localhost:3000. +The Grafana instance has Loki configured as a [datasource](https://grafana.com/docs/grafana/latest/datasources/loki/). + +Click on the Grafana instance's [Explore](https://grafana.com/docs/grafana/latest/explore/) icon to bring up the explore pane. + +Use the Explore dropdown menu to choose the Loki datasource and bring up the Loki query browser. + +Try some queries. +Enter your query into the **Log browser** box, and click on the blue **Run query** button. + +To see all the log lines that flog has generated: +``` +{container="evaluate-loki_flog_1"} +``` + +The flog app will generate log lines for invented HTTP requests. 
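+The queries below that use `| json` assume flog is emitting JSON-formatted lines. A flog JSON line looks similar to this representative sample (the exact fields depend on the flog version and flags):
+
+```
+{"host":"203.0.113.7", "user-identifier":"-", "datetime":"27/Apr/2022:12:00:00 +0000", "method": "GET", "request": "/roi/e-business", "protocol":"HTTP/1.1", "status":401, "bytes":12345, "referer": "http://www.example.com/"}
+```
+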
+To see all `GET` log lines, enter the query: + +``` +{container="evaluate-loki_flog_1"} |= "GET" +``` +For `POST` methods: +``` +{container="evaluate-loki_flog_1"} |= "POST" +``` + +To see every log line with a 401 status (unauthorized error): +``` +{container="evaluate-loki_flog_1"} | json | status="401" +``` +To see every log line other than those that contain the value 401: +``` +{container="evaluate-loki_flog_1"} != "401" +``` + +Refer to [query examples](../logql/query_examples/) for more examples. + +## Stop and clean up the test environment + +To break down the test environment: + +- Close the Grafana browser window + +- Stop and remove all the Docker containers. With `evaluate-loki` as the current working directory: + ```bash + docker-compose down + ``` + +## Modifying the flog app output + +You can modify the flog app's log line generation by changing +its configuration. +Choose one of these two ways to apply a new configuration: + +- To remove already-generated logs, restart the test environment with a new configuration. + + 1. Stop and clean up an existing test environment: + ``` + docker-compose down + ``` + 1. Edit the `docker-compose.yaml` file. Within the YAML file, change the `flog.command` field's value to specify your flog output. + 1. Instantiate the new test environment: + ``` + docker-compose up + ``` + +- To keep already-generated logs in the running test environment, restart flog with a new configuration. + + 1. Edit the `docker-compose.yaml` file. Within the YAML file, change the `flog.command` field's value to specify your flog output. + 1. Restart only the flog app within the currently-running test environment: + ``` + docker-compose up -d --force-recreate flog + ``` diff --git a/docs/sources/getting-started/get-logs-into-loki.md b/docs/sources/getting-started/get-logs-into-loki.md deleted file mode 100644 index b90f70b9c2948..0000000000000 --- a/docs/sources/getting-started/get-logs-into-loki.md +++ /dev/null @@ -1,78 +0,0 @@ ---- -title: Get logs into Loki -weight: 10 ---- -# Get logs into Grafana Loki - -After you [install and run Grafana Loki](../../installation/local/), you probably want to get logs from other applications into it. - -To get application logs into Loki, you need to edit the [Promtail]({{< relref "../clients/promtail" >}}) configuration file. - -Detailed information about configuring Promtail is available in [Promtail configuration](../../clients/promtail/configuration/). - -The following instructions should help you get started. - -1. If you haven't already, download a Promtail configuration file. Keep track of where it is, because you will need to cite it when you run the binary. - - ``` - wget https://raw.githubusercontent.com/grafana/loki/main/clients/cmd/promtail/promtail-local-config.yaml - ``` - -1. Open the configuration file in the text editor of your choice. It should look similar to this: - - ``` - server: - http_listen_port: 9080 - grpc_listen_port: 0 - - positions: - filename: /tmp/positions.yaml - - clients: - - url: http://loki:3100/loki/api/v1/push - - scrape_configs: - - job_name: system - static_configs: - - targets: - - localhost - labels: - job: varlogs - __path__: /var/log/*log - ``` - - The seven lines under `scrape_configs` are what send the logs that Promtail generates to Loki, which then outputs them in the command line and http://localhost:3100/metrics. - - Copy the seven lines under `scrape_configs`, and then paste them under the original job. You can instead edit the original seven lines. 
-   Below is an example that sends logs from a default Grafana installation to Loki. We updated the following fields:
-   - job_name - This differentiates the logs collected from other log groups.
-   - targets - Optional for `static_configs`. However, is often defined because in older versions of Promtail it was not optional. This was an artifact from directly using the Prometheus service discovery code, which required this entry.
-   - labels - Static label to apply to every log line scraped by this definition. Good examples include the environment name, job name, or app name.
-   - __path__ - The path to where the logs that Loki is to consume are stored.
-
-   ```
-   - job_name: grafana
-     static_configs:
-       - targets:
-           - grafana
-         labels:
-           job: grafana
-           __path__: "C:/Program Files/GrafanaLabs/grafana/data/log/grafana.log"
-   ```
-
-1. Enter the following command to run Promtail. Examples below assume you have placed the configuration file in the same directory as the binary.
-
-   **Windows**
-
-   ```
-   .\promtail-windows-amd64.exe --config.file=promtail-local-config.yaml
-   ```
-
-   **Linux**
-
-   ```
-   ./promtail-linux-amd64 -config.file=promtail-local-config.yaml
-   ```
-
-You should now see your application logs. If you are using Grafana, you might need to refresh your instance in order to see the logs.
diff --git a/docs/sources/getting-started/simple-scalable-test-environment.png b/docs/sources/getting-started/simple-scalable-test-environment.png
new file mode 100644
index 0000000000000000000000000000000000000000..950cb70c30996fbd4239aeca6feddb04a87dd1ff
GIT binary patch
literal 37166
[37166 bytes of base85-encoded PNG data elided: the simple scalable deployment test environment diagram added for the getting-started guide]
zx`dRmOJ=bRD`c{l$TtrkViB4r?=mp)iQkbkVWIi~nivY7FtksA$S&`-Uy>+?EBlIL zZZs?O#0DK5V*7av>chk#sMh{Nvdun%2ma)DA~P6@wN&o&5bV(!l6S-wT!Bb8&5Fp2 z*ViYn()ufD;Vi;)Oz}e?+mC!Sbg_$*PV<+^^3c%Ec|jDm$uOvM<&eSeb^ z)8h}G=45g=*m($W!}4mY3(>zAbS#XzLIXbfXj?pT>a%EM*)IE#;ahTa$93E7oTzdI zcY|Y(#X@9_WoC9;T|W5~R9MB2NpoLY1nX70;$$B)zc}?Q-^fpYR=5wYd_o9eJjeM6 z&YCoN>=0zzIWQ=?;k8*+dKAX*bFkPP?!rK9)%P>&b`JJfFW$na>d=H#M;zBP^Y?Ck zbB%e|Pwa8aD-$+X74z~71=r8d+P_P-_?VBb2V$b0P`W9*;2Bm6dxjrveEFB-f+^YY zAZx_7y>IwG9qsBmq909}$vd?!yd9IgiOEq*F)UASSSJyNmiJq9x^;OppYl-em|ECU zwo}W~3B>PgNOX2V$x+CpQ8Jm0k$5YAk}O>qccIP>o6p`@C6CzDrg&YxF0C!mZ{lUP zy7DAA680cmvB*x%56orSQ7hg2b^}Vyu9v@7II1qxu#@kty5pTE9`Idv_&&&iSZ^X9 z2@;q9ztcpJb069)bSzBAczwoS7%~$SD;5{>Sv}m`Yf=sN7*46qK+K+;m+CVisLw1#( z1wCY$ooKiFp;g{8n?3H6QSUzB&0df|sJ|10*F#rAY!XuxO z#M!ApZ?=LsI=zY6OVd8LM8_nAyxb$+t6CevIIP?z(L$m3`n=hP26i+R{)T+51YjQM z!agl7*?xJT?uW`L2~|b?bQoSs?r?n?ew{P%T2z{GX(D22i*)%)MafRFw3J9~SxI8y zdPr#RNcyD}94|z1Z_vuzqJLMr=^aV)R8pt8xR~eii6@U6ccfC1RMFTt@4vhm)eqMp{jvr**;x@72*1cIOU?d+pE*I$5} zgG!Rf&Oj7K8LD|Y$^U5dVIS!CP-R;lYT@D1aInS<48 zdQO!iL5{$NFS>n}#w5a)BA@JUGh+&%9sFTACbw7pfwqhdfRQ3Ts?lWiM69K;a;siq zVTB?BO^z%E;ug4*%CSM=tFbdWF*g>ApdT-WhkH!m9ENLQ$=!7TAR;twJ#Rf%Qq(mT zGL!qCTrw)K;I(||1W~+gy5(RhL?2AQS11sdvjp(I#pgo@!f^D)*B&hl^P*&$t@GfPbaeM z_JocEsPVwEQ0CE}lcy>%4Ogp;Oh@={!tFUh&{+w!rVrZ$FG*Z8@Xc*m_3A}@+>$N_HFW<7Smx!=cb zJUTSXZgc;WNu>`Kv>3Io!Suvof^2}%su=+2`Q#L5I+NYMR4w=P9y1^OeZo9F^!{sK z&DB>YO*aytDyVp5*WwHRJhCNF?^tLqE~nJw*XO?EE$8mFeg19R>$_V!xv_7Ed*k6q zi4VUkhRkEgfms;>u3$avWQR&iW(c79yGq?_5Vf(aJh?{mg?7=7QPk{{CP9Icm zqy5wXZa!1XqY#1X=;HZa8#Hq=N9~Z~z}i+E679J^e$3jP5$A+;cv>Q+VPrZ#`18Z0QKL!31`-%qdNfacQ!6=VyE2H} zS4R051(c7j`sM{uJ}!){h-p?qy-xwKUL$b_AV)6C3?*lCrNpBsitGK8nfgSzO7V7g zV_eBpWy_cO?<>K~I5|ye!=@=rx|Bu}wHJq}6nRLFpV8*!K*w$qlUpo!eM76e?Eomv zrKB|6XJsgK^La7iSk~Uc-VX`v6dFd2SwwW-X=v6FpzK6~l!!TmFId;bXOtj@2jhOb z?EW}2ujk{FB*&MaU+DPOX48>4JG!wxG+?tCU^vv|Ot>)s&!o8KIAB8)hxVdR`X)Ez zEMflB40slFz5iWv8X`cm?raVr(vNKzV;O8 ztmS)-EU`G`p^0&m{i9WhUO3ItN|%ZLF0^fSkxmG4uZN(ez&oC}I4R%1VY+dXq!n@r zzBYe1honVBO%n2x`7Onq_uV`|NuZm5x^b){7``vQ_bnIHd)-@q%;Eb{LwoI<(Ib8h zWIe`Mf1TLRK)FaeLK`bmhkG4FO;!*ZkF;Y43G!!_AKOU5>ErU?)&&CNE^;E{dvgW6 zrsfXZPC&kybK+xeTOkjki(fx5gd$0pFKJbpMLKFzZZpfju4nvxjByh@%Z7Ex6RQmw zN$L`1=nDN$2Hh^;7FxHcJOr`n{eGYx#z%Z`K?}vA1HS4M~X?FkQ-AA21!g3GW^$L z_er*=@mBa32eoK{qw4=Ns=-WR7HVUeg}L)6@sepRf6YmWnBP>gBQfj`H(nMkH6W>F z>jODK=R}@5sGZRJzTdF`Y^lMPmO-o<`CDNGwnDefS2nB=u}W|jueU*z;DynjL(H8T zM4Mq1TiYKClnPCPLqwTs=OdZ|9smGr1LJkgca(tYg4h)q5H1M3#c(gOd43K?wD&x< zc1=j0V@78?Sc<_DI#{{N^y?hloMhmMrdMjS?a#u{*oDv+ZyiN1q#3UiR3Ype=U*P6 z9m#_IH<)WL-ZOtek@kHA{;oasod6%A`CpBcII>Fo3{@z!=X<;0z^CGJbJ4K)nd|4u#`NA+2ANvY3qpoUaz4mBCmuUW7(aeN99qKY+QvIpR z=R{81O=bhFO8aY;T?^r;`O?qHc8>VE%a%n?(4|a$`6A%W>8)HCDgQGN-6toI=C96! z>ye*FXZWao^#xhF&OJ;vp?7$wuC6ipli|ylg0_!x`}{CljemlvL1Yd_c=}TY21tsT zDvGPmeuhR4L1Q)TRB!du^feQxW?{EQ_}ivjAY)`ZjO-0GrlWY|G}&st7e#yLTWbS* z%!@WY2B24|c@YMlUSW49_EgXac~k%ub8mEoJ_s8?%=A|wukyx8mcNy6j1ceDFX)@N zLFn~`uVx?$5f>Qqiosrr5rl3Z%g5p! zNl1!RhuvOb+%%0J_!crlcEk&BQ!|%LvZ&=R5Iwp0r$~*_dcJlHI8m$^Hv3czkY$7? 
zg3lhjs=tXglonZ})r?FhGkk^14adzWp!|7v75{>$PgwyMb}vIx6rBlmDU;4d;laqx z-rc)p*!UxmN0t(J*|baaNr-4BGw{Vsc#jyamIrZzytVtHjS1hr4I;P6DzDp&zv{Ru zqn>cD-ve0|DDB?!eFFru&wm4&r1HI?owC#T_L2WUr4LI|jL!vADm41pb_f?`eD{A+qYnqO5-U7HJkgU>}yBU9IDikqnB#xE^3j1qTVq zx15O|3z!U&-YjPB4K{T#f@JCPK$(sc zg=nYLjNq&JfDO{#N$BysS4R64q9{T<-eT~ST1PKBgb~}h4Sw5K z8H2Sb!_mHL$6?|@PJ;wG1~3wBL}qDpTNWL{#k4+_JPi)|v0 zZY45{Uy_;Vgf~-)$+yRyoge}{wS7cFx6vqQY{8qvrjYkmK$+D0n7Lg_ulzeE_6g!A zC#_>oBGQpQ_jcd;!8M{Pt>e_t9@(OqRqzlT#O`}hkiOZWln~Lr`2`w%-5>9QE2nju zGIz^g89${D7J|;WV5*r7JQ*M#n)NUjpqk)3g5M&7$kMe{`0mSwI%%l@RMST`GnkY< zp!v@OGd4f`PIxvx@QmI^qfkoCF{?ZcBgq#;U$8%uM+#b_X+r}#=Z1o2RvX6SnUs#7 zFL-LPsn}GH(WZ-Sud!j%uQ4IY+q4zHm8PC7vm*f->P5VT<94_0us6%2pbzLX(<@`* zQq3(aNYcW4rVQN2t0RRNWxz9bkQ9}4Xh8S;IRCphtw&UpwR!j`3j1Eb3Qap8GY%PE z2I8M}&YcQ9_U;u%mBk-BG)j~C6sb0}=*kNf<%wr$so0*NE>!0#P2cP~F9Lp;MCzT0 zZqF3EY$Vh&lI+yPY7G7q=M@VYO=tf!0Xtb;Xde{g8jEnDs}$MK(B&<+@NqV#bEjaT z*Ag$=M}S97-(RmCp*R9b{%L>zsj%h`gwmN;U~D0-Q-(>Nc7t*Wu%Xzr*|NOumsQcF z8hyI|d&F}Q-9MB)pMw!W%Lajzw_>J;FwMJn#L|})dvx)3pYJmhzJWg|wi-k~sjf8J zF#G$UHId@}x}`PRXug8J&xco7i?Ax)?|CPy+WTG?X{Al`2j zn*IP5s$}wTwGQD$*L{1_45=ZejWl-&u3>@D-lH>$RJX!wS2Lc;W&-QiK?hV?l%-lx zhP{8hv3Gu!CWP-pZD2Ym+%ii&)-3e3L=Sg&>-6}YsI^jwyRk@w%HDV0>mG>qlhk!9 z0NCP2GBl*O8iL?c=+PYR@w8)c{Yb8;x?W?*K9`vO zAo!`V?64;LTqU}G*JuvmpXrbIHg8uWJQdEaA%kMeTrmKY4;qoyb4d@P%zNt*Le)mp z;Vje|2_-evwsUBqo35&^82en<+T`eajreuc<&mu{V`Fk7Lb(t{&y^zi}}FJM^*HJn%&DLO@S{41h)tvu!x z#LmlO&6%TL^?aG!9_RT|z_@2F^26{ei~`-NmA5ToUh^g7EM5ZJAO1bUCMnCk?xH9p z7#!I`M9`w7U}?-~`u_84{Xc1s+SrXTV&*blv+{PNEq7nDwfZGLga5^6u%Ie)r{AFr z@hzU35UZknWL9vdhe#{$9bq=+8;5w6{54OQEgTe+Gs#kQyh40U0%k0 z(3A%3b|Kw9P=qt9HC?KbzorYbF*!JIgvQUr`Jev!oIohAYCXN;V$KRXmA#1~A5cGV z2S~?U_rK~?YA2f(=j=K%?Te?i%_bvJSW-lh5(~y0?K1+y;4O5oHQced;Hz3zpZqn7 z*7_4{YiQ>~g=3GeXouR|$1BcX0|@k)Ua*BD*3o0X_*bq5xAJJGAA$1xDd7ne!;XO# zeq&IziK`F3^;Y(4+Oj%+8Um<4um%Tm2DHm4y6$#*C4-nr&OeO^($))=<|`>}-5}!X zQMI27-*{GX&)XnT!XSB8EYN^~5ZWH||qf)?3utSQ!#N1u*gYcB)NC5xJ>u zz~{HtHG&5+(R2~$U?%W@%u&2&#Ixzf7~0wCSjh8-JCWG9y~rSICY_L>OExmf=u_*6 zw@< zxb`=Ru8t`;t#VvB61pP*?BBJ^Cb5OP>0{VB(y)i741ij?->Y$Y0KCA6!!)a^aqV=` zbX89W%3KVLZpM7wj_PB;0M9tlzdh;7yRjweFE_ zJ-PN>l(BNEO!HG-ejE-_H+nfI#kGy1$79v_q6Q1y;V*7hTI6d7&bhwFUjgAHpf@r+lw zO|)H5ubK~7_w*%t?mo_ONGOQIb(GG<@r1(g1}+4bVn!7FBk!RScwA`>r>%m(fcOI?7C4xE;@r0YJDpOeGEA~LU=80t zxn42qA=+Q5>3D;L?Iz4{h@|3t=$4N^MYfi| zRSSa{Uaa<4S>*(x-aaMfwqvHPc@ydj9c%UbwZt7r;kL2P7PN35$yGEXr$iXVmTest zv`!miw=&^FF>2KX!vbSDsv{}(e>h71hf#Y@)N!QqB+-Qze0K$m51nzZ4QgqUP!GvM zVjU{%We7<;)g~TpGF-LaL3tq{#<(fz6QO%Z5D!0{bnoAOL1E!z+?3kcwfxboq3l~9G*J}Y9Y|tPkiYUY zVHqel)^3XDAPgFa9n!{Y#jEjh?BA5!%kv!BDBb9lz{Br?Nu7p05|TBzZK11uu?^Sd z5*4&W!u=|>Ug)}h!8Qv|H=TXUNMM_8_Ul_jH|337D#=e7Hy0eQkK2=<*!Scy{A-JY zy^J4J6>D+-Q{J?ApJeD#9rUVq<6~iET1`QNTr-n~-NOa1%@A&)v6;`vw1n+Oo%^n* zWP_iqV=%Xu#ru~|aGOmT(~{nYwsZ*$MEse=W`a@izBRjF z)Jc$~a$o$vmK9Vq6l@oe85m<<@*5rr=N*|syO7`;)9gB^ivR7jpm;kTAH4Gky3_hu z((&2$u93(vX8N)H%js<%R!(YrshTNCkEYFsFcH|+Twv`dk=vla4X(Z*_Ez@t`0>-V zOS>G5PV$^0!{Ja>A}s3X#C>1(jvu371&Hod8zX?7A&SP> z8B5`!A%mt%3l?@{WKzVFp{*;oJiDHr3tss6$fq$J%mYIp0&#R_a=JT&-iPF8Q67?h z69I|yi%hImzqEd5^Y0m&qfGYI(J`|VmI^+Gk|3@X=kZTNq8Q|GzwE4;ms<27J}sP3_cV)EiXZ- zm>EpyQO;*|#%fLGzi5TZ4qPRB-CdVR@&NM&W75_2uHQp~b;%J=LJDep>w;9$>#X_a zuiH}?1JqW9fPIv~+bM@k1pM)=Ai%i%^zMe#{MOm!C5N-QCtwzR!ZH7de#04*cBlqm zw-tarJ)i9&za$rEyr$9aR}6T^jJ*r4n4xeUy@w-fFj_P>Fz<0hj`56p#rD{XjSJ6+ z!3>T~96KEa@N(F~m8r13dcRYgpqPP^C0gPwtFd|k;ACbM)7;~{%ssz<0p3I0X9w3n zig5?__Z9+3rs`7Cf;15e`N)jwyGJDpJ2a{c`^obIv%f0DG(E#IWcZ-%xT~uRplB7L 
zlRiB0%JM6md;G&iF5kFrCF1iX3_p~={nG%z;w7j4EMDkyHU3unk}N~skysO(Q#jbi4UTWvevCF{i^^kgqcaNPHuYb}H?wi3{B{aKIKk!zo1N7PU@Lcw0}#dU?e3;}*=Q*+H70 z3s~pOyqb@BG7_+CA%e7`?I`bPaHBCqUgIm2ie zo>0X*6xS4>S@l8o8%Bq+HMwc#fQ<_E>7QZl5G{a;kB82V!nJ_@w+1dk_=}Cz%cVHn5j< z)GkzqYlaw$JJ`O<15my9n8Qfd{OxUX_C-^THzlH<*ste%RK(#VkIND_37vNg6Gg`7 z67~3})IF0>>EKuf!nyeYe8N<#9k$j<83^YaX>{ED!e8`V(RPrX9ft#HfwUh0l&DlV z?}&vg2mG(^7!{clX5j%`XzUa(mtiIdUVE3l=T{tL{42LgM&qed&t`7hFW->a(4-C8 z@5ghVqg#mKF^*#E*Y99+*kLVV8Uj@IhJtIBH)Va4TUpcM|M6sVcpSV3TS0JvUB3I% ztzH4#2^5sl^4z6P>YLk>MH{aj}4|w!}9#6RYM+03$f+)`&(8669g2M!n-xi1lw&o8D3y0TPOvSE*Cg%{!nLh>Gx^EL-Fp{t2oiN zU9z=ZGG4}B;y`%;@7R61fn2=rUcLDZGsWA-OGH(~)}J+8B3-1h@-iKnM5TUX#(@22$65h{n}jC+naC>0K~SM zK&*O55&yVwx}TFi5`*+HC$gf$f}-p5>^}>_rDVO3Uc-nTZvfvrF6T}M3ZoPAr!vI( zcBwOo+DFMwcwgV&=Wacc z`D?JB59E{pxQwQ$DEr_Lxr{fVrCzlu=C(Uws7$=vBH~FvIj4;FtmOMPRM_tqv#*(P zr5bEJmTvRmd^9(&4#dI+G`4EOdW0pP3|_{S;zTanSi~kSly)su%9WM@8`U`XiB>jy zif0!l=K-&5Q46krg=jN`Gu^0b_$KM@>)f~?=yT-LVh66TR#MjGfX<5)nFi%1!FBd3 zDQK5tR`I?XNT;sqfu=HNOW?oi(gEU$uNygIYODE(nBqhORvg}x9+cE&{!=g|&hZ7^ zoL=}SqJaC{@Ed-{22d>|n*pXehb7akVem~l4B6N6kJmQKkBfkFAVAxTH9?T*yt69(|7p$l{L zvKjWzZpmmmEB-c>p19j&TO@@3feGTbA0TUgkNPh(2T|$2iBhXHzehw*v@O(`KMK@m4(H>__~6bIzTR$ z^IDbT6YpbQPhh9i6_}vSsO$bwt*E?Is&82?^5n{;@zSN2UdB!FV5}S>5GBmcU#2dC0R> zhA^YU*5@5H%iXP0#amiG=c3ErnXZI>5R7$yE#6B|hl_BM!!KzNV4=5T`gcuSw0C{p zm}U-j(z)>ij0S!=RgsvAQ=K+<2ysV5JDtobMOF5{>(|QW%u9V3T8>oHwmu^x*ZkVb7B-@3!`kk zWgaqfNumeEKD*^Eca!a`pby-;=UKMJqcTQHFvY{&Y|C!=kbC`5RwK;?UD~pnM?i9a0)GobB==^wO+(L#dN@8QSIeFe%Jv*dYxq} z;E%sl=%MRc)O}zZGY16&30LO^*66{-a~BWlNfY6FP@n)~Gh%vwG}Eh=PUN#5T3~x- zWUG+8iOHniIb@dehs~epVQSYxUmbD7vJc4uAH_3Ah%Z1q{DFzyUT5M1m?S!8jfG)F z8yKKR<-rT0XfgMx-jhHmv%#)t8kKK^pO#uFO`!*=s~;0p6Z5uTo^#=z<%BiTgVNv~ zShP5KB0L>xh`zVU!tnbvV{if3O^UGZM2}=a0B%n@`n|cfX>-r>Zmp?rKQTmTTh+38 zWh5C%7o)?g0QAbs6M*a38j`D5YnesvyQo#%uq)j(H$xVLo{%{LwWI(QFQ3TV7 zlGtRCo4*9yIgr9xdVY>$)$fRs`t4gJS}|u!A8?=BUH>$-(bS=@biw(QKQ#R(KbOqh zn^%ItMs(H}X;i)T&QG4c9gCKyZR7+$cCS}qwp*XM9+R4Bc|F=^p*V)UbZJaw_fBUhp)TzbE%OI{>?F~uMg1H<`Mp1p zpnm@rHBwfxmj0$QIUatx;LtA8MlJ z8)h`AkHQkM2l@mCAMRR1!M#mx6JBl!WC0pSL71P#nJ-0RBKO6@NPouWI;@A8%8)eJ z7<;g4{il@Vu3{5kpV#IUz~&ESj-W=gkGV6sB{>vU57GDzb|BISzI@9>{vHoL6GT%3 z@WE%Bcas5EN`LW=kZ3*wq9`hy6PI;qir@M1SpfZSTczUo+O5wCaZ6z27yL|PIiVLQ zpA)uY5kY@O1Gk4c!TWs}6(V@V)T>5l&mBSnN2=6U?<#!cA!vou>v!N`<+@J-^T zo#W~YsbvexL$nK7A&RGmZ2H_uYrXzg!53z5t@II3_SplJ-AAYEQZLcXIoQ&R2hi7B zpE=&_(kek9UL~<)Q-zO5ptQ6pNb<0*OC|UBLl`9OUq#2zh~mG8ZuzHm*wLP61s{Ns z&=!ACmVwBRpJV;EjPa5BBC-@11MPh1(NT&C(6r0QD`}IU*>;ehMm*H#K@QpZPpJbx zW(td2x|2TqUau>x_WTVlc)-u-rW|mD88bi}HI=Lbv(G_AQ<+;g=|5GMrUfy4&G(|l z#LvZ0U`5EJ71{zF5-2eZD%x%O<_@iR$!KUy&z!wq*8Pb zb64_`J1og~TGkjcjXX$0&c8oe+Lx8pAL1si$^GOuU}SFXHif2zJt5w$^duxZr_wN2 zfs92k?YnMqU-41U#h(*G0@6rpB{cDO>16P04g>8*DNh$ zqnO$~nZlAS4*m&wsTB3%UP>qJ&vVDMPfP#5TKIAIWXRUnHU+U zH113KYM;y;qEAYoDJu0_4zgI?%PL^M;qt3qr9_(aEAEwdPxe8LGGt2{11!!cBU9-U zbUDDp&Kboftd}%MY7~2uTCQtL8`FCQ5vUaGltGd+J~=6DV}E5J?M3)MxYUB$)7v}C z(Gx|K=Je<`57~__yVXs5&U(KIAPvsF0|%Cyf8|zh>L)GLbvR|mmF@1}j)66I5#Et< zdOI`LOw{CQuiy#!eRr7!R=}^gk!-yZvo+^n^dEEa0RQOM^=}C8_yv#LI2}Zm&b%>R z3+_*uvc;e>M8H%~xcqou`xabs@E_^@ckCBJEPUFUd-ykoi*8k}AM?oeYN+GeTSfg} zP4*+aOIj|3vzoa+T#F}i=SsC&0V{(~gZ0D8Ccu57u&Qt2wZEQu^?>uAQ`%K3hx5aA z)lAN|^ZnZMnuFoXmb`^&Vv$xsF!xBcO;)I1`WziD^D93XMP4~aVLPVZeB-|}Ek^>M zJWTmy7oo!X_VjLh53J3o!F&F0^n^QUDg}e^o&ZSTO+P-(~j=d!(I+ z#lyO)>8hb&r={oFUnEtUi3(2SXf9Ush z!0P05;*y_5`A-cYHILn2FEJAa7U%^#YN%vr!#Wku?ua!6w_LXWv~r&xQmDP=O;nA4 z*xFS!-OkWPKx0XjqhZmiL#&dk7HYS+cfS@!m;Gwy8~_G4V}xIg|96Ruw8u7jv@%4K zD4JOX+ZD?b_u`^+bfrt4bSv0n*ZN|Fx;tLbmOTWkclVyZ>2}~mpjbmL4k5J@`3^he6d>EZoJqV 
zSc@MkH7_5(wv@kZlq|;Kul38?!AC?PHg<3z_J5{#VfoN&4*hD6ol?>m+{Odgbc1DlVdjF6d}b<)7#X2kz6U_EdEB02W+RPN#G6}0p1(tA*R|5SNLhmI8G zCuXWxE*}hO7&+&dipe8NmPGEKXh!`PTa+)GX)f+JH6f-NHQ@We#<%5e-pOIXya&Kq zf8`jrwuwze`>(Mj;vP?rE+em&@ z@m~xg>)i{)$hn^uB<4g)@LW3kqF*UOzU%*iMP+$`tmAhR^DAJteaKv`%lY{`*ukR z)feI?A26Lq_b`n3uD1kpCu;3~DVF-p*bOvL`t@zXjem!fG-t&nXNy$(&^tOJltXZv~uMFf`*y0;zsA~7JU5bRR7`M)b jsM65Y|6f0BYx}{$TU>U-Ea2&I2;{EHy*q_UkDmW8># Date: Fri, 29 Apr 2022 11:31:20 +0200 Subject: [PATCH 024/223] Add query scheduler to "Read Resources" mixin dashboard (#6028) This component has been around for a while but has not been added to the loki-mixin yet. Signed-off-by: Christian Haudum --- .../dashboards/loki-reads-resources.libsonnet | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/production/loki-mixin/dashboards/loki-reads-resources.libsonnet b/production/loki-mixin/dashboards/loki-reads-resources.libsonnet index 88a16f87e8cf0..20eec23e287b6 100644 --- a/production/loki-mixin/dashboards/loki-reads-resources.libsonnet +++ b/production/loki-mixin/dashboards/loki-reads-resources.libsonnet @@ -32,6 +32,18 @@ local utils = import 'mixin-utils/utils.libsonnet'; $.goHeapInUsePanel('Memory (go heap inuse)', 'query-frontend'), ) ) + .addRow( + $.row('Query Scheduler') + .addPanel( + $.containerCPUUsagePanel('CPU', 'query-scheduler'), + ) + .addPanel( + $.containerMemoryWorkingSetPanel('Memory (workingset)', 'query-scheduler'), + ) + .addPanel( + $.goHeapInUsePanel('Memory (go heap inuse)', 'query-scheduler'), + ) + ) .addRow( $.row('Querier') .addPanel( From f031427bec4a4c5fc7bb0a6e77e6acd734336b36 Mon Sep 17 00:00:00 2001 From: Karen Miller <84039272+KMiller-Grafana@users.noreply.github.com> Date: Fri, 29 Apr 2022 02:32:02 -0700 Subject: [PATCH 025/223] Docs: PR 5387 redo - Warn ECS users to avoid plain text creds (#6051) --- docs/sources/clients/aws/ecs/_index.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/docs/sources/clients/aws/ecs/_index.md b/docs/sources/clients/aws/ecs/_index.md index fab36057cd155..d1f94fb76d1ef 100644 --- a/docs/sources/clients/aws/ecs/_index.md +++ b/docs/sources/clients/aws/ecs/_index.md @@ -150,6 +150,8 @@ The second container is our `sample-app`, a simple [alpine][alpine] container th Go ahead and replace the `Url` property with your [GrafanaCloud][GrafanaCloud] credentials, you can find them in your [account][grafanacloud account] in the Loki instance page. If you're running your own Loki instance replace completely the URL (e.g `http://my-loki.com:3100/loki/api/v1/push`). +We include plain text credentials in `options` for simplicity. However, this exposes credentials in your ECS task definition and in any version-controlled configuration. Mitigate this issue by using a secret store such as [AWS Secrets Manager](https://docs.aws.amazon.com/secretsmanager/latest/userguide/intro.html), combined with the `secretOptions` configuration option for [injecting sensitive data in a log configuration](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/specifying-sensitive-data-secrets.html#secrets-logconfig). + All `options` of the `logConfiguration` will be automatically translated into [fluentbit ouput][fluentbit ouput]. 
For example, the above options will produce this fluent bit `OUTPUT` config section: ```conf From d0160cb09c1e9f6fb3ddec1054a81634317f7a49 Mon Sep 17 00:00:00 2001 From: Arve Knudsen Date: Fri, 29 Apr 2022 11:33:38 +0200 Subject: [PATCH 026/223] Upgrade inet.af/netaddr (#6046) Signed-off-by: Arve Knudsen --- go.mod | 6 +- go.sum | 13 +-- .../unsafe/assume-no-moving-gc/untested.go | 3 +- vendor/inet.af/netaddr/fuzz.go | 18 ++- vendor/inet.af/netaddr/ipset.go | 63 +++++++++-- vendor/inet.af/netaddr/netaddr.go | 107 ++++++++++++++---- vendor/modules.txt | 6 +- 7 files changed, 161 insertions(+), 55 deletions(-) diff --git a/go.mod b/go.mod index 4dcca277a97ac..7c55aa74f1bc7 100644 --- a/go.mod +++ b/go.mod @@ -106,7 +106,7 @@ require ( gopkg.in/fsnotify.v1 v1.4.7 gopkg.in/yaml.v2 v2.4.0 gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b - inet.af/netaddr v0.0.0-20210707202901-70468d781e6c + inet.af/netaddr v0.0.0-20211027220019-c74959edd3b6 k8s.io/klog v1.0.0 ) @@ -256,8 +256,8 @@ require ( go.opentelemetry.io/otel/trace v1.4.1 // indirect go.uber.org/multierr v1.7.0 // indirect go.uber.org/zap v1.19.1 // indirect - go4.org/intern v0.0.0-20210108033219-3eb7198706b2 // indirect - go4.org/unsafe/assume-no-moving-gc v0.0.0-20201222180813-1025295fd063 // indirect + go4.org/intern v0.0.0-20211027215823-ae77deb06f29 // indirect + go4.org/unsafe/assume-no-moving-gc v0.0.0-20211027215541-db492cf91b37 // indirect golang.org/x/mod v0.5.1 // indirect golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8 // indirect golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 // indirect diff --git a/go.sum b/go.sum index 75f66499f570c..74527af9d9af2 100644 --- a/go.sum +++ b/go.sum @@ -2024,11 +2024,10 @@ go.uber.org/zap v1.14.1/go.mod h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc= go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo= go.uber.org/zap v1.19.1 h1:ue41HOKd1vGURxrmeKIgELGb3jPW9DMUDGtsinblHwI= go.uber.org/zap v1.19.1/go.mod h1:j3DNczoxDZroyBnOT1L/Q79cfUMGZxlv/9dzN7SM1rI= -go4.org/intern v0.0.0-20210108033219-3eb7198706b2 h1:VFTf+jjIgsldaz/Mr00VaCSswHJrI2hIjQygE/W4IMg= -go4.org/intern v0.0.0-20210108033219-3eb7198706b2/go.mod h1:vLqJ+12kCw61iCWsPto0EOHhBS+o4rO5VIucbc9g2Cc= -go4.org/unsafe/assume-no-moving-gc v0.0.0-20201222175341-b30ae309168e/go.mod h1:FftLjUGFEDu5k8lt0ddY+HcrH/qU/0qk+H8j9/nTl3E= -go4.org/unsafe/assume-no-moving-gc v0.0.0-20201222180813-1025295fd063 h1:1tk03FUNpulq2cuWpXZWj649rwJpk0d20rxWiopKRmc= -go4.org/unsafe/assume-no-moving-gc v0.0.0-20201222180813-1025295fd063/go.mod h1:FftLjUGFEDu5k8lt0ddY+HcrH/qU/0qk+H8j9/nTl3E= +go4.org/intern v0.0.0-20211027215823-ae77deb06f29 h1:UXLjNohABv4S58tHmeuIZDO6e3mHpW2Dx33gaNt03LE= +go4.org/intern v0.0.0-20211027215823-ae77deb06f29/go.mod h1:cS2ma+47FKrLPdXFpr7CuxiTW3eyJbWew4qx0qtQWDA= +go4.org/unsafe/assume-no-moving-gc v0.0.0-20211027215541-db492cf91b37 h1:Tx9kY6yUkLge/pFG7IEMwDZy6CS2ajFc9TvQdPCW0uA= +go4.org/unsafe/assume-no-moving-gc v0.0.0-20211027215541-db492cf91b37/go.mod h1:FftLjUGFEDu5k8lt0ddY+HcrH/qU/0qk+H8j9/nTl3E= golang.org/x/crypto v0.0.0-20171113213409-9f005a07e0d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20180505025534-4ec37c66abab/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= @@ -2770,8 +2769,8 @@ honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt honnef.co/go/tools v0.0.1-2020.1.3/go.mod 
h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= howett.net/plist v0.0.0-20181124034731-591f970eefbb/go.mod h1:vMygbs4qMhSZSc4lCUl2OEE+rDiIIJAIdR4m7MiMcm0= -inet.af/netaddr v0.0.0-20210707202901-70468d781e6c h1:ZNUX2CiFwNbN1VFaD4MQFmC8o5Rxc7BQW1P1K8kMpbE= -inet.af/netaddr v0.0.0-20210707202901-70468d781e6c/go.mod h1:z0nx+Dh+7N7CC8V5ayHtHGpZpxLQZZxkIaaz6HN65Ls= +inet.af/netaddr v0.0.0-20211027220019-c74959edd3b6 h1:acCzuUSQ79tGsM/O50VRFySfMm19IoMKL+sZztZkCxw= +inet.af/netaddr v0.0.0-20211027220019-c74959edd3b6/go.mod h1:y3MGhcFMlh0KZPMuXXow8mpjxxAk3yoDNsp4cQz54i8= k8s.io/api v0.21.0 h1:gu5iGF4V6tfVCQ/R+8Hc0h7H1JuEhzyEi9S4R5LM8+Y= k8s.io/api v0.21.0/go.mod h1:+YbrhBBGgsxbF6o6Kj4KJPJnBmAKuXDeS3E18bgHNVU= k8s.io/apimachinery v0.21.0 h1:3Fx+41if+IRavNcKOz09FwEXDBG6ORh6iMsTSelhkMA= diff --git a/vendor/go4.org/unsafe/assume-no-moving-gc/untested.go b/vendor/go4.org/unsafe/assume-no-moving-gc/untested.go index b7df7df71ca78..01377f77eacb9 100644 --- a/vendor/go4.org/unsafe/assume-no-moving-gc/untested.go +++ b/vendor/go4.org/unsafe/assume-no-moving-gc/untested.go @@ -2,7 +2,8 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build go1.18 +//go:build go1.19 +// +build go1.19 package assume_no_moving_gc diff --git a/vendor/inet.af/netaddr/fuzz.go b/vendor/inet.af/netaddr/fuzz.go index 06d90a53b923a..cf1836dc06395 100644 --- a/vendor/inet.af/netaddr/fuzz.go +++ b/vendor/inet.af/netaddr/fuzz.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build gofuzz // +build gofuzz package netaddr @@ -129,13 +130,18 @@ func checkBinaryMarshaller(x encoding.BinaryMarshaler) { } } -type appendMarshaller interface { +// fuzzAppendMarshaler is identical to appendMarshaler, defined in netaddr_test.go. +// We have two because the two go-fuzz implementations differ +// in whether they include _test.go files when typechecking. +// We need this fuzz file to compile with and without netaddr_test.go, +// which means defining the interface twice. +type fuzzAppendMarshaler interface { encoding.TextMarshaler AppendTo([]byte) []byte } // checkTextMarshalMatchesAppendTo checks that x's MarshalText matches x's AppendTo. -func checkTextMarshalMatchesAppendTo(x appendMarshaller) { +func checkTextMarshalMatchesAppendTo(x fuzzAppendMarshaler) { buf, err := x.MarshalText() if err != nil { panic(err) @@ -154,8 +160,8 @@ func parseIPPort(s string) (interface{}, error) { return ParseIPPort(s) } func parseIPPrefix(s string) (interface{}, error) { return ParseIPPrefix(s) } func checkStringParseRoundTrip(x fmt.Stringer, parse func(string) (interface{}, error)) { - v, vok := x.(interface{ Valid() bool }) - if vok && !v.Valid() { + v, vok := x.(interface{ IsValid() bool }) + if vok && !v.IsValid() { // Ignore invalid values. return } @@ -163,7 +169,7 @@ func checkStringParseRoundTrip(x fmt.Stringer, parse func(string) (interface{}, // The exception is if they have a Valid method and that Valid method // explicitly says that the zero value is valid. 
z, zok := x.(interface{ IsZero() bool }) - if zok && z.IsZero() && !(vok && v.Valid()) { + if zok && z.IsZero() && !(vok && v.IsValid()) { return } s := x.String() @@ -189,7 +195,7 @@ func checkEncoding(x interface{}) { if bm, ok := x.(encoding.BinaryMarshaler); ok { checkBinaryMarshaller(bm) } - if am, ok := x.(appendMarshaller); ok { + if am, ok := x.(fuzzAppendMarshaler); ok { checkTextMarshalMatchesAppendTo(am) } } diff --git a/vendor/inet.af/netaddr/ipset.go b/vendor/inet.af/netaddr/ipset.go index f6e258cc2f9f0..b448e25f9a8e9 100644 --- a/vendor/inet.af/netaddr/ipset.go +++ b/vendor/inet.af/netaddr/ipset.go @@ -5,8 +5,8 @@ package netaddr import ( - "errors" "fmt" + "runtime" "sort" "strings" ) @@ -64,7 +64,7 @@ func (s *IPSetBuilder) normalize() { } switch { - case !rout.Valid() || !rin.Valid(): + case !rout.IsValid() || !rin.IsValid(): // mergeIPRanges should have prevented invalid ranges from // sneaking in. panic("invalid IPRanges during Ranges merge") @@ -168,9 +168,22 @@ func (s *IPSetBuilder) Clone() *IPSetBuilder { } } +func (s *IPSetBuilder) addError(msg string, args ...interface{}) { + se := new(stacktraceErr) + // Skip three frames: runtime.Callers, addError, and the IPSetBuilder + // method that called addError (such as IPSetBuilder.Add). + // The resulting stack trace ends at the line in the user's + // code where they called into netaddr. + n := runtime.Callers(3, se.pcs[:]) + se.at = se.pcs[:n] + se.err = fmt.Errorf(msg, args...) + s.errs = append(s.errs, se) +} + // Add adds ip to s. func (s *IPSetBuilder) Add(ip IP) { if ip.IsZero() { + s.addError("Add(IP{})") return } s.AddRange(IPRangeFrom(ip, ip)) @@ -178,18 +191,18 @@ func (s *IPSetBuilder) Add(ip IP) { // AddPrefix adds all IPs in p to s. func (s *IPSetBuilder) AddPrefix(p IPPrefix) { - if r := p.Range(); r.Valid() { + if r := p.Range(); r.IsValid() { s.AddRange(r) } else { - s.errs = append(s.errs, fmt.Errorf("AddPrefix of invalid prefix %q", p)) + s.addError("AddPrefix(%v/%v)", p.IP(), p.Bits()) } } // AddRange adds r to s. // If r is not Valid, AddRange does nothing. func (s *IPSetBuilder) AddRange(r IPRange) { - if !r.Valid() { - s.errs = append(s.errs, fmt.Errorf("AddRange of invalid range %q", r)) + if !r.IsValid() { + s.addError("AddRange(%v-%v)", r.From(), r.To()) return } // If there are any removals (s.out), then we need to compact the set @@ -213,7 +226,7 @@ func (s *IPSetBuilder) AddSet(b *IPSet) { // Remove removes ip from s. func (s *IPSetBuilder) Remove(ip IP) { if ip.IsZero() { - s.errs = append(s.errs, errors.New("ignored Remove of zero IP")) + s.addError("Remove(IP{})") } else { s.RemoveRange(IPRangeFrom(ip, ip)) } @@ -221,19 +234,19 @@ func (s *IPSetBuilder) Remove(ip IP) { // RemovePrefix removes all IPs in p from s. func (s *IPSetBuilder) RemovePrefix(p IPPrefix) { - if r := p.Range(); r.Valid() { + if r := p.Range(); r.IsValid() { s.RemoveRange(r) } else { - s.errs = append(s.errs, fmt.Errorf("RemovePrefix of invalid prefix %q", p)) + s.addError("RemovePrefix(%v/%v)", p.IP(), p.Bits()) } } // RemoveRange removes all IPs in r from s. 
func (s *IPSetBuilder) RemoveRange(r IPRange) { - if r.Valid() { + if r.IsValid() { s.out = append(s.out, r) } else { - s.errs = append(s.errs, fmt.Errorf("RemoveRange of invalid range %q", r)) + s.addError("RemoveRange(%v-%v)", r.From(), r.To()) } } @@ -454,5 +467,31 @@ func (e multiErr) Error() string { for _, err := range e { ret = append(ret, err.Error()) } - return strings.Join(ret, ", ") + return strings.Join(ret, "; ") +} + +// A stacktraceErr combines an error with a stack trace. +type stacktraceErr struct { + pcs [16]uintptr // preallocated array of PCs + at []uintptr // stack trace whence the error + err error // underlying error +} + +func (e *stacktraceErr) Error() string { + frames := runtime.CallersFrames(e.at) + buf := new(strings.Builder) + buf.WriteString(e.err.Error()) + buf.WriteString(" @ ") + for { + frame, more := frames.Next() + if !more { + break + } + fmt.Fprintf(buf, "%s:%d ", frame.File, frame.Line) + } + return strings.TrimSpace(buf.String()) +} + +func (e *stacktraceErr) Unwrap() error { + return e.err } diff --git a/vendor/inet.af/netaddr/netaddr.go b/vendor/inet.af/netaddr/netaddr.go index 6b747df9aecae..97684063324ab 100644 --- a/vendor/inet.af/netaddr/netaddr.go +++ b/vendor/inet.af/netaddr/netaddr.go @@ -821,7 +821,7 @@ func (ip IP) Prior() IP { func (ip IP) String() string { switch ip.z { case z0: - return "invalid IP" + return "zero IP" case z4: return ip.string4() default: @@ -1004,7 +1004,7 @@ func (ip IP) MarshalText() ([]byte, error) { // It returns an error if *ip is not the IP zero value. func (ip *IP) UnmarshalText(text []byte) error { if ip.z != z0 { - return errors.New("netaddr: refusing to Unmarshal into non-zero IP") + return errors.New("refusing to Unmarshal into non-zero IP") } if len(text) == 0 { return nil @@ -1035,7 +1035,7 @@ func (ip IP) MarshalBinary() ([]byte, error) { // UnmarshalBinary implements the encoding.BinaryUnmarshaler interface. func (ip *IP) UnmarshalBinary(b []byte) error { if ip.z != z0 { - return errors.New("netaddr: refusing to Unmarshal into non-zero IP") + return errors.New("refusing to Unmarshal into non-zero IP") } n := len(b) switch { @@ -1051,7 +1051,7 @@ func (ip *IP) UnmarshalBinary(b []byte) error { *ip = ipv6Slice(b[:16]).WithZone(string(b[16:])) return nil } - return fmt.Errorf("netaddr: unexpected ip size: %v", len(b)) + return fmt.Errorf("unexpected ip size: %v", len(b)) } // IPPort is an IP and a port number. @@ -1209,7 +1209,7 @@ func (p IPPort) MarshalText() ([]byte, error) { // value. func (p *IPPort) UnmarshalText(text []byte) error { if p.ip.z != z0 || p.port != 0 { - return errors.New("netaddr: refusing to UnmarshalText into non-zero IP") + return errors.New("refusing to Unmarshal into non-zero IPPort") } if len(text) == 0 { return nil @@ -1282,7 +1282,7 @@ type IPPrefix struct { bits uint8 } -// IPPrefixFrom returns an IPPrefix with IP ip and port port. +// IPPrefixFrom returns an IPPrefix with IP ip and provided bits prefix length. // It does not allocate. func IPPrefixFrom(ip IP, bits uint8) IPPrefix { return IPPrefix{ @@ -1401,7 +1401,7 @@ func (p IPPrefix) Range() IPRange { // The returned value is always non-nil. // Any zone identifier is dropped in the conversion. func (p IPPrefix) IPNet() *net.IPNet { - if !p.Valid() { + if !p.IsValid() { return &net.IPNet{} } stdIP, _ := p.ip.ipZone(nil) @@ -1419,7 +1419,7 @@ func (p IPPrefix) IPNet() *net.IPNet { // If ip has an IPv6 zone, Contains returns false, // because IPPrefixes strip zones. 
func (p IPPrefix) Contains(ip IP) bool { - if !p.Valid() || ip.hasZone() { + if !p.IsValid() || ip.hasZone() { return false } if f1, f2 := p.ip.BitLen(), ip.BitLen(); f1 == 0 || f2 == 0 || f1 != f2 { @@ -1451,7 +1451,7 @@ func (p IPPrefix) Contains(ip IP) bool { // // If either has a Bits of zero, it returns true. func (p IPPrefix) Overlaps(o IPPrefix) bool { - if !p.Valid() || !o.Valid() { + if !p.IsValid() || !o.IsValid() { return false } if p == o { @@ -1490,7 +1490,7 @@ func (p IPPrefix) AppendTo(b []byte) []byte { if p.IsZero() { return b } - if !p.Valid() { + if !p.IsValid() { return append(b, "invalid IPPrefix"...) } @@ -1528,13 +1528,11 @@ func (p IPPrefix) MarshalText() ([]byte, error) { // It returns an error if *p is not the IPPrefix zero value. func (p *IPPrefix) UnmarshalText(text []byte) error { if *p != (IPPrefix{}) { - return errors.New("netaddr: refusing to Unmarshal into non-zero IPPrefix") + return errors.New("refusing to Unmarshal into non-zero IPPrefix") } - if len(text) == 0 { return nil } - var err error *p, err = ParseIPPrefix(string(text)) return err @@ -1542,7 +1540,10 @@ func (p *IPPrefix) UnmarshalText(text []byte) error { // String returns the CIDR notation of p: "/". func (p IPPrefix) String() string { - if !p.Valid() { + if p.IsZero() { + return "zero IPPrefix" + } + if !p.IsValid() { return "invalid IPPrefix" } return fmt.Sprintf("%s/%d", p.ip, p.bits) @@ -1550,7 +1551,7 @@ func (p IPPrefix) String() string { // lastIP returns the last IP in the prefix. func (p IPPrefix) lastIP() IP { - if !p.Valid() { + if !p.IsValid() { return IP{} } a16 := p.ip.As16() @@ -1625,19 +1626,29 @@ func ParseIPRange(s string) (IPRange, error) { return r, fmt.Errorf("invalid To IP %q in range %q", to, s) } r.to = r.to.withoutZone() - if !r.Valid() { + if !r.IsValid() { return r, fmt.Errorf("range %v to %v not valid", r.from, r.to) } return r, nil } +// MustParseIPRange calls ParseIPRange(s) and panics on error. +// It is intended for use in tests with hard-coded strings. +func MustParseIPRange(s string) IPRange { + r, err := ParseIPRange(s) + if err != nil { + panic(err) + } + return r +} + // String returns a string representation of the range. // // For a valid range, the form is "From-To" with a single hyphen // separating the IPs, the same format recognized by // ParseIPRange. func (r IPRange) String() string { - if r.Valid() { + if r.IsValid() { return fmt.Sprintf("%s-%s", r.from, r.to) } if r.from.IsZero() || r.to.IsZero() { @@ -1646,6 +1657,56 @@ func (r IPRange) String() string { return "invalid IPRange" } +// AppendTo appends a text encoding of r, +// as generated by MarshalText, +// to b and returns the extended buffer. +func (r IPRange) AppendTo(b []byte) []byte { + if r.IsZero() { + return b + } + b = r.from.AppendTo(b) + b = append(b, '-') + b = r.to.AppendTo(b) + return b +} + +// MarshalText implements the encoding.TextMarshaler interface, +// The encoding is the same as returned by String, with one exception: +// If ip is the zero value, the encoding is the empty string. +func (r IPRange) MarshalText() ([]byte, error) { + if r.IsZero() { + return []byte(""), nil + } + var max int + if r.from.z == z4 { + max = len("255.255.255.255-255.255.255.255") + } else { + max = len("ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff-ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff") + } + b := make([]byte, 0, max) + return r.AppendTo(b), nil +} + +// UnmarshalText implements the encoding.TextUnmarshaler interface. +// The IP range is expected in a form accepted by ParseIPRange. 
+// It returns an error if *r is not the IPRange zero value. +func (r *IPRange) UnmarshalText(text []byte) error { + if *r != (IPRange{}) { + return errors.New("refusing to Unmarshal into non-zero IPRange") + } + if len(text) == 0 { + return nil + } + var err error + *r, err = ParseIPRange(string(text)) + return err +} + +// IsZero reports whether r is the zero value of the IPRange type. +func (r IPRange) IsZero() bool { + return r == IPRange{} +} + // IsValid reports whether r.From() and r.To() are both non-zero and // obey the documented requirements: address families match, and From // is less than or equal to To. @@ -1669,7 +1730,7 @@ func (r IPRange) Valid() bool { return r.IsValid() } // If ip has an IPv6 zone, Contains returns false, // because IPPrefixes strip zones. func (r IPRange) Contains(addr IP) bool { - return r.Valid() && !addr.hasZone() && r.contains(addr) + return r.IsValid() && !addr.hasZone() && r.contains(addr) } // contains is like Contains, but without the validity check. @@ -1736,7 +1797,7 @@ func mergeIPRanges(rr []IPRange) (out []IPRange, valid bool) { for _, r := range rr[1:] { prev := &out[len(out)-1] switch { - case !r.Valid(): + case !r.IsValid(): // Invalid ranges make no sense to merge, refuse to // perform. return nil, false @@ -1778,8 +1839,8 @@ func mergeIPRanges(rr []IPRange) (out []IPRange, valid bool) { // If p and o are of different address families or either are invalid, // it reports false. func (r IPRange) Overlaps(o IPRange) bool { - return r.Valid() && - o.Valid() && + return r.IsValid() && + o.IsValid() && r.from.Compare(o.to) <= 0 && o.from.Compare(r.to) <= 0 } @@ -1802,7 +1863,7 @@ func (r IPRange) Prefixes() []IPPrefix { // AppendPrefixes is an append version of IPRange.Prefixes. It appends // the IPPrefix entries that cover r to dst. func (r IPRange) AppendPrefixes(dst []IPPrefix) []IPPrefix { - if !r.Valid() { + if !r.IsValid() { return nil } return appendRangePrefixes(dst, r.prefixFrom128AndBits, r.from.addr, r.to.addr) @@ -1835,7 +1896,7 @@ func comparePrefixes(a, b uint128) (common uint8, aZeroBSet bool) { // Prefix returns r as an IPPrefix, if it can be presented exactly as such. // If r is not valid or is not exactly equal to one prefix, ok is false. 
func (r IPRange) Prefix() (p IPPrefix, ok bool) { - if !r.Valid() { + if !r.IsValid() { return } if common, ok := comparePrefixes(r.from.addr, r.to.addr); ok { diff --git a/vendor/modules.txt b/vendor/modules.txt index 5bb151bfe5e2b..83fa0a21dbc03 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -1156,10 +1156,10 @@ go.uber.org/zap/internal/color go.uber.org/zap/internal/exit go.uber.org/zap/zapcore go.uber.org/zap/zapgrpc -# go4.org/intern v0.0.0-20210108033219-3eb7198706b2 +# go4.org/intern v0.0.0-20211027215823-ae77deb06f29 ## explicit; go 1.13 go4.org/intern -# go4.org/unsafe/assume-no-moving-gc v0.0.0-20201222180813-1025295fd063 +# go4.org/unsafe/assume-no-moving-gc v0.0.0-20211027215541-db492cf91b37 ## explicit; go 1.11 go4.org/unsafe/assume-no-moving-gc # golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3 @@ -1423,7 +1423,7 @@ gopkg.in/yaml.v2 # gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b ## explicit gopkg.in/yaml.v3 -# inet.af/netaddr v0.0.0-20210707202901-70468d781e6c +# inet.af/netaddr v0.0.0-20211027220019-c74959edd3b6 ## explicit; go 1.12 inet.af/netaddr # k8s.io/api v0.22.7 => k8s.io/api v0.21.0 From d73df0834ad3627e26d27d3d59ac5ee8e71bd701 Mon Sep 17 00:00:00 2001 From: Cyril Tovena Date: Fri, 29 Apr 2022 11:38:08 +0200 Subject: [PATCH 027/223] Improve series store index queries (#6045) --- .../series/index/caching_index_client.go | 15 ++------ .../stores/series/index/schema_util.go | 22 ++++++------ .../stores/series/series_index_store.go | 34 ++++++++++--------- .../stores/series/series_store_utils.go | 11 ------ 4 files changed, 31 insertions(+), 51 deletions(-) diff --git a/pkg/storage/stores/series/index/caching_index_client.go b/pkg/storage/stores/series/index/caching_index_client.go index bde219465df4e..79461659995ee 100644 --- a/pkg/storage/stores/series/index/caching_index_client.go +++ b/pkg/storage/stores/series/index/caching_index_client.go @@ -16,7 +16,6 @@ import ( "github.com/grafana/loki/pkg/storage/chunk/cache" util_log "github.com/grafana/loki/pkg/util/log" - "github.com/grafana/loki/pkg/util/spanlogger" ) var ( @@ -322,10 +321,6 @@ func (s *cachingIndexClient) cacheStore(ctx context.Context, keys []string, batc } func (s *cachingIndexClient) cacheFetch(ctx context.Context, keys []string) (batches []ReadBatch, missed []string) { - spanLogger := spanlogger.FromContext(ctx) - logger := util_log.WithContext(ctx, s.logger) - level.Debug(spanLogger).Log("requested", len(keys)) - cacheGets.Add(float64(len(keys))) // Build a map from hash -> key; NB there can be collisions here; we'll fetch @@ -354,7 +349,7 @@ func (s *cachingIndexClient) cacheFetch(ctx context.Context, keys []string) (bat var readBatch ReadBatch if err := proto.Unmarshal(bufs[j], &readBatch); err != nil { - level.Warn(spanLogger).Log("msg", "error unmarshalling index entry from cache", "err", err) + level.Warn(util_log.Logger).Log("msg", "error unmarshalling index entry from cache", "err", err) cacheCorruptErrs.Inc() continue } @@ -362,7 +357,7 @@ func (s *cachingIndexClient) cacheFetch(ctx context.Context, keys []string) (bat // Make sure the hash(key) is not a collision in the cache by looking at the // key in the value. 
if key != readBatch.Key { - level.Debug(spanLogger).Log("msg", "dropping index cache entry due to key collision", "key", key, "readBatch.Key", readBatch.Key, "expiry") + level.Debug(util_log.Logger).Log("msg", "dropping index cache entry due to key collision", "key", key, "readBatch.Key", readBatch.Key, "expiry") continue } @@ -370,11 +365,6 @@ func (s *cachingIndexClient) cacheFetch(ctx context.Context, keys []string) (bat continue } - if len(readBatch.Entries) != 0 { - // not using spanLogger to avoid over-inflating traces since the query count can go much higher - level.Debug(logger).Log("msg", "found index cache entries", "key", key, "count", len(readBatch.Entries)) - } - cacheHits.Inc() batches = append(batches, readBatch) } @@ -392,6 +382,5 @@ func (s *cachingIndexClient) cacheFetch(ctx context.Context, keys []string) (bat missed = append(missed, miss) } - level.Debug(spanLogger).Log("hits", len(batches), "misses", len(misses)) return batches, missed } diff --git a/pkg/storage/stores/series/index/schema_util.go b/pkg/storage/stores/series/index/schema_util.go index 0a04e5c616584..460c1adb193ea 100644 --- a/pkg/storage/stores/series/index/schema_util.go +++ b/pkg/storage/stores/series/index/schema_util.go @@ -195,8 +195,8 @@ func ParseChunkTimeRangeValue(rangeValue []byte, value []byte) ( // v1 & v2 schema had three components - label name, label value and chunk ID. // No version number. case len(components) == 3: - chunkID = string(components[2]) - labelValue = model.LabelValue(components[1]) + chunkID = yoloString(components[2]) + labelValue = model.LabelValue(yoloString(components[1])) return case len(components[3]) == 1: @@ -205,42 +205,42 @@ func ParseChunkTimeRangeValue(rangeValue []byte, value []byte) ( // "version" is 1 and label value is base64 encoded. // (older code wrote "version" as 1, not '1') case chunkTimeRangeKeyV1a, chunkTimeRangeKeyV1: - chunkID = string(components[2]) + chunkID = yoloString(components[2]) labelValue, err = decodeBase64Value(components[1]) return // v4 schema wrote v3 range keys and a new range key - version 2, // with four components - , , chunk ID and version. case chunkTimeRangeKeyV2: - chunkID = string(components[2]) + chunkID = yoloString(components[2]) return // v5 schema version 3 range key is chunk end time, , chunk ID, version case chunkTimeRangeKeyV3: - chunkID = string(components[2]) + chunkID = yoloString(components[2]) return // v5 schema version 4 range key is chunk end time, label value, chunk ID, version case chunkTimeRangeKeyV4: - chunkID = string(components[2]) + chunkID = yoloString(components[2]) labelValue, err = decodeBase64Value(components[1]) return // v6 schema added version 5 range keys, which have the label value written in // to the value, not the range key. So they are [chunk end time, , chunk ID, version]. 
case chunkTimeRangeKeyV5: - chunkID = string(components[2]) - labelValue = model.LabelValue(value) + chunkID = yoloString(components[2]) + labelValue = model.LabelValue(yoloString(value)) return // v9 schema actually return series IDs case seriesRangeKeyV1: - chunkID = string(components[0]) + chunkID = yoloString(components[0]) return case labelSeriesRangeKeyV1: - chunkID = string(components[1]) - labelValue = model.LabelValue(value) + chunkID = yoloString(components[1]) + labelValue = model.LabelValue(yoloString(value)) return } } diff --git a/pkg/storage/stores/series/series_index_store.go b/pkg/storage/stores/series/series_index_store.go index c4fc5b36b56f7..e9bf5bce79fd7 100644 --- a/pkg/storage/stores/series/series_index_store.go +++ b/pkg/storage/stores/series/series_index_store.go @@ -251,6 +251,8 @@ func (c *IndexStore) LabelValuesForMetricName(ctx context.Context, userID string if err != nil { return nil, err } + // nolint:staticcheck + defer entriesPool.Put(entries) var result util.UniqueStrings for _, entry := range entries { @@ -289,6 +291,8 @@ func (c *IndexStore) labelValuesForMetricNameWithMatchers(ctx context.Context, u if err != nil { return nil, err } + // nolint:staticcheck + defer entriesPool.Put(entries) result := util.NewUniqueStrings(len(entries)) for _, entry := range entries { @@ -409,7 +413,6 @@ func (c *IndexStore) lookupIdsByMetricNameMatcher(ctx context.Context, from, thr if err != nil { return nil, err } - unfilteredQueries := len(queries) if filter != nil { queries = filter(queries) @@ -423,20 +426,13 @@ func (c *IndexStore) lookupIdsByMetricNameMatcher(ctx context.Context, from, thr } else if err != nil { return nil, err } + // nolint:staticcheck + defer entriesPool.Put(entries) ids, err := parseIndexEntries(ctx, entries, matcher) if err != nil { return nil, err } - level.Debug(util_log.WithContext(ctx, util_log.Logger)). - Log( - "msg", "Store.lookupIdsByMetricNameMatcher", - "matcher", formatMatcher(matcher), - "queries", unfilteredQueries, - "filteredQueries", len(queries), - "entries", len(entries), - "ids", len(ids), - ) return ids, nil } @@ -486,6 +482,12 @@ func parseIndexEntries(_ context.Context, entries []index.Entry, matcher *labels return result, nil } +var entriesPool = sync.Pool{ + New: func() interface{} { + return make([]index.Entry, 0, 1024) + }, +} + func (c *IndexStore) lookupEntriesByQueries(ctx context.Context, queries []index.Query) ([]index.Entry, error) { // Nothing to do if there are no queries. 
if len(queries) == 0 { @@ -493,7 +495,7 @@ func (c *IndexStore) lookupEntriesByQueries(ctx context.Context, queries []index } var lock sync.Mutex - var entries []index.Entry + entries := entriesPool.Get().([]index.Entry)[:0] err := c.index.QueryPages(ctx, queries, func(query index.Query, resp index.ReadBatchResult) bool { iter := resp.Iterator() lock.Lock() @@ -532,6 +534,9 @@ func (c *IndexStore) lookupLabelNamesBySeries(ctx context.Context, from, through if err != nil { return nil, err } + // nolint:staticcheck + defer entriesPool.Put(entries) + level.Debug(log).Log("entries", len(entries)) var result util.UniqueStrings @@ -595,11 +600,8 @@ func (c *IndexStore) lookupChunksBySeries(ctx context.Context, from, through mod if err != nil { return nil, err } - level.Debug(util_log.WithContext(ctx, util_log.Logger)).Log( - "msg", "SeriesStore.lookupChunksBySeries", - "seriesIDs", len(seriesIDs), - "queries", len(queries), - "entries", len(entries)) + // nolint:staticcheck + defer entriesPool.Put(entries) result, err := parseIndexEntries(ctx, entries, nil) return result, err diff --git a/pkg/storage/stores/series/series_store_utils.go b/pkg/storage/stores/series/series_store_utils.go index a0cbaf307a2de..317e0547bdd0d 100644 --- a/pkg/storage/stores/series/series_store_utils.go +++ b/pkg/storage/stores/series/series_store_utils.go @@ -5,7 +5,6 @@ import ( "unicode/utf8" "github.com/prometheus/common/model" - "github.com/prometheus/prometheus/model/labels" "github.com/grafana/loki/pkg/logproto" "github.com/grafana/loki/pkg/storage/chunk" @@ -171,13 +170,3 @@ func FindSetMatches(pattern string) []string { } return matches } - -// Using this function avoids logging of nil matcher, which works, but indirectly via panic and recover. -// That confuses attached debugger, which wants to breakpoint on each panic. -// Using simple check is also faster. -func formatMatcher(matcher *labels.Matcher) string { - if matcher == nil { - return "nil" - } - return matcher.String() -} From 8c03d2389c60251a781cf67c19b199ca4fa9a39e Mon Sep 17 00:00:00 2001 From: Periklis Tsirakidis Date: Fri, 29 Apr 2022 13:02:09 +0200 Subject: [PATCH 028/223] operator: Merge gh index page with README.md to link to docs (#6048) --- operator/README.md | 37 ++++++++++++++++++++++--------------- operator/index.md | 31 ------------------------------- 2 files changed, 22 insertions(+), 46 deletions(-) delete mode 100644 operator/index.md diff --git a/operator/README.md b/operator/README.md index d36d5c668d504..083cc93166076 100644 --- a/operator/README.md +++ b/operator/README.md @@ -7,23 +7,30 @@ provided by the Grafana Loki SIG operator. **This is currently a work in progress and is subject to large scale changes that will break any dependencies. Do not use this in any production environment.** -## Development +### Hacking on Loki Operator on kind or OpenShift -Requirements: +* If you want to contribute to this repository, you might need a step-by-step guide on how to start [hacking on Loki-operator with kind](https://github.com/grafana/loki/blob/master/operator/docs/hack_loki_operator.md#hacking-on-loki-operator-using-kind). +* Also, there is a step-by-step guide on how to test Loki-operator on [OpenShift](https://github.com/grafana/loki/blob/master/operator/docs/hack_loki_operator.md#hacking-on-loki-operator-on-openshift). 
+* There is also a [basic troubleshooting guide](https://github.com/grafana/loki/blob/master/operator/docs/hack_loki_operator.md#basic-troubleshooting-on-hacking-on-loki-operator) if you run into some common problems. +* There is also a [document](https://github.com/grafana/loki/blob/master/operator/docs/hack_operator_make_run.md) which demonstrates how to use Loki Operator for development and testing locally without deploying the operator each time on Kind and OpenShift using the `make run` command. - 1. Running Kubernetes cluster. Our team uses - [KinD](https://kind.sigs.k8s.io/docs/user/quick-start/) or - [K3s](https://k3s.io/) for simplicity. - 1. A container registry that you and your Kubernetes cluster can reach. We - recommend [quay.io](https://quay.io/signin/). +### Sending Logs to Loki -Build and push the container image and then deploy the operator with `make -oci-build oci-push deploy IMG=quay.io/my-team/loki-operator:latest`. This will -deploy to your active Kubernetes/OpenShift cluster defined by your local -[kubeconfig](https://kubernetes.io/docs/concepts/configuration/organize-cluster-access-kubeconfig/). +#### Sending Logs Through the Gateway Component -For detailed step-by-step guide on how to start development and testing on Kind and OpenShift, -check our [documentation](https://github.com/grafana/loki/blob/master/operator/docs/hack_loki_operator.md) +* The [forwarding logs to LokiStack guide](https://github.com/grafana/loki/tree/master/operator/docs/forwarding_logs_to_gateway.md) provides instructions for configuring forwarding clients to ship logs to Loki through the gateway component. +* This section details [how to connect to OpenShift Logging](https://github.com/grafana/loki/tree/master/operator/docs/forwarding_logs_to_gateway.md#openshift-logging) installation to the gateway. +* This section details [how to connect a Promtail](https://github.com/grafana/loki/tree/master/operator/docs/forwarding_logs_to_gateway.md#promtail) installation to the gateway. +* This section details [how to connect a Grafana Fluentd plugin](https://github.com/grafana/loki/tree/master/operator/docs/forwarding_logs_to_gateway.md#fluentd) installation to the gateway. -Also, there is a [document](https://github.com/grafana/loki/blob/master/operator/docs/hack_operator_make_run.md) which -demonstrates how to use Loki Operator for development and testing locally without deploying the operator each time on Kind and OpenShift. +#### Sending Logs Directly to the Distributor Component + +* The [forwarding logs to LokiStack without LokiStack Gateway](https://github.com/grafana/loki/tree/master/operator/docs/forwarding_logs_without_gateway.md) is used to send application, infrastructure, and audit logs to the Loki Distributor as different tenants using Fluentd or Vector. +* The guide has a step-by-step guide to connect with OpenShift Logging for forwarding logs to LokiStack. + +### Installation of Storage Size Calculator on OpenShift + +* Storage size calculator works out of the box on OpenShift. For non-openshift distributions you will need to create services like prometheus, serviceMonitor, scrape configuration for log-file-metric exporter, promsecret to access the custom prometheus URL, token. +* The step-by-step guide on how to install [storage size calculator](https://github.com/grafana/loki/blob/master/operator/docs/storage_size_calculator.md) on OpenShift is available. 
+* Also, there is a step-by-step guide on how to [contribute](https://github.com/grafana/loki/blob/master/operator/docs/storage_size_calculator.md#contribution) to this along with local development and testing procedure. +* There is also a [basic troubleshooting guide](https://github.com/grafana/loki/blob/master/operator/docs/storage_size_calculator.md#troubleshooting) if you run into some common problems. diff --git a/operator/index.md b/operator/index.md deleted file mode 100644 index 224e6f0348878..0000000000000 --- a/operator/index.md +++ /dev/null @@ -1,31 +0,0 @@ -## Welcome to Loki Operator - -This is the Kubernetes Operator for Loki provided by the Grafana Loki SIG operator. This is currently a work in progress and is subject to large scale changes that will break any dependencies. Do not use this in any production environment. - -### Hacking on Loki Operator on kind or OpenShift - -* If you want to contribute to this repository, you might need a step-by-step guide on how to start [hacking on Loki-operator with kind](https://github.com/grafana/loki/blob/master/operator/docs/hack_loki_operator.md#hacking-on-loki-operator-using-kind). -* Also, there is a step-by-step guide on how to test Loki-operator on [OpenShift](https://github.com/grafana/loki/blob/master/operator/docs/hack_loki_operator.md#hacking-on-loki-operator-on-openshift). -* There is also a [basic troubleshooting guide](https://github.com/grafana/loki/blob/master/operator/docs/hack_loki_operator.md#basic-troubleshooting-on-hacking-on-loki-operator) if you run into some common problems. -* There is also a [document](https://github.com/grafana/loki/blob/master/operator/docs/hack_operator_make_run.md) which demonstrates how to use Loki Operator for development and testing locally without deploying the operator each time on Kind and OpenShift using the `make run` command. - -### Sending Logs to Loki - -#### Sending Logs Through the Gateway Component - -* The [forwarding logs to LokiStack guide](https://github.com/grafana/loki/tree/master/operator/docs/forwarding_logs_to_gateway.md) provides instructions for configuring forwarding clients to ship logs to Loki through the gateway component. -* This section details [how to connect to OpenShift Logging](https://github.com/grafana/loki/tree/master/operator/docs/forwarding_logs_to_gateway.md#openshift-logging) installation to the gateway. -* This section details [how to connect a Promtail](https://github.com/grafana/loki/tree/master/operator/docs/forwarding_logs_to_gateway.md#promtail) installation to the gateway. -* This section details [how to connect a Grafana Fluentd plugin](https://github.com/grafana/loki/tree/master/operator/docs/forwarding_logs_to_gateway.md#fluentd) installation to the gateway. - -#### Sending Logs Directly to the Distributor Component - -* The [forwarding logs to LokiStack without LokiStack Gateway](https://github.com/grafana/loki/tree/master/operator/docs/forwarding_logs_without_gateway.md) is used to send application, infrastructure, and audit logs to the Loki Distributor as different tenants using Fluentd or Vector. -* The guide has a step-by-step guide to connect with OpenShift Logging for forwarding logs to LokiStack. - -### Installation of Storage Size Calculator on OpenShift - -* Storage size calculator works out of the box on OpenShift. For non-openshift distributions you will need to create services like prometheus, serviceMonitor, scrape configuration for log-file-metric exporter, promsecret to access the custom prometheus URL, token. 
-* The step-by-step guide on how to install [storage size calculator](https://github.com/grafana/loki/blob/master/operator/docs/storage_size_calculator.md) on OpenShift is available. -* Also, there is a step-by-step guide on how to [contribute](https://github.com/grafana/loki/blob/master/operator/docs/storage_size_calculator.md#contribution) to this along with local development and testing procedure. -* There is also a [basic troubleshooting guide](https://github.com/grafana/loki/blob/master/operator/docs/storage_size_calculator.md#troubleshooting) if you run into some common problems. From 477a0e71dfc8b42770f6b36d2e684a232652051b Mon Sep 17 00:00:00 2001 From: Michel Hollands <42814411+MichelHollands@users.noreply.github.com> Date: Fri, 29 Apr 2022 12:55:05 +0100 Subject: [PATCH 029/223] Add filter parameter to rebound so lines can be deleted by the compactor (#5879) * Add filter parameter to rebound Signed-off-by: Michel Hollands * Fix linting issues Signed-off-by: Michel Hollands * Add filter function to delete request Signed-off-by: Michel Hollands * Fix linting issues Signed-off-by: Michel Hollands * Enable api for filter and delete mode Signed-off-by: Michel Hollands * Add settings for retention Signed-off-by: Michel Hollands * Use labels to check and add test Signed-off-by: Michel Hollands * Simplify filter function Signed-off-by: Michel Hollands * Also enable filter mode Signed-off-by: Michel Hollands * Remove test settings in config file for docker Signed-off-by: Michel Hollands * Add extra (unused) param for ProcessString in filter Signed-off-by: Michel Hollands * Empty commit to trigger CI again Signed-off-by: Michel Hollands * Update changelog Signed-off-by: Michel Hollands * Fix flapping test Signed-off-by: Michel Hollands * Remove commented out unit tests and add some more Signed-off-by: Michel Hollands * Add extra test case for delete request without line filter Signed-off-by: Michel Hollands * Use chunk bounds Signed-off-by: Michel Hollands * check if the log selector has a filter if the whole chunk is selected Signed-off-by: Michel Hollands * fix lint issue: use correct go-kit import Signed-off-by: Michel Hollands --- CHANGELOG.md | 1 + pkg/chunkenc/dumb_chunk.go | 3 +- pkg/chunkenc/facade.go | 5 +- pkg/chunkenc/interface.go | 3 +- pkg/chunkenc/memchunk.go | 8 +- pkg/chunkenc/memchunk_test.go | 96 ++++++++++++- pkg/loki/modules.go | 15 +- pkg/storage/chunk/bigchunk.go | 4 +- pkg/storage/chunk/interface.go | 4 +- .../stores/shipper/compactor/compactor.go | 14 +- .../compactor/deletion/delete_request.go | 95 +++++++++++-- .../compactor/deletion/delete_request_test.go | 133 +++++++++++++++--- .../deletion/delete_requests_manager.go | 28 ++-- .../deletion/delete_requests_manager_test.go | 30 ++-- .../shipper/compactor/deletion/validation.go | 15 +- .../compactor/deletion/validation_test.go | 30 ++-- .../shipper/compactor/retention/expiration.go | 15 +- .../compactor/retention/expiration_test.go | 4 +- .../shipper/compactor/retention/retention.go | 30 ++-- .../compactor/retention/retention_test.go | 127 +++++++++++------ pkg/util/filter/filter_function.go | 3 + 21 files changed, 508 insertions(+), 155 deletions(-) create mode 100644 pkg/util/filter/filter_function.go diff --git a/CHANGELOG.md b/CHANGELOG.md index 5d1565f4e8d5a..b0bb8f1282e01 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,6 @@ ## Main * [5984](https://github.com/grafana/loki/pull/5984) **dannykopping** and **salvacorts**: Querier: prevent unnecessary calls to ingesters. 
+* [5879](https://github.com/grafana/loki/pull/5879) **MichelHollands**: Remove lines matching delete request expression when using "filter-and-delete" deletion mode. * [5899](https://github.com/grafana/loki/pull/5899) **simonswine**: Update go image to 1.17.9. * [5888](https://github.com/grafana/loki/pull/5888) **Papawy** Fix common config net interface name overwritten by ring common config * [5799](https://github.com/grafana/loki/pull/5799) **cyriltovena** Fix deduping issues when multiple entries with the same timestamp exist. diff --git a/pkg/chunkenc/dumb_chunk.go b/pkg/chunkenc/dumb_chunk.go index 3a0e45837bfa2..e0f4cf670ed4b 100644 --- a/pkg/chunkenc/dumb_chunk.go +++ b/pkg/chunkenc/dumb_chunk.go @@ -9,6 +9,7 @@ import ( "github.com/grafana/loki/pkg/iter" "github.com/grafana/loki/pkg/logproto" "github.com/grafana/loki/pkg/logql/log" + "github.com/grafana/loki/pkg/util/filter" ) const ( @@ -122,7 +123,7 @@ func (c *dumbChunk) Close() error { return nil } -func (c *dumbChunk) Rebound(start, end time.Time) (Chunk, error) { +func (c *dumbChunk) Rebound(start, end time.Time, filter filter.Func) (Chunk, error) { return nil, nil } diff --git a/pkg/chunkenc/facade.go b/pkg/chunkenc/facade.go index 36c6331aafe07..1db71c189cd65 100644 --- a/pkg/chunkenc/facade.go +++ b/pkg/chunkenc/facade.go @@ -6,6 +6,7 @@ import ( "github.com/prometheus/common/model" "github.com/grafana/loki/pkg/storage/chunk" + "github.com/grafana/loki/pkg/util/filter" ) // GzipLogChunk is a cortex encoding type for our chunks. @@ -86,8 +87,8 @@ func (f Facade) LokiChunk() Chunk { return f.c } -func (f Facade) Rebound(start, end model.Time) (chunk.Data, error) { - newChunk, err := f.c.Rebound(start.Time(), end.Time()) +func (f Facade) Rebound(start, end model.Time, filter filter.Func) (chunk.Data, error) { + newChunk, err := f.c.Rebound(start.Time(), end.Time(), filter) if err != nil { return nil, err } diff --git a/pkg/chunkenc/interface.go b/pkg/chunkenc/interface.go index 74fc87c8ed5cb..7ac494c796714 100644 --- a/pkg/chunkenc/interface.go +++ b/pkg/chunkenc/interface.go @@ -11,6 +11,7 @@ import ( "github.com/grafana/loki/pkg/iter" "github.com/grafana/loki/pkg/logproto" "github.com/grafana/loki/pkg/logql/log" + "github.com/grafana/loki/pkg/util/filter" ) // Errors returned by the chunk interface. @@ -127,7 +128,7 @@ type Chunk interface { CompressedSize() int Close() error Encoding() Encoding - Rebound(start, end time.Time) (Chunk, error) + Rebound(start, end time.Time, filter filter.Func) (Chunk, error) } // Block is a chunk block. 
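The `filter.Func` parameter added above threads an optional per-line predicate through every `Chunk` implementation: the filter receives a raw log line and returns true when that line should be dropped from the rebounded chunk, while a nil filter keeps the old copy-everything behaviour (exactly what the `MemChunk.Rebound` hunk below does with its `filter != nil && filter(entry.Line)` check). A minimal, self-contained sketch of that contract, using a plain string slice as a stand-in for the real chunk and entry types:

```go
package main

import (
	"fmt"
	"strings"
)

// Func mirrors pkg/util/filter.Func: it takes a log line and returns
// true when the line should be removed during a rebound.
type Func func(string) bool

// reboundLines is a simplified stand-in for MemChunk.Rebound: it copies
// entries into a "new chunk" (here just a slice), skipping every line
// the filter matches. A nil filter keeps all lines, preserving the
// pre-5879 behaviour.
func reboundLines(lines []string, filter Func) []string {
	var kept []string
	for _, line := range lines {
		if filter != nil && filter(line) {
			continue // line matches the delete expression; drop it
		}
		kept = append(kept, line)
	}
	return kept
}

func main() {
	lines := []string{"matching line 1", "other line", "matching line 2"}
	// A filter derived from a `|= "matching"`-style delete request.
	filter := func(s string) bool { return strings.Contains(s, "matching") }
	fmt.Println(reboundLines(lines, filter)) // prints: [other line]
}
```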
diff --git a/pkg/chunkenc/memchunk.go b/pkg/chunkenc/memchunk.go index e29e1f25df723..ffb4f179a0de1 100644 --- a/pkg/chunkenc/memchunk.go +++ b/pkg/chunkenc/memchunk.go @@ -23,6 +23,7 @@ import ( "github.com/grafana/loki/pkg/logql/log" "github.com/grafana/loki/pkg/logqlmodel/stats" "github.com/grafana/loki/pkg/storage/chunk" + "github.com/grafana/loki/pkg/util/filter" util_log "github.com/grafana/loki/pkg/util/log" ) @@ -713,7 +714,7 @@ func (c *MemChunk) reorder() error { // Otherwise, we need to rebuild the blocks from, to := c.Bounds() - newC, err := c.Rebound(from, to) + newC, err := c.Rebound(from, to, nil) if err != nil { return err } @@ -910,7 +911,7 @@ func (c *MemChunk) Blocks(mintT, maxtT time.Time) []Block { } // Rebound builds a smaller chunk with logs having timestamp from start and end(both inclusive) -func (c *MemChunk) Rebound(start, end time.Time) (Chunk, error) { +func (c *MemChunk) Rebound(start, end time.Time, filter filter.Func) (Chunk, error) { // add a millisecond to end time because the Chunk.Iterator considers end time to be non-inclusive. itr, err := c.Iterator(context.Background(), start, end.Add(time.Millisecond), logproto.FORWARD, log.NewNoopPipeline().ForStream(labels.Labels{})) if err != nil { @@ -931,6 +932,9 @@ func (c *MemChunk) Rebound(start, end time.Time) (Chunk, error) { for itr.Next() { entry := itr.Entry() + if filter != nil && filter(entry.Line) { + continue + } if err := newChunk.Append(&entry); err != nil { return nil, err } diff --git a/pkg/chunkenc/memchunk_test.go b/pkg/chunkenc/memchunk_test.go index d94259cc2ea42..0b6d7e20d443e 100644 --- a/pkg/chunkenc/memchunk_test.go +++ b/pkg/chunkenc/memchunk_test.go @@ -1188,7 +1188,7 @@ func TestMemChunk_Rebound(t *testing.T) { }, } { t.Run(tc.name, func(t *testing.T) { - newChunk, err := originalChunk.Rebound(tc.sliceFrom, tc.sliceTo) + newChunk, err := originalChunk.Rebound(tc.sliceFrom, tc.sliceTo, nil) if tc.err != nil { require.Equal(t, tc.err, err) return @@ -1231,3 +1231,97 @@ func buildTestMemChunk(t *testing.T, from, through time.Time) *MemChunk { return chk } + +func TestMemChunk_ReboundAndFilter_with_filter(t *testing.T) { + chkFrom := time.Unix(1, 0) // headBlock.Append treats Unix time 0 as not set so we have to use a later time + chkFromPlus5 := chkFrom.Add(5 * time.Second) + chkThrough := chkFrom.Add(10 * time.Second) + chkThroughPlus1 := chkThrough.Add(1 * time.Second) + + filterFunc := func(in string) bool { + return strings.HasPrefix(in, "matching") + } + + for _, tc := range []struct { + name string + matchingSliceFrom, matchingSliceTo *time.Time + err error + nrMatching int + nrNotMatching int + }{ + { + name: "no matches", + nrMatching: 0, + nrNotMatching: 10, + }, + { + name: "some lines removed", + matchingSliceFrom: &chkFrom, + matchingSliceTo: &chkFromPlus5, + nrMatching: 5, + nrNotMatching: 5, + }, + { + name: "all lines match", + err: chunk.ErrSliceNoDataInRange, + matchingSliceFrom: &chkFrom, + matchingSliceTo: &chkThroughPlus1, + }, + } { + t.Run(tc.name, func(t *testing.T) { + originalChunk := buildFilterableTestMemChunk(t, chkFrom, chkThrough, tc.matchingSliceFrom, tc.matchingSliceTo) + newChunk, err := originalChunk.Rebound(chkFrom, chkThrough, filterFunc) + if tc.err != nil { + require.Equal(t, tc.err, err) + return + } + require.NoError(t, err) + + // iterate originalChunk from slice start to slice end + nanosecond. Adding a nanosecond here to be inclusive of sample at end time. 
+ originalChunkItr, err := originalChunk.Iterator(context.Background(), chkFrom, chkThrough.Add(time.Nanosecond), logproto.FORWARD, log.NewNoopPipeline().ForStream(labels.Labels{})) + require.NoError(t, err) + originalChunkSamples := 0 + for originalChunkItr.Next() { + originalChunkSamples++ + } + require.Equal(t, tc.nrMatching+tc.nrNotMatching, originalChunkSamples) + + // iterate newChunk for whole chunk interval which should include all the samples in the chunk and hence align it with expected values. + newChunkItr, err := newChunk.Iterator(context.Background(), chkFrom, chkThrough.Add(time.Nanosecond), logproto.FORWARD, log.NewNoopPipeline().ForStream(labels.Labels{})) + require.NoError(t, err) + newChunkSamples := 0 + for newChunkItr.Next() { + newChunkSamples++ + } + require.Equal(t, tc.nrNotMatching, newChunkSamples) + }) + } +} + +func buildFilterableTestMemChunk(t *testing.T, from, through time.Time, matchingFrom, matchingTo *time.Time) *MemChunk { + chk := NewMemChunk(EncGZIP, DefaultHeadBlockFmt, defaultBlockSize, 0) + t.Logf("from : %v", from.String()) + t.Logf("through: %v", through.String()) + for from.Before(through) { + // If a line is between matchingFrom and matchingTo add the prefix "matching" + if matchingFrom != nil && matchingTo != nil && + (from.Equal(*matchingFrom) || (from.After(*matchingFrom) && (from.Before(*matchingTo)))) { + t.Logf("%v matching line", from.String()) + err := chk.Append(&logproto.Entry{ + Line: fmt.Sprintf("matching %v", from.String()), + Timestamp: from, + }) + require.NoError(t, err) + } else { + t.Logf("%v non-match line", from.String()) + err := chk.Append(&logproto.Entry{ + Line: from.String(), + Timestamp: from, + }) + require.NoError(t, err) + } + from = from.Add(time.Second) + } + + return chk +} diff --git a/pkg/loki/modules.go b/pkg/loki/modules.go index 389d97de954ed..9d36d1e96a5b6 100644 --- a/pkg/loki/modules.go +++ b/pkg/loki/modules.go @@ -795,12 +795,15 @@ func (t *Loki) initCompactor() (services.Service, error) { t.Server.HTTP.Path("/compactor/ring").Methods("GET", "POST").Handler(t.compactor) - if t.Cfg.CompactorConfig.RetentionEnabled && t.compactor.DeleteMode() != deletion.Disabled { - t.Server.HTTP.Path("/loki/api/v1/delete").Methods("PUT", "POST").Handler(t.HTTPAuthMiddleware.Wrap(http.HandlerFunc(t.compactor.DeleteRequestsHandler.AddDeleteRequestHandler))) - t.Server.HTTP.Path("/loki/api/v1/delete").Methods("GET").Handler(t.HTTPAuthMiddleware.Wrap(http.HandlerFunc(t.compactor.DeleteRequestsHandler.GetAllDeleteRequestsHandler))) - t.Server.HTTP.Path("/loki/api/v1/delete").Methods("DELETE").Handler(t.HTTPAuthMiddleware.Wrap(http.HandlerFunc(t.compactor.DeleteRequestsHandler.CancelDeleteRequestHandler))) - - t.Server.HTTP.Path("/loki/api/v1/cache/generation_numbers").Methods("GET").Handler(t.HTTPAuthMiddleware.Wrap(http.HandlerFunc(t.compactor.DeleteRequestsHandler.GetCacheGenerationNumberHandler))) + if t.Cfg.CompactorConfig.RetentionEnabled { + switch t.compactor.DeleteMode() { + case deletion.WholeStreamDeletion, deletion.FilterOnly, deletion.FilterAndDelete: + t.Server.HTTP.Path("/loki/api/v1/delete").Methods("PUT", "POST").Handler(t.HTTPAuthMiddleware.Wrap(http.HandlerFunc(t.compactor.DeleteRequestsHandler.AddDeleteRequestHandler))) + t.Server.HTTP.Path("/loki/api/v1/delete").Methods("GET").Handler(t.HTTPAuthMiddleware.Wrap(http.HandlerFunc(t.compactor.DeleteRequestsHandler.GetAllDeleteRequestsHandler))) + 
t.Server.HTTP.Path("/loki/api/v1/delete").Methods("DELETE").Handler(t.HTTPAuthMiddleware.Wrap(http.HandlerFunc(t.compactor.DeleteRequestsHandler.CancelDeleteRequestHandler))) + default: + break + } } return t.compactor, nil diff --git a/pkg/storage/chunk/bigchunk.go b/pkg/storage/chunk/bigchunk.go index 70b188faaad24..f131a23f7f803 100644 --- a/pkg/storage/chunk/bigchunk.go +++ b/pkg/storage/chunk/bigchunk.go @@ -8,6 +8,8 @@ import ( "github.com/prometheus/common/model" "github.com/prometheus/prometheus/tsdb/chunkenc" + + "github.com/grafana/loki/pkg/util/filter" ) const samplesPerChunk = 120 @@ -85,7 +87,7 @@ func (b *bigchunk) addNextChunk(start model.Time) error { return nil } -func (b *bigchunk) Rebound(start, end model.Time) (Data, error) { +func (b *bigchunk) Rebound(start, end model.Time, filter filter.Func) (Data, error) { return nil, errors.New("not implemented") } diff --git a/pkg/storage/chunk/interface.go b/pkg/storage/chunk/interface.go index be5f83211076b..159a8a788ec53 100644 --- a/pkg/storage/chunk/interface.go +++ b/pkg/storage/chunk/interface.go @@ -23,6 +23,8 @@ import ( "github.com/prometheus/common/model" "github.com/prometheus/prometheus/model/labels" errs "github.com/weaveworks/common/errors" + + "github.com/grafana/loki/pkg/util/filter" ) const ( @@ -47,7 +49,7 @@ type Data interface { // Rebound returns a smaller chunk that includes all samples between start and end (inclusive). // We do not want to change existing Slice implementations because // it is built specifically for query optimization and is a noop for some of the encodings. - Rebound(start, end model.Time) (Data, error) + Rebound(start, end model.Time, filter filter.Func) (Data, error) // Size returns the approximate length of the chunk in bytes. Size() int Utilization() float64 diff --git a/pkg/storage/stores/shipper/compactor/compactor.go b/pkg/storage/stores/shipper/compactor/compactor.go index fdffe742838f7..a6e250bae7174 100644 --- a/pkg/storage/stores/shipper/compactor/compactor.go +++ b/pkg/storage/stores/shipper/compactor/compactor.go @@ -229,7 +229,8 @@ func (c *Compactor) init(storageConfig storage.Config, schemaConfig config.Schem return err } - if c.deleteMode != deletion.Disabled { + switch c.deleteMode { + case deletion.WholeStreamDeletion, deletion.FilterOnly, deletion.FilterAndDelete: deletionWorkDir := filepath.Join(c.cfg.WorkingDirectory, "deletion") c.deleteRequestsStore, err = deletion.NewDeleteStore(deletionWorkDir, c.indexStorageClient) @@ -237,9 +238,14 @@ func (c *Compactor) init(storageConfig storage.Config, schemaConfig config.Schem return err } c.DeleteRequestsHandler = deletion.NewDeleteRequestHandler(c.deleteRequestsStore, time.Hour, r) - c.deleteRequestsManager = deletion.NewDeleteRequestsManager(c.deleteRequestsStore, c.cfg.DeleteRequestCancelPeriod, r) + c.deleteRequestsManager = deletion.NewDeleteRequestsManager( + c.deleteRequestsStore, + c.cfg.DeleteRequestCancelPeriod, + r, + c.deleteMode, + ) c.expirationChecker = newExpirationChecker(retention.NewExpirationChecker(limits), c.deleteRequestsManager) - } else { + default: c.expirationChecker = newExpirationChecker( retention.NewExpirationChecker(limits), // This is a dummy deletion ExpirationChecker that never expires anything @@ -567,7 +573,7 @@ func newExpirationChecker(retentionExpiryChecker, deletionExpiryChecker retentio return &expirationChecker{retentionExpiryChecker, deletionExpiryChecker} } -func (e *expirationChecker) Expired(ref retention.ChunkEntry, now model.Time) (bool, []model.Interval) { +func (e 
*expirationChecker) Expired(ref retention.ChunkEntry, now model.Time) (bool, []retention.IntervalFilter) { if expired, nonDeletedIntervals := e.retentionExpiryChecker.Expired(ref, now); expired { return expired, nonDeletedIntervals } diff --git a/pkg/storage/stores/shipper/compactor/deletion/delete_request.go b/pkg/storage/stores/shipper/compactor/deletion/delete_request.go index 27aa24ddd256d..d6e26d472ef51 100644 --- a/pkg/storage/stores/shipper/compactor/deletion/delete_request.go +++ b/pkg/storage/stores/shipper/compactor/deletion/delete_request.go @@ -1,10 +1,14 @@ package deletion import ( + "github.com/go-kit/log/level" "github.com/prometheus/common/model" "github.com/prometheus/prometheus/model/labels" + "github.com/grafana/loki/pkg/logql/syntax" "github.com/grafana/loki/pkg/storage/stores/shipper/compactor/retention" + "github.com/grafana/loki/pkg/util/filter" + util_log "github.com/grafana/loki/pkg/util/log" ) type DeleteRequest struct { @@ -15,21 +19,60 @@ type DeleteRequest struct { Status DeleteRequestStatus `json:"status"` CreatedAt model.Time `json:"created_at"` - UserID string `json:"-"` - matchers []*labels.Matcher `json:"-"` + UserID string `json:"-"` + matchers []*labels.Matcher `json:"-"` + logSelectorExpr syntax.LogSelectorExpr `json:"-"` } func (d *DeleteRequest) SetQuery(logQL string) error { d.Query = logQL - matchers, err := parseDeletionQuery(logQL) + logSelectorExpr, err := parseDeletionQuery(logQL) if err != nil { return err } - d.matchers = matchers + d.logSelectorExpr = logSelectorExpr + d.matchers = logSelectorExpr.Matchers() return nil } -func (d *DeleteRequest) IsDeleted(entry retention.ChunkEntry) (bool, []model.Interval) { +// FilterFunction returns a filter function that returns true if the given line matches +func (d *DeleteRequest) FilterFunction(labels labels.Labels) (filter.Func, error) { + if d.logSelectorExpr == nil { + err := d.SetQuery(d.Query) + if err != nil { + return nil, err + } + } + p, err := d.logSelectorExpr.Pipeline() + if err != nil { + return nil, err + } + + if !allMatch(d.matchers, labels) { + return func(s string) bool { + return false + }, nil + } + + f := p.ForStream(labels).ProcessString + return func(s string) bool { + result, _, skip := f(0, s) + return len(result) != 0 || skip + }, nil +} + +func allMatch(matchers []*labels.Matcher, labels labels.Labels) bool { + for _, m := range matchers { + if !m.Matches(labels.Get(m.Name)) { + return false + } + } + return true +} + +// IsDeleted checks if the given ChunkEntry will be deleted by this DeleteRequest. +// It also returns the intervals of the ChunkEntry that will remain before filtering. +func (d *DeleteRequest) IsDeleted(entry retention.ChunkEntry) (bool, []retention.IntervalFilter) { if d.UserID != unsafeGetString(entry.UserID) { return false, nil } @@ -48,23 +91,51 @@ func (d *DeleteRequest) IsDeleted(entry retention.ChunkEntry) (bool, []model.Int return false, nil } + ff, err := d.FilterFunction(entry.Labels) + if err != nil { + // The query in the delete request is checked when added to the table. + // So this error should not occur. 
+ level.Error(util_log.Logger).Log("msg", "unexpected error getting filter function", "err", err) + return false, nil + } + if d.StartTime <= entry.From && d.EndTime >= entry.Through { + // if the logSelectorExpr has a filter part return the chunk boundaries as intervals + if d.logSelectorExpr.HasFilter() { + return true, []retention.IntervalFilter{ + { + Interval: model.Interval{ + Start: entry.From, + End: entry.Through, + }, + Filter: ff, + }, + } + } + + // No filter in the logSelectorExpr so the whole chunk will be deleted return true, nil } - intervals := make([]model.Interval, 0, 2) + intervals := make([]retention.IntervalFilter, 0, 2) if d.StartTime > entry.From { - intervals = append(intervals, model.Interval{ - Start: entry.From, - End: d.StartTime - 1, + intervals = append(intervals, retention.IntervalFilter{ + Interval: model.Interval{ + Start: entry.From, + End: d.StartTime - 1, + }, + Filter: ff, }) } if d.EndTime < entry.Through { - intervals = append(intervals, model.Interval{ - Start: d.EndTime + 1, - End: entry.Through, + intervals = append(intervals, retention.IntervalFilter{ + Interval: model.Interval{ + Start: d.EndTime + 1, + End: entry.Through, + }, + Filter: ff, }) } diff --git a/pkg/storage/stores/shipper/compactor/deletion/delete_request_test.go b/pkg/storage/stores/shipper/compactor/deletion/delete_request_test.go index ec7232954d916..e609d10555408 100644 --- a/pkg/storage/stores/shipper/compactor/deletion/delete_request_test.go +++ b/pkg/storage/stores/shipper/compactor/deletion/delete_request_test.go @@ -17,6 +17,7 @@ func TestDeleteRequest_IsDeleted(t *testing.T) { user1 := "user1" lbl := `{foo="bar", fizz="buzz"}` + lblWithFilter := `{foo="bar", fizz="buzz"} |= "filter"` chunkEntry := retention.ChunkEntry{ ChunkRef: retention.ChunkRef{ @@ -29,7 +30,7 @@ func TestDeleteRequest_IsDeleted(t *testing.T) { type resp struct { isDeleted bool - nonDeletedIntervals []model.Interval + nonDeletedIntervals []retention.IntervalFilter } for _, tc := range []struct { @@ -50,6 +51,26 @@ func TestDeleteRequest_IsDeleted(t *testing.T) { nonDeletedIntervals: nil, }, }, + { + name: "whole chunk deleted with filter present", + deleteRequest: DeleteRequest{ + UserID: user1, + StartTime: now.Add(-3 * time.Hour), + EndTime: now.Add(-time.Hour), + Query: lblWithFilter, + }, + expectedResp: resp{ + isDeleted: true, + nonDeletedIntervals: []retention.IntervalFilter{ + { + Interval: model.Interval{ + Start: now.Add(-3 * time.Hour), + End: now.Add(-time.Hour), + }, + }, + }, + }, + }, { name: "chunk deleted from beginning", deleteRequest: DeleteRequest{ @@ -60,10 +81,12 @@ func TestDeleteRequest_IsDeleted(t *testing.T) { }, expectedResp: resp{ isDeleted: true, - nonDeletedIntervals: []model.Interval{ + nonDeletedIntervals: []retention.IntervalFilter{ { - Start: now.Add(-2*time.Hour) + 1, - End: now.Add(-time.Hour), + Interval: model.Interval{ + Start: now.Add(-2*time.Hour) + 1, + End: now.Add(-time.Hour), + }, }, }, }, @@ -78,10 +101,12 @@ func TestDeleteRequest_IsDeleted(t *testing.T) { }, expectedResp: resp{ isDeleted: true, - nonDeletedIntervals: []model.Interval{ + nonDeletedIntervals: []retention.IntervalFilter{ { - Start: now.Add(-3 * time.Hour), - End: now.Add(-2*time.Hour) - 1, + Interval: model.Interval{ + Start: now.Add(-3 * time.Hour), + End: now.Add(-2*time.Hour) - 1, + }, }, }, }, @@ -96,10 +121,32 @@ func TestDeleteRequest_IsDeleted(t *testing.T) { }, expectedResp: resp{ isDeleted: true, - nonDeletedIntervals: []model.Interval{ + nonDeletedIntervals: 
[]retention.IntervalFilter{ { - Start: now.Add(-3 * time.Hour), - End: now.Add(-2*time.Hour) - 1, + Interval: model.Interval{ + Start: now.Add(-3 * time.Hour), + End: now.Add(-2*time.Hour) - 1, + }, + }, + }, + }, + }, + { + name: "chunk deleted from end with filter", + deleteRequest: DeleteRequest{ + UserID: user1, + StartTime: now.Add(-2 * time.Hour), + EndTime: now, + Query: lblWithFilter, + }, + expectedResp: resp{ + isDeleted: true, + nonDeletedIntervals: []retention.IntervalFilter{ + { + Interval: model.Interval{ + Start: now.Add(-3 * time.Hour), + End: now.Add(-2*time.Hour) - 1, + }, }, }, }, @@ -114,14 +161,18 @@ func TestDeleteRequest_IsDeleted(t *testing.T) { }, expectedResp: resp{ isDeleted: true, - nonDeletedIntervals: []model.Interval{ + nonDeletedIntervals: []retention.IntervalFilter{ { - Start: now.Add(-3 * time.Hour), - End: now.Add(-(2*time.Hour + 30*time.Minute)) - 1, + Interval: model.Interval{ + Start: now.Add(-3 * time.Hour), + End: now.Add(-(2*time.Hour + 30*time.Minute)) - 1, + }, }, { - Start: now.Add(-(time.Hour + 30*time.Minute)) + 1, - End: now.Add(-time.Hour), + Interval: model.Interval{ + Start: now.Add(-(time.Hour + 30*time.Minute)) + 1, + End: now.Add(-time.Hour), + }, }, }, }, @@ -167,7 +218,16 @@ func TestDeleteRequest_IsDeleted(t *testing.T) { require.NoError(t, tc.deleteRequest.SetQuery(tc.deleteRequest.Query)) isDeleted, nonDeletedIntervals := tc.deleteRequest.IsDeleted(chunkEntry) require.Equal(t, tc.expectedResp.isDeleted, isDeleted) - require.Equal(t, tc.expectedResp.nonDeletedIntervals, nonDeletedIntervals) + for idx := range tc.expectedResp.nonDeletedIntervals { + require.Equal(t, + tc.expectedResp.nonDeletedIntervals[idx].Interval.Start, + nonDeletedIntervals[idx].Interval.Start, + ) + require.Equal(t, + tc.expectedResp.nonDeletedIntervals[idx].Interval.End, + nonDeletedIntervals[idx].Interval.End, + ) + } }) } } @@ -180,3 +240,44 @@ func mustParseLabel(input string) labels.Labels { return lbls } + +func TestDeleteRequest_FilterFunction(t *testing.T) { + dr := DeleteRequest{ + Query: `{foo="bar"} |= "some"`, + } + + lblStr := `{foo="bar"}` + lbls := mustParseLabel(lblStr) + + f, err := dr.FilterFunction(lbls) + require.NoError(t, err) + + require.True(t, f(`some line`)) + require.False(t, f("")) + require.False(t, f("other line")) + + lblStr = `{foo2="buzz"}` + lbls = mustParseLabel(lblStr) + + f, err = dr.FilterFunction(lbls) + require.NoError(t, err) + + require.False(t, f("")) + require.False(t, f("other line")) + require.False(t, f("some line")) + + dr = DeleteRequest{ + Query: `{namespace="default"}`, + } + + lblStr = `{namespace="default"}` + lbls = mustParseLabel(lblStr) + + f, err = dr.FilterFunction(lbls) + require.NoError(t, err) + + require.True(t, f(`some line`)) + require.True(t, f("")) + require.True(t, f("other line")) + +} diff --git a/pkg/storage/stores/shipper/compactor/deletion/delete_requests_manager.go b/pkg/storage/stores/shipper/compactor/deletion/delete_requests_manager.go index 76a73124b2db8..057a020fdf73f 100644 --- a/pkg/storage/stores/shipper/compactor/deletion/delete_requests_manager.go +++ b/pkg/storage/stores/shipper/compactor/deletion/delete_requests_manager.go @@ -24,21 +24,23 @@ type DeleteRequestsManager struct { deleteRequestCancelPeriod time.Duration deleteRequestsToProcess []DeleteRequest - chunkIntervalsToRetain []model.Interval + chunkIntervalsToRetain []retention.IntervalFilter // WARN: If by any chance we change deleteRequestsToProcessMtx to sync.RWMutex to be able to check multiple chunks at a time, // 
please take care of chunkIntervalsToRetain which should be unique per chunk. deleteRequestsToProcessMtx sync.Mutex metrics *deleteRequestsManagerMetrics wg sync.WaitGroup done chan struct{} + deletionMode Mode } -func NewDeleteRequestsManager(store DeleteRequestsStore, deleteRequestCancelPeriod time.Duration, registerer prometheus.Registerer) *DeleteRequestsManager { +func NewDeleteRequestsManager(store DeleteRequestsStore, deleteRequestCancelPeriod time.Duration, registerer prometheus.Registerer, mode Mode) *DeleteRequestsManager { dm := &DeleteRequestsManager{ deleteRequestsStore: store, deleteRequestCancelPeriod: deleteRequestCancelPeriod, metrics: newDeleteRequestsManagerMetrics(registerer), done: make(chan struct{}), + deletionMode: mode, } go dm.loop() @@ -123,7 +125,7 @@ func (d *DeleteRequestsManager) loadDeleteRequestsToProcess() error { return nil } -func (d *DeleteRequestsManager) Expired(ref retention.ChunkEntry, _ model.Time) (bool, []model.Interval) { +func (d *DeleteRequestsManager) Expired(ref retention.ChunkEntry, _ model.Time) (bool, []retention.IntervalFilter) { d.deleteRequestsToProcessMtx.Lock() defer d.deleteRequestsToProcessMtx.Unlock() @@ -132,20 +134,22 @@ func (d *DeleteRequestsManager) Expired(ref retention.ChunkEntry, _ model.Time) } d.chunkIntervalsToRetain = d.chunkIntervalsToRetain[:0] - d.chunkIntervalsToRetain = append(d.chunkIntervalsToRetain, model.Interval{ - Start: ref.From, - End: ref.Through, + d.chunkIntervalsToRetain = append(d.chunkIntervalsToRetain, retention.IntervalFilter{ + Interval: model.Interval{ + Start: ref.From, + End: ref.Through, + }, }) for _, deleteRequest := range d.deleteRequestsToProcess { - rebuiltIntervals := make([]model.Interval, 0, len(d.chunkIntervalsToRetain)) - for _, interval := range d.chunkIntervalsToRetain { + rebuiltIntervals := make([]retention.IntervalFilter, 0, len(d.chunkIntervalsToRetain)) + for _, ivf := range d.chunkIntervalsToRetain { entry := ref - entry.From = interval.Start - entry.Through = interval.End + entry.From = ivf.Interval.Start + entry.Through = ivf.Interval.End isDeleted, newIntervalsToRetain := deleteRequest.IsDeleted(entry) if !isDeleted { - rebuiltIntervals = append(rebuiltIntervals, interval) + rebuiltIntervals = append(rebuiltIntervals, ivf) } else { rebuiltIntervals = append(rebuiltIntervals, newIntervalsToRetain...) 
} @@ -158,7 +162,7 @@ func (d *DeleteRequestsManager) Expired(ref retention.ChunkEntry, _ model.Time) } } - if len(d.chunkIntervalsToRetain) == 1 && d.chunkIntervalsToRetain[0].Start == ref.From && d.chunkIntervalsToRetain[0].End == ref.Through { + if len(d.chunkIntervalsToRetain) == 1 && d.chunkIntervalsToRetain[0].Interval.Start == ref.From && d.chunkIntervalsToRetain[0].Interval.End == ref.Through { return false, nil } diff --git a/pkg/storage/stores/shipper/compactor/deletion/delete_requests_manager_test.go b/pkg/storage/stores/shipper/compactor/deletion/delete_requests_manager_test.go index 9e3f314580e20..6f5f95caf6108 100644 --- a/pkg/storage/stores/shipper/compactor/deletion/delete_requests_manager_test.go +++ b/pkg/storage/stores/shipper/compactor/deletion/delete_requests_manager_test.go @@ -17,7 +17,7 @@ const testUserID = "test-user" func TestDeleteRequestsManager_Expired(t *testing.T) { type resp struct { isExpired bool - nonDeletedIntervals []model.Interval + nonDeletedIntervals []retention.IntervalFilter } now := model.Now() @@ -141,18 +141,24 @@ func TestDeleteRequestsManager_Expired(t *testing.T) { }, expectedResp: resp{ isExpired: true, - nonDeletedIntervals: []model.Interval{ + nonDeletedIntervals: []retention.IntervalFilter{ { - Start: now.Add(-11*time.Hour) + 1, - End: now.Add(-10*time.Hour) - 1, + Interval: model.Interval{ + Start: now.Add(-11*time.Hour) + 1, + End: now.Add(-10*time.Hour) - 1, + }, }, { - Start: now.Add(-8*time.Hour) + 1, - End: now.Add(-6*time.Hour) - 1, + Interval: model.Interval{ + Start: now.Add(-8*time.Hour) + 1, + End: now.Add(-6*time.Hour) - 1, + }, }, { - Start: now.Add(-5*time.Hour) + 1, - End: now.Add(-2*time.Hour) - 1, + Interval: model.Interval{ + Start: now.Add(-5*time.Hour) + 1, + End: now.Add(-2*time.Hour) - 1, + }, }, }, }, @@ -207,12 +213,16 @@ func TestDeleteRequestsManager_Expired(t *testing.T) { }, } { t.Run(tc.name, func(t *testing.T) { - mgr := NewDeleteRequestsManager(mockDeleteRequestsStore{deleteRequests: tc.deleteRequestsFromStore}, time.Hour, nil) + mgr := NewDeleteRequestsManager(mockDeleteRequestsStore{deleteRequests: tc.deleteRequestsFromStore}, time.Hour, nil, WholeStreamDeletion) require.NoError(t, mgr.loadDeleteRequestsToProcess()) isExpired, nonDeletedIntervals := mgr.Expired(chunkEntry, model.Now()) require.Equal(t, tc.expectedResp.isExpired, isExpired) - require.Equal(t, tc.expectedResp.nonDeletedIntervals, nonDeletedIntervals) + for idx, interval := range nonDeletedIntervals { + require.Equal(t, tc.expectedResp.nonDeletedIntervals[idx].Interval.Start, interval.Interval.Start) + require.Equal(t, tc.expectedResp.nonDeletedIntervals[idx].Interval.End, interval.Interval.End) + require.NotNil(t, interval.Filter) + } }) } } diff --git a/pkg/storage/stores/shipper/compactor/deletion/validation.go b/pkg/storage/stores/shipper/compactor/deletion/validation.go index 1aacfbfbb50d8..68032e115fa03 100644 --- a/pkg/storage/stores/shipper/compactor/deletion/validation.go +++ b/pkg/storage/stores/shipper/compactor/deletion/validation.go @@ -3,26 +3,19 @@ package deletion import ( "errors" - "github.com/prometheus/prometheus/model/labels" - "github.com/grafana/loki/pkg/logql/syntax" ) var ( - errInvalidQuery = errors.New("invalid query expression") - errUnsupportedQuery = errors.New("unsupported query expression") + errInvalidQuery = errors.New("invalid query expression") ) // parseDeletionQuery checks if the given logQL is valid for deletions -func parseDeletionQuery(query string) ([]*labels.Matcher, error) { - expr, err := 
syntax.ParseExpr(query) +func parseDeletionQuery(query string) (syntax.LogSelectorExpr, error) { + logSelectorExpr, err := syntax.ParseLogSelector(query, false) if err != nil { return nil, errInvalidQuery } - if matchersExpr, ok := expr.(*syntax.MatchersExpr); ok { - return matchersExpr.Matchers(), nil - } - - return nil, errUnsupportedQuery + return logSelectorExpr, nil } diff --git a/pkg/storage/stores/shipper/compactor/deletion/validation_test.go b/pkg/storage/stores/shipper/compactor/deletion/validation_test.go index 8a97fb72e47ea..fa5486c0c830b 100644 --- a/pkg/storage/stores/shipper/compactor/deletion/validation_test.go +++ b/pkg/storage/stores/shipper/compactor/deletion/validation_test.go @@ -8,32 +8,32 @@ import ( func TestParseLogQLExpressionForDeletion(t *testing.T) { t.Run("invalid logql", func(t *testing.T) { - matchers, err := parseDeletionQuery("gjgjg ggj") - require.Nil(t, matchers) + logSelectorExpr, err := parseDeletionQuery("gjgjg ggj") + require.Nil(t, logSelectorExpr) require.ErrorIs(t, err, errInvalidQuery) }) t.Run("matcher expression", func(t *testing.T) { - matchers, err := parseDeletionQuery(`{env="dev", secret="true"}`) - require.NotNil(t, matchers) + logSelectorExpr, err := parseDeletionQuery(`{env="dev", secret="true"}`) + require.NotNil(t, logSelectorExpr) require.NoError(t, err) }) t.Run("pipeline expression with line filter", func(t *testing.T) { - matchers, err := parseDeletionQuery(`{env="dev", secret="true"} |= "social sec number"`) - require.Nil(t, matchers) - require.ErrorIs(t, err, errUnsupportedQuery) + logSelectorExpr, err := parseDeletionQuery(`{env="dev", secret="true"} |= "social sec number"`) + require.NotNil(t, logSelectorExpr) + require.NoError(t, err) }) - t.Run("pipeline expression with label filter ", func(t *testing.T) { - matchers, err := parseDeletionQuery(`{env="dev", secret="true"} | json bob="top.params[0]"`) - require.Nil(t, matchers) - require.ErrorIs(t, err, errUnsupportedQuery) + t.Run("pipeline expression with multiple line filters", func(t *testing.T) { + logSelectorExpr, err := parseDeletionQuery(`{env="dev", secret="true"} |= "social sec number" |~ "[abd]*" `) + require.NotNil(t, logSelectorExpr) + require.NoError(t, err) }) - t.Run("metrics query", func(t *testing.T) { - matchers, err := parseDeletionQuery(`count_over_time({job="mysql"}[5m])`) - require.Nil(t, matchers) - require.ErrorIs(t, err, errUnsupportedQuery) + t.Run("pipeline expression with invalid line filter", func(t *testing.T) { + logSelectorExpr, err := parseDeletionQuery(`{env="dev", secret="true"} |= social sec number`) + require.Nil(t, logSelectorExpr) + require.ErrorIs(t, err, errInvalidQuery) }) } diff --git a/pkg/storage/stores/shipper/compactor/retention/expiration.go b/pkg/storage/stores/shipper/compactor/retention/expiration.go index e9aee2744027c..8f9617b03741b 100644 --- a/pkg/storage/stores/shipper/compactor/retention/expiration.go +++ b/pkg/storage/stores/shipper/compactor/retention/expiration.go @@ -8,12 +8,21 @@ import ( "github.com/prometheus/common/model" "github.com/prometheus/prometheus/model/labels" + "github.com/grafana/loki/pkg/util/filter" util_log "github.com/grafana/loki/pkg/util/log" "github.com/grafana/loki/pkg/validation" ) +// IntervalFilter contains the interval to delete +// and the function that filters lines. These will be +// applied to a chunk. 
+type IntervalFilter struct { + Interval model.Interval + Filter filter.Func +} + type ExpirationChecker interface { - Expired(ref ChunkEntry, now model.Time) (bool, []model.Interval) + Expired(ref ChunkEntry, now model.Time) (bool, []IntervalFilter) IntervalMayHaveExpiredChunks(interval model.Interval, userID string) bool MarkPhaseStarted() MarkPhaseFailed() @@ -40,7 +49,7 @@ func NewExpirationChecker(limits Limits) ExpirationChecker { } // Expired tells if a ref chunk is expired based on retention rules. -func (e *expirationChecker) Expired(ref ChunkEntry, now model.Time) (bool, []model.Interval) { +func (e *expirationChecker) Expired(ref ChunkEntry, now model.Time) (bool, []IntervalFilter) { userID := unsafeGetString(ref.UserID) period := e.tenantsRetention.RetentionPeriodFor(userID, ref.Labels) return now.Sub(ref.Through) > period, nil @@ -88,7 +97,7 @@ func NeverExpiringExpirationChecker(limits Limits) ExpirationChecker { type neverExpiringExpirationChecker struct{} -func (e *neverExpiringExpirationChecker) Expired(ref ChunkEntry, now model.Time) (bool, []model.Interval) { +func (e *neverExpiringExpirationChecker) Expired(ref ChunkEntry, now model.Time) (bool, []IntervalFilter) { return false, nil } func (e *neverExpiringExpirationChecker) IntervalMayHaveExpiredChunks(interval model.Interval, userID string) bool { diff --git a/pkg/storage/stores/shipper/compactor/retention/expiration_test.go b/pkg/storage/stores/shipper/compactor/retention/expiration_test.go index e51a93cbe0ade..d7945ae924b1b 100644 --- a/pkg/storage/stores/shipper/compactor/retention/expiration_test.go +++ b/pkg/storage/stores/shipper/compactor/retention/expiration_test.go @@ -81,9 +81,9 @@ func Test_expirationChecker_Expired(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - actual, nonDeletedIntervals := e.Expired(tt.ref, model.Now()) + actual, nonDeletedIntervalFilters := e.Expired(tt.ref, model.Now()) require.Equal(t, tt.want, actual) - require.Nil(t, nonDeletedIntervals) + require.Nil(t, nonDeletedIntervalFilters) }) } } diff --git a/pkg/storage/stores/shipper/compactor/retention/retention.go b/pkg/storage/stores/shipper/compactor/retention/retention.go index f6a883b7cb0c9..033bd4195fd7a 100644 --- a/pkg/storage/stores/shipper/compactor/retention/retention.go +++ b/pkg/storage/stores/shipper/compactor/retention/retention.go @@ -151,11 +151,11 @@ func markforDelete(ctx context.Context, tableName string, marker MarkerStorageWr seriesMap.Add(c.SeriesID, c.UserID, c.Labels) // see if the chunk is deleted completely or partially - if expired, nonDeletedIntervals := expiration.Expired(c, now); expired { - if len(nonDeletedIntervals) > 0 { - wroteChunks, err := chunkRewriter.rewriteChunk(ctx, c, nonDeletedIntervals) + if expired, nonDeletedIntervalFilters := expiration.Expired(c, now); expired { + if len(nonDeletedIntervalFilters) > 0 { + wroteChunks, err := chunkRewriter.rewriteChunk(ctx, c, nonDeletedIntervalFilters) if err != nil { - return false, false, fmt.Errorf("failed to rewrite chunk %s for interval %s with error %s", c.ChunkID, nonDeletedIntervals, err) + return false, false, fmt.Errorf("failed to rewrite chunk %s for intervals %+v with error %s", c.ChunkID, nonDeletedIntervalFilters, err) } if wroteChunks { @@ -173,7 +173,7 @@ func markforDelete(ctx context.Context, tableName string, marker MarkerStorageWr // Mark the chunk for deletion only if it is completely deleted, or this is the last table that the chunk is index in. 
// For a partially deleted chunk, if we delete the source chunk before all the tables which index it are processed then // the retention would fail because it would fail to find it in the storage. - if len(nonDeletedIntervals) == 0 || c.Through <= tableInterval.End { + if len(nonDeletedIntervalFilters) == 0 || c.Through <= tableInterval.End { if err := marker.Put(c.ChunkID); err != nil { return false, false, err } @@ -307,7 +307,7 @@ func newChunkRewriter(chunkClient client.Client, schemaCfg config.PeriodConfig, }, nil } -func (c *chunkRewriter) rewriteChunk(ctx context.Context, ce ChunkEntry, intervals []model.Interval) (bool, error) { +func (c *chunkRewriter) rewriteChunk(ctx context.Context, ce ChunkEntry, intervalFilters []IntervalFilter) (bool, error) { userID := unsafeGetString(ce.UserID) chunkID := unsafeGetString(ce.ChunkID) @@ -327,9 +327,17 @@ func (c *chunkRewriter) rewriteChunk(ctx context.Context, ce ChunkEntry, interva wroteChunks := false - for _, interval := range intervals { - newChunkData, err := chks[0].Data.Rebound(interval.Start, interval.End) + for _, ivf := range intervalFilters { + start := ivf.Interval.Start + end := ivf.Interval.End + + newChunkData, err := chks[0].Data.Rebound(start, end, ivf.Filter) if err != nil { + if errors.Is(err, chunk.ErrSliceNoDataInRange) { + level.Info(util_log.Logger).Log("msg", "Rebound leaves an empty chunk", "chunk ref", string(ce.ChunkRef.ChunkID)) + // skip empty chunks + continue + } return false, err } @@ -341,8 +349,8 @@ func (c *chunkRewriter) rewriteChunk(ctx context.Context, ce ChunkEntry, interva newChunk := chunk.NewChunk( userID, chks[0].FingerprintModel(), chks[0].Metric, facade, - interval.Start, - interval.End, + start, + end, ) err = newChunk.Encode() @@ -350,7 +358,7 @@ func (c *chunkRewriter) rewriteChunk(ctx context.Context, ce ChunkEntry, interva return false, err } - entries, err := c.seriesStoreSchema.GetChunkWriteEntries(interval.Start, interval.End, userID, "logs", newChunk.Metric, c.scfg.ExternalKey(newChunk.ChunkRef)) + entries, err := c.seriesStoreSchema.GetChunkWriteEntries(newChunk.From, newChunk.Through, userID, "logs", newChunk.Metric, c.scfg.ExternalKey(newChunk.ChunkRef)) if err != nil { return false, err } diff --git a/pkg/storage/stores/shipper/compactor/retention/retention_test.go b/pkg/storage/stores/shipper/compactor/retention/retention_test.go index d973fc0a5b6db..62ff331ee22b9 100644 --- a/pkg/storage/stores/shipper/compactor/retention/retention_test.go +++ b/pkg/storage/stores/shipper/compactor/retention/retention_test.go @@ -313,9 +313,9 @@ func TestChunkRewriter(t *testing.T) { minListMarkDelay = 1 * time.Second now := model.Now() for _, tt := range []struct { - name string - chunk chunk.Chunk - rewriteIntervals []model.Interval + name string + chunk chunk.Chunk + rewriteIntervalFilters []IntervalFilter }{ { name: "no rewrites", @@ -328,56 +328,87 @@ func TestChunkRewriter(t *testing.T) { { name: "rewrite first half", chunk: createChunk(t, "1", labels.Labels{labels.Label{Name: "foo", Value: "bar"}}, now.Add(-2*time.Hour), now), - rewriteIntervals: []model.Interval{ + rewriteIntervalFilters: []IntervalFilter{ { - Start: now.Add(-2 * time.Hour), - End: now.Add(-1 * time.Hour), + Interval: model.Interval{ + Start: now.Add(-2 * time.Hour), + End: now.Add(-1 * time.Hour), + }, }, }, }, { name: "rewrite second half", chunk: createChunk(t, "1", labels.Labels{labels.Label{Name: "foo", Value: "bar"}}, now.Add(-2*time.Hour), now), - rewriteIntervals: []model.Interval{ + rewriteIntervalFilters: 
[]IntervalFilter{ { - Start: now.Add(-time.Hour), - End: now, + Interval: model.Interval{ + Start: now.Add(-time.Hour), + End: now, + }, }, }, }, { name: "rewrite multiple intervals", chunk: createChunk(t, "1", labels.Labels{labels.Label{Name: "foo", Value: "bar"}}, now.Add(-12*time.Hour), now), - rewriteIntervals: []model.Interval{ + rewriteIntervalFilters: []IntervalFilter{ { - Start: now.Add(-12 * time.Hour), - End: now.Add(-10 * time.Hour), + Interval: model.Interval{ + Start: now.Add(-12 * time.Hour), + End: now.Add(-10 * time.Hour), + }, }, { - Start: now.Add(-9 * time.Hour), - End: now.Add(-5 * time.Hour), + Interval: model.Interval{ + Start: now.Add(-9 * time.Hour), + End: now.Add(-5 * time.Hour), + }, }, { - Start: now.Add(-2 * time.Hour), - End: now, + Interval: model.Interval{ + Start: now.Add(-2 * time.Hour), + End: now, + }, }, }, }, { name: "rewrite chunk spanning multiple days with multiple intervals", chunk: createChunk(t, "1", labels.Labels{labels.Label{Name: "foo", Value: "bar"}}, now.Add(-72*time.Hour), now), - rewriteIntervals: []model.Interval{ + rewriteIntervalFilters: []IntervalFilter{ { - Start: now.Add(-71 * time.Hour), - End: now.Add(-47 * time.Hour), + Interval: model.Interval{ + Start: now.Add(-71 * time.Hour), + End: now.Add(-47 * time.Hour), + }, + }, + { + Interval: model.Interval{ + Start: now.Add(-40 * time.Hour), + End: now.Add(-30 * time.Hour), + }, }, { - Start: now.Add(-40 * time.Hour), - End: now.Add(-30 * time.Hour), + Interval: model.Interval{ + Start: now.Add(-2 * time.Hour), + End: now, + }, }, + }, + }, + { + name: "remove no lines using a filter function", + chunk: createChunk(t, "1", labels.Labels{labels.Label{Name: "foo", Value: "bar"}}, now.Add(-2*time.Hour), now), + rewriteIntervalFilters: []IntervalFilter{ { - Start: now.Add(-2 * time.Hour), - End: now, + Interval: model.Interval{ + Start: now.Add(-1 * time.Hour), + End: now, + }, + Filter: func(s string) bool { + return false + }, }, }, }, @@ -401,9 +432,9 @@ func TestChunkRewriter(t *testing.T) { cr, err := newChunkRewriter(chunkClient, store.schemaCfg.Configs[0], indexTable.name, bucket) require.NoError(t, err) - wroteChunks, err := cr.rewriteChunk(context.Background(), entryFromChunk(store.schemaCfg, tt.chunk), tt.rewriteIntervals) + wroteChunks, err := cr.rewriteChunk(context.Background(), entryFromChunk(store.schemaCfg, tt.chunk), tt.rewriteIntervalFilters) require.NoError(t, err) - if len(tt.rewriteIntervals) == 0 { + if len(tt.rewriteIntervalFilters) == 0 { require.False(t, wroteChunks) } return nil @@ -416,9 +447,9 @@ func TestChunkRewriter(t *testing.T) { chunks := store.GetChunks(tt.chunk.UserID, tt.chunk.From, tt.chunk.Through, tt.chunk.Metric) // number of chunks should be the new re-written chunks + the source chunk - require.Len(t, chunks, len(tt.rewriteIntervals)+1) - for _, interval := range tt.rewriteIntervals { - expectedChk := createChunk(t, tt.chunk.UserID, labels.Labels{labels.Label{Name: "foo", Value: "bar"}}, interval.Start, interval.End) + require.Len(t, chunks, len(tt.rewriteIntervalFilters)+1) + for _, ivf := range tt.rewriteIntervalFilters { + expectedChk := createChunk(t, tt.chunk.UserID, labels.Labels{labels.Label{Name: "foo", Value: "bar"}}, ivf.Interval.Start, ivf.Interval.End) for i, chk := range chunks { if store.schemaCfg.ExternalKey(chk.ChunkRef) == store.schemaCfg.ExternalKey(expectedChk.ChunkRef) { chunks = append(chunks[:i], chunks[i+1:]...) 
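The rewrite cases above are driven by the interval arithmetic that `DeleteRequest.IsDeleted` performs earlier in this patch: when a delete request only partially overlaps a chunk, the sub-ranges of the chunk lying outside the request are handed back as intervals to retain, with `-1`/`+1` adjustments because both the chunk bounds and the request bounds are inclusive. A self-contained sketch of just that arithmetic, with plain int64 timestamps standing in for `model.Time` and the user, label, and filter checks omitted:

```go
package main

import "fmt"

// interval is a simplified stand-in for model.Interval.
type interval struct{ Start, End int64 }

// retainedIntervals mirrors the core of DeleteRequest.IsDeleted: given a
// chunk spanning [from, through] and a delete request spanning
// [start, end] (all bounds inclusive), it returns the sub-intervals of
// the chunk that survive. An empty result means the whole chunk is
// deleted (line filtering aside).
func retainedIntervals(from, through, start, end int64) []interval {
	if start <= from && end >= through {
		return nil // the request covers the entire chunk
	}
	var out []interval
	if start > from {
		// keep the part of the chunk before the delete request begins
		out = append(out, interval{Start: from, End: start - 1})
	}
	if end < through {
		// keep the part of the chunk after the delete request ends
		out = append(out, interval{Start: end + 1, End: through})
	}
	return out
}

func main() {
	// Chunk covers [0, 100], delete request covers [30, 60]:
	// [0, 29] and [61, 100] must be retained and rewritten.
	fmt.Println(retainedIntervals(0, 100, 30, 60)) // [{0 29} {61 100}]
}
```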
@@ -450,8 +481,8 @@ func (s *seriesCleanedRecorder) Cleanup(userID []byte, lbls labels.Labels) error } type chunkExpiry struct { - isExpired bool - nonDeletedIntervals []model.Interval + isExpired bool + nonDeletedIntervalFilters []IntervalFilter } type mockExpirationChecker struct { @@ -463,9 +494,9 @@ func newMockExpirationChecker(chunksExpiry map[string]chunkExpiry) mockExpiratio return mockExpirationChecker{chunksExpiry: chunksExpiry} } -func (m mockExpirationChecker) Expired(ref ChunkEntry, now model.Time) (bool, []model.Interval) { +func (m mockExpirationChecker) Expired(ref ChunkEntry, now model.Time) (bool, []IntervalFilter) { ce := m.chunksExpiry[string(ref.ChunkID)] - return ce.isExpired, ce.nonDeletedIntervals + return ce.isExpired, ce.nonDeletedIntervalFilters } func (m mockExpirationChecker) DropFromIndex(ref ChunkEntry, tableEndTime model.Time, now model.Time) bool { @@ -534,9 +565,11 @@ func TestMarkForDelete_SeriesCleanup(t *testing.T) { expiry: []chunkExpiry{ { isExpired: true, - nonDeletedIntervals: []model.Interval{{ - Start: todaysTableInterval.Start, - End: todaysTableInterval.Start.Add(15 * time.Minute), + nonDeletedIntervalFilters: []IntervalFilter{{ + Interval: model.Interval{ + Start: todaysTableInterval.Start, + End: todaysTableInterval.Start.Add(15 * time.Minute), + }, }}, }, }, @@ -586,9 +619,11 @@ func TestMarkForDelete_SeriesCleanup(t *testing.T) { }, { isExpired: true, - nonDeletedIntervals: []model.Interval{{ - Start: todaysTableInterval.Start, - End: todaysTableInterval.Start.Add(15 * time.Minute), + nonDeletedIntervalFilters: []IntervalFilter{{ + Interval: model.Interval{ + Start: todaysTableInterval.Start, + End: todaysTableInterval.Start.Add(15 * time.Minute), + }, }}, }, }, @@ -610,9 +645,11 @@ func TestMarkForDelete_SeriesCleanup(t *testing.T) { expiry: []chunkExpiry{ { isExpired: true, - nonDeletedIntervals: []model.Interval{{ - Start: todaysTableInterval.Start, - End: now, + nonDeletedIntervalFilters: []IntervalFilter{{ + Interval: model.Interval{ + Start: todaysTableInterval.Start, + End: now, + }, }}, }, }, @@ -634,9 +671,11 @@ func TestMarkForDelete_SeriesCleanup(t *testing.T) { expiry: []chunkExpiry{ { isExpired: true, - nonDeletedIntervals: []model.Interval{{ - Start: todaysTableInterval.Start.Add(-30 * time.Minute), - End: now, + nonDeletedIntervalFilters: []IntervalFilter{{ + Interval: model.Interval{ + Start: todaysTableInterval.Start.Add(-30 * time.Minute), + End: now, + }, }}, }, }, diff --git a/pkg/util/filter/filter_function.go b/pkg/util/filter/filter_function.go new file mode 100644 index 0000000000000..58ba74fe8b7e0 --- /dev/null +++ b/pkg/util/filter/filter_function.go @@ -0,0 +1,3 @@ +package filter + +type Func func(string) bool From 084a0804f164487b41b023e5b4ba1c62254da328 Mon Sep 17 00:00:00 2001 From: Christian Simon Date: Fri, 29 Apr 2022 14:00:36 +0100 Subject: [PATCH 030/223] docker-compose: Disable auth in Grafana (#5998) --- tools/dev/loki-boltdb-storage-s3/docker-compose.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tools/dev/loki-boltdb-storage-s3/docker-compose.yml b/tools/dev/loki-boltdb-storage-s3/docker-compose.yml index 742d40f3d4c60..c1c88f0493591 100644 --- a/tools/dev/loki-boltdb-storage-s3/docker-compose.yml +++ b/tools/dev/loki-boltdb-storage-s3/docker-compose.yml @@ -240,6 +240,8 @@ services: - querier environment: - GF_PATHS_PROVISIONING=/etc/config/grafana/provisioning + - GF_AUTH_ANONYMOUS_ENABLED=true + - GF_AUTH_ANONYMOUS_ORG_ROLE=Admin ports: - 3000:3000 volumes: From 
6c1a62d8298748ba2fc92f40bb904fdf96178aa1 Mon Sep 17 00:00:00 2001 From: Christian Simon Date: Fri, 29 Apr 2022 14:01:38 +0100 Subject: [PATCH 031/223] Ensure proto definitions are formatted [2/2] (#6000) * Update build image to 0.20.4 * Add a fmt target for proto definitions This uses buf to ensure the proto definitions are more consistently formatted. * Update formatted files --- .drone/drone.yml | 18 +- Makefile | 22 +- pkg/ingester/checkpoint.pb.go | 58 +++--- pkg/ingester/checkpoint.proto | 41 +++- pkg/ingester/client/ingester.pb.go | 14 +- pkg/ingester/client/ingester.proto | 48 +++-- pkg/logproto/logproto.pb.go | 196 +++++++++--------- pkg/logproto/logproto.proto | 161 +++++++++----- pkg/logproto/metrics.proto | 34 +-- pkg/logqlmodel/stats/stats.proto | 48 +++-- .../frontend/v1/frontendv1pb/frontend.proto | 5 +- .../frontend/v2/frontendv2pb/frontend.proto | 17 +- pkg/querier/queryrange/queryrange.pb.go | 118 +++++------ pkg/querier/queryrange/queryrange.proto | 75 +++++-- .../queryrangebase/queryrange.pb.go | 12 +- .../queryrangebase/queryrange.proto | 46 ++-- pkg/querier/stats/stats.proto | 5 +- pkg/ruler/base/ruler.proto | 55 +++-- pkg/ruler/rulespb/rules.pb.go | 10 +- pkg/ruler/rulespb/rules.proto | 19 +- pkg/scheduler/schedulerpb/scheduler.proto | 7 +- pkg/storage/chunk/client/grpc/grpc.proto | 146 +++++++------ .../series/index/caching_index_client.proto | 24 ++- .../indexgateway/indexgatewaypb/gateway.pb.go | 75 ++++--- .../indexgateway/indexgatewaypb/gateway.proto | 83 +++++--- 25 files changed, 790 insertions(+), 547 deletions(-) diff --git a/.drone/drone.yml index bef97d742cca1..90a2691ee8388 100644 --- a/.drone/drone.yml +++ b/.drone/drone.yml @@ -40,27 +40,27 @@ steps: - commands: - make BUILD_IN_CONTAINER=false check-drone-drift depends_on: - clone - image: grafana/loki-build-image:0.20.3 + image: grafana/loki-build-image:0.20.4 name: check-drone-drift - commands: - make BUILD_IN_CONTAINER=false check-generated-files depends_on: - clone - image: grafana/loki-build-image:0.20.3 + image: grafana/loki-build-image:0.20.4 name: check-generated-files - commands: - make BUILD_IN_CONTAINER=false test depends_on: - clone - check-generated-files - image: grafana/loki-build-image:0.20.3 + image: grafana/loki-build-image:0.20.4 name: test - commands: - make BUILD_IN_CONTAINER=false lint depends_on: - clone - check-generated-files - image: grafana/loki-build-image:0.20.3 + image: grafana/loki-build-image:0.20.4 name: lint - commands: - make BUILD_IN_CONTAINER=false check-mod depends_on: - clone - test - lint - image: grafana/loki-build-image:0.20.3 + image: grafana/loki-build-image:0.20.4 name: check-mod - commands: - apk add make bash && make lint-scripts depends_on: - clone - check-generated-files - image: grafana/loki-build-image:0.20.3 + image: grafana/loki-build-image:0.20.4 name: loki - commands: - make BUILD_IN_CONTAINER=false validate-example-configs depends_on: - loki - image: grafana/loki-build-image:0.20.3 + image: grafana/loki-build-image:0.20.4 name: validate-example-configs - commands: - make BUILD_IN_CONTAINER=false check-example-config-doc depends_on: - clone - image: grafana/loki-build-image:0.20.3 + image: grafana/loki-build-image:0.20.4 name: check-example-config-doc trigger: event: @@ -1118,6 +1118,6 @@ kind: secret name: deploy_config --- kind: signature -hmac: 3e10123a8afd1837124250ba4eb4e6db1f9a324fd47e01371a33e23cf7df0a8a +hmac: 96966b3eec7f8976408f2c2f2c36a29b87a694c8a32afb2fdb16209e1f9e7521 ...
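Beyond the build-image bump above, the visible effect of this patch is easiest to see on proto fields that carry several gogoproto options: `buf format -w` (wired into the `fmt-proto` Makefile target that follows) breaks them out one option per line inside the brackets and moves `option go_package` below the imports. A small illustration of the bracket style, modeled on the `checkpoint.proto` hunk further down; the `Example` message here is hypothetical:

```proto
syntax = "proto3";

package example;

import "github.com/gogo/protobuf/gogoproto/gogo.proto";
import "google/protobuf/timestamp.proto";

message Example {
  // Before buf format, both options shared one line:
  //   google.protobuf.Timestamp from = 1 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
  // After buf format, each option gets its own line:
  google.protobuf.Timestamp from = 1 [
    (gogoproto.stdtime) = true,
    (gogoproto.nullable) = false
  ];
}
```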
diff --git a/Makefile b/Makefile index 7b17411cff567..278aec0cd3629 100644 --- a/Makefile +++ b/Makefile @@ -27,7 +27,7 @@ DOCKER_IMAGE_DIRS := $(patsubst %/Dockerfile,%,$(DOCKERFILES)) BUILD_IN_CONTAINER ?= true # ensure you run `make drone` after changing this -BUILD_IMAGE_VERSION := 0.20.3 +BUILD_IMAGE_VERSION := 0.20.4 # Docker image info IMAGE_PREFIX ?= grafana @@ -115,8 +115,8 @@ binfmt: all: promtail logcli loki loki-canary # This is really a check for the CI to make sure generated files are built and checked in manually -check-generated-files: yacc ragel protos clients/pkg/promtail/server/ui/assets_vfsdata.go - @if ! (git diff --exit-code $(YACC_GOS) $(RAGEL_GOS) $(PROTO_GOS) $(PROMTAIL_GENERATED_FILE)); then \ +check-generated-files: yacc ragel fmt-proto protos clients/pkg/promtail/server/ui/assets_vfsdata.go + @if ! (git diff --exit-code $(YACC_GOS) $(RAGEL_GOS) $(PROTO_DEFS) $(PROTO_GOS) $(PROMTAIL_GENERATED_FILE)); then \ echo "\nChanges found in generated files"; \ echo "Run 'make check-generated-files' and commit the changes to fix this error."; \ echo "If you are actively developing these files you can ignore this error"; \ @@ -620,6 +620,22 @@ fmt-jsonnet: @find . -name 'vendor' -prune -o -name '*.libsonnet' -print -o -name '*.jsonnet' -print | \ xargs -n 1 -- jsonnetfmt -i +fmt-proto: +ifeq ($(BUILD_IN_CONTAINER),true) + # I wish we could make this a multiline variable however you can't pass more than simple arguments to them + @mkdir -p $(shell pwd)/.pkg + @mkdir -p $(shell pwd)/.cache + $(SUDO) docker run $(RM) $(TTY) -i \ + -v $(shell pwd)/.cache:/go/cache$(MOUNT_FLAGS) \ + -v $(shell pwd)/.pkg:/go/pkg$(MOUNT_FLAGS) \ + -v $(shell pwd):/src/loki$(MOUNT_FLAGS) \ + $(IMAGE_PREFIX)/loki-build-image:$(BUILD_IMAGE_VERSION) $@; +else + echo '$(PROTO_DEFS)' | \ + xargs -n 1 -- buf format -w +endif + + lint-scripts: # Ignore https://github.com/koalaman/shellcheck/wiki/SC2312 @find . 
-name '*.sh' -not -path '*/vendor/*' -print0 | \ diff --git a/pkg/ingester/checkpoint.pb.go b/pkg/ingester/checkpoint.pb.go index 1748aa0db746a..75a0f32a578df 100644 --- a/pkg/ingester/checkpoint.pb.go +++ b/pkg/ingester/checkpoint.pb.go @@ -242,40 +242,40 @@ func init() { func init() { proto.RegisterFile("pkg/ingester/checkpoint.proto", fileDescriptor_00f4b7152db9bdb5) } var fileDescriptor_00f4b7152db9bdb5 = []byte{ - // 523 bytes of a gzipped FileDescriptorProto + // 521 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x52, 0x31, 0x8f, 0xd3, 0x30, 0x14, 0x8e, 0xdb, 0x34, 0xd7, 0xba, 0xb0, 0x58, 0x08, 0x99, 0x22, 0xdc, 0xea, 0xa6, 0x2e, 0x24, 0xa2, 0x30, 0xc0, 0x82, 0x74, 0x3d, 0x84, 0x84, 0xd4, 0x01, 0x85, 0x63, 0x61, 0x41, 0x6e, 0xe2, 0x26, 0x51, 0xd3, 0x38, 0xb2, 0x9d, 0xa1, 0x1b, 0x3f, 0xe1, 0x06, 0x7e, 0x04, 0x3f, 0xe5, 0xc6, 0x8e, 0x27, 0x90, 0x0e, 0x9a, 0x2e, 0x8c, 0xf7, 0x13, 0x90, 0x9d, 0xa4, 0x57, 0xb6, 0xeb, 0xf6, - 0xbe, 0xf7, 0xde, 0xf7, 0x9e, 0xfd, 0xbd, 0x0f, 0x3e, 0xcb, 0x97, 0x91, 0x97, 0x64, 0x11, 0x93, - 0x8a, 0x09, 0x2f, 0x88, 0x59, 0xb0, 0xcc, 0x79, 0x92, 0x29, 0x37, 0x17, 0x5c, 0x71, 0xf4, 0x30, - 0xe5, 0xcb, 0xe4, 0x6b, 0x53, 0x1f, 0x0c, 0x23, 0xce, 0xa3, 0x94, 0x79, 0xa6, 0x38, 0x2f, 0x16, - 0x9e, 0x4a, 0x56, 0x4c, 0x2a, 0xba, 0xca, 0xab, 0xfe, 0xc1, 0xf3, 0x28, 0x51, 0x71, 0x31, 0x77, - 0x03, 0xbe, 0xf2, 0x22, 0x1e, 0xf1, 0xbb, 0x4e, 0x8d, 0x0c, 0x30, 0x51, 0xdd, 0xfe, 0x54, 0x6f, - 0x4f, 0x79, 0x54, 0x15, 0x9a, 0xa0, 0x2a, 0x9e, 0xfe, 0x6a, 0xc1, 0xce, 0x79, 0x5c, 0x64, 0x4b, - 0xf4, 0x1a, 0xda, 0x0b, 0xc1, 0x57, 0x18, 0x8c, 0xc0, 0xb8, 0x3f, 0x19, 0xb8, 0xd5, 0x2b, 0xdc, - 0x66, 0xb6, 0x7b, 0xd1, 0xbc, 0x62, 0xda, 0xbd, 0xba, 0x19, 0x5a, 0x97, 0xbf, 0x87, 0xc0, 0x37, - 0x0c, 0xf4, 0x0a, 0xb6, 0x14, 0xc7, 0xad, 0x23, 0x78, 0x2d, 0xc5, 0xd1, 0x14, 0xf6, 0x16, 0x69, - 0x21, 0x63, 0x16, 0x9e, 0x29, 0xdc, 0x3e, 0x82, 0x7c, 0x47, 0x43, 0xef, 0x61, 0x3f, 0xa5, 0x52, - 0x7d, 0xce, 0x43, 0xaa, 0x58, 0x88, 0xed, 0x23, 0xa6, 0x1c, 0x12, 0xd1, 0x63, 0xe8, 0x04, 0x29, - 0x97, 0x2c, 0xc4, 0x9d, 0x11, 0x18, 0x77, 0xfd, 0x1a, 0xe9, 0xbc, 0x5c, 0x67, 0x01, 0x0b, 0xb1, - 0x53, 0xe5, 0x2b, 0x84, 0x10, 0xb4, 0x43, 0xaa, 0x28, 0x3e, 0x19, 0x81, 0xf1, 0x03, 0xdf, 0xc4, - 0x3a, 0x17, 0x33, 0x1a, 0xe2, 0x6e, 0x95, 0xd3, 0xf1, 0xe9, 0xf7, 0x36, 0x74, 0x3e, 0x31, 0x91, - 0x30, 0xa9, 0x47, 0x15, 0x92, 0x89, 0x0f, 0xef, 0x8c, 0xc0, 0x3d, 0xbf, 0x46, 0x68, 0x04, 0xfb, - 0x0b, 0x7d, 0x7a, 0x91, 0x8b, 0x24, 0x53, 0x46, 0x45, 0xdb, 0x3f, 0x4c, 0xa1, 0x14, 0x3a, 0x29, - 0x9d, 0xb3, 0x54, 0xe2, 0xf6, 0xa8, 0x3d, 0xee, 0x4f, 0x9e, 0xb8, 0xfb, 0x1b, 0xce, 0x58, 0x44, - 0x83, 0xf5, 0x4c, 0x57, 0x3f, 0xd2, 0x44, 0x4c, 0xdf, 0xe8, 0xef, 0xfd, 0xbc, 0x19, 0xbe, 0x38, - 0x74, 0x88, 0xa0, 0x0b, 0x9a, 0x51, 0x4f, 0x9b, 0xcc, 0x3b, 0xb4, 0x82, 0x6b, 0x78, 0x67, 0x21, - 0xcd, 0x15, 0x13, 0x7e, 0xbd, 0x03, 0x4d, 0xa0, 0x13, 0x68, 0x3f, 0x48, 0x6c, 0x9b, 0x6d, 0x8f, - 0xdc, 0xff, 0xdc, 0xe9, 0x1a, 0xb3, 0x4c, 0x6d, 0xbd, 0xc8, 0xaf, 0x3b, 0x6b, 0x03, 0x74, 0x8e, - 0x34, 0xc0, 0x00, 0x76, 0xf5, 0x0d, 0x66, 0x49, 0xc6, 0x8c, 0xbc, 0x3d, 0x7f, 0x8f, 0x11, 0x86, - 0x27, 0x2c, 0x53, 0x62, 0x7d, 0xae, 0x8c, 0xc6, 0x6d, 0xbf, 0x81, 0xda, 0x36, 0x71, 0x12, 0xc5, - 0x4c, 0xaa, 0x0b, 0x69, 0xb4, 0xbe, 0xb7, 0x6d, 0xf6, 0xb4, 0xe9, 0xdb, 0xcd, 0x96, 0x58, 0xd7, - 0x5b, 0x62, 0xdd, 0x6e, 0x09, 0xf8, 0x56, 0x12, 0xf0, 0xa3, 0x24, 0xe0, 0xaa, 0x24, 0x60, 0x53, - 0x12, 0xf0, 0xa7, 0x24, 0xe0, 0x6f, 0x49, 0xac, 0xdb, 0x92, 0x80, 0xcb, 0x1d, 0xb1, 0x36, 0x3b, - 0x62, 
0x5d, 0xef, 0x88, 0xf5, 0xa5, 0xdb, 0x68, 0x30, 0x77, 0xcc, 0xa2, 0x97, 0xff, 0x02, 0x00, - 0x00, 0xff, 0xff, 0xee, 0x20, 0xe0, 0x2d, 0xd8, 0x03, 0x00, 0x00, + 0xbe, 0xe7, 0xf7, 0xbd, 0x4f, 0xfe, 0xde, 0x07, 0x9f, 0xe5, 0xcb, 0xc8, 0x4b, 0xb2, 0x88, 0x49, + 0xc5, 0x84, 0x17, 0xc4, 0x2c, 0x58, 0xe6, 0x3c, 0xc9, 0x94, 0x9b, 0x0b, 0xae, 0x38, 0x7a, 0x98, + 0xf2, 0x65, 0xf2, 0xb5, 0x79, 0x1f, 0x3c, 0x8f, 0x12, 0x15, 0x17, 0x73, 0x37, 0xe0, 0x2b, 0x2f, + 0xe2, 0x11, 0xf7, 0xcc, 0xd4, 0xbc, 0x58, 0x18, 0x64, 0x80, 0xa9, 0x2a, 0xf6, 0x60, 0x18, 0x71, + 0x1e, 0xa5, 0xec, 0x6e, 0x4a, 0x25, 0x2b, 0x26, 0x15, 0x5d, 0xe5, 0xf5, 0xc0, 0x53, 0xad, 0x9e, + 0xf2, 0xa8, 0x62, 0x36, 0x45, 0xf5, 0x78, 0xfa, 0xab, 0x05, 0x3b, 0xe7, 0x71, 0x91, 0x2d, 0xd1, + 0x6b, 0x68, 0x2f, 0x04, 0x5f, 0x61, 0x30, 0x02, 0xe3, 0xfe, 0x64, 0xe0, 0x56, 0x6b, 0xdd, 0x66, + 0xad, 0x7b, 0xd1, 0xac, 0x9d, 0x76, 0xaf, 0x6e, 0x86, 0xd6, 0xe5, 0xef, 0x21, 0xf0, 0x0d, 0x03, + 0xbd, 0x82, 0x2d, 0xc5, 0x71, 0xeb, 0x08, 0x5e, 0x4b, 0x71, 0x34, 0x85, 0xbd, 0x45, 0x5a, 0xc8, + 0x98, 0x85, 0x67, 0x0a, 0xb7, 0x8f, 0x20, 0xdf, 0xd1, 0xd0, 0x7b, 0xd8, 0x4f, 0xa9, 0x54, 0x9f, + 0xf3, 0x90, 0x2a, 0x16, 0x62, 0xfb, 0x88, 0x2d, 0x87, 0x44, 0xf4, 0x18, 0x3a, 0x41, 0xca, 0x25, + 0x0b, 0x71, 0x67, 0x04, 0xc6, 0x5d, 0xbf, 0x46, 0xba, 0x2f, 0xd7, 0x59, 0xc0, 0x42, 0xec, 0x54, + 0xfd, 0x0a, 0x21, 0x04, 0xed, 0x90, 0x2a, 0x8a, 0x4f, 0x46, 0x60, 0xfc, 0xc0, 0x37, 0xb5, 0xee, + 0xc5, 0x8c, 0x86, 0xb8, 0x5b, 0xf5, 0x74, 0x7d, 0xfa, 0xbd, 0x0d, 0x9d, 0x4f, 0x4c, 0x24, 0x4c, + 0xea, 0x55, 0x85, 0x64, 0xe2, 0xc3, 0x3b, 0x63, 0x70, 0xcf, 0xaf, 0x11, 0x1a, 0xc1, 0xfe, 0x42, + 0x9f, 0x5e, 0xe4, 0x22, 0xc9, 0x94, 0x71, 0xd1, 0xf6, 0x0f, 0x5b, 0x28, 0x85, 0x4e, 0x4a, 0xe7, + 0x2c, 0x95, 0xb8, 0x3d, 0x6a, 0x8f, 0xfb, 0x93, 0x27, 0xee, 0xfe, 0x86, 0x33, 0x16, 0xd1, 0x60, + 0x3d, 0xd3, 0xaf, 0x1f, 0x69, 0x22, 0xa6, 0x6f, 0xf4, 0xf7, 0x7e, 0xde, 0x0c, 0x5f, 0x1c, 0x46, + 0x48, 0xd0, 0x05, 0xcd, 0xa8, 0xa7, 0x43, 0xe6, 0x1d, 0x46, 0xc1, 0x35, 0xbc, 0xb3, 0x90, 0xe6, + 0x8a, 0x09, 0xbf, 0xd6, 0x40, 0x13, 0xe8, 0x04, 0x3a, 0x0f, 0x12, 0xdb, 0x46, 0xed, 0x91, 0xfb, + 0x5f, 0x3a, 0x5d, 0x13, 0x96, 0xa9, 0xad, 0x85, 0xfc, 0x7a, 0xb2, 0x0e, 0x40, 0xe7, 0xc8, 0x00, + 0x0c, 0x60, 0x57, 0xdf, 0x60, 0x96, 0x64, 0xcc, 0xd8, 0xdb, 0xf3, 0xf7, 0x18, 0x61, 0x78, 0xc2, + 0x32, 0x25, 0xd6, 0xe7, 0xca, 0x78, 0xdc, 0xf6, 0x1b, 0xa8, 0x63, 0x13, 0x27, 0x51, 0xcc, 0xa4, + 0xba, 0x90, 0xc6, 0xeb, 0x7b, 0xc7, 0x66, 0x4f, 0x9b, 0xbe, 0xdd, 0x6c, 0x89, 0x75, 0xbd, 0x25, + 0xd6, 0xed, 0x96, 0x80, 0x6f, 0x25, 0x01, 0x3f, 0x4a, 0x02, 0xae, 0x4a, 0x02, 0x36, 0x25, 0x01, + 0x7f, 0x4a, 0x02, 0xfe, 0x96, 0xc4, 0xba, 0x2d, 0x09, 0xb8, 0xdc, 0x11, 0x6b, 0xb3, 0x23, 0xd6, + 0xf5, 0x8e, 0x58, 0x5f, 0xba, 0x8d, 0x07, 0x73, 0xc7, 0x08, 0xbd, 0xfc, 0x17, 0x00, 0x00, 0xff, + 0xff, 0x9c, 0x7d, 0xef, 0x56, 0xd8, 0x03, 0x00, 0x00, } func (this *Chunk) Equal(that interface{}) bool { diff --git a/pkg/ingester/checkpoint.proto b/pkg/ingester/checkpoint.proto index b1f530dd1a9e8..dade99fe59e2f 100644 --- a/pkg/ingester/checkpoint.proto +++ b/pkg/ingester/checkpoint.proto @@ -2,19 +2,31 @@ syntax = "proto3"; package loki_ingester; -option go_package = "ingester"; - -import "google/protobuf/timestamp.proto"; import "github.com/gogo/protobuf/gogoproto/gogo.proto"; +import "google/protobuf/timestamp.proto"; import "pkg/logproto/logproto.proto"; +option go_package = "ingester"; + // Chunk is a {de,}serializable intermediate type for chunkDesc which allows // efficient loading/unloading to disk during WAL checkpoint 
recovery.
 message Chunk {
-  google.protobuf.Timestamp from = 1 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
-  google.protobuf.Timestamp to = 2 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
-  google.protobuf.Timestamp flushedAt = 3 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
-  google.protobuf.Timestamp lastUpdated = 4 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
+  google.protobuf.Timestamp from = 1 [
+    (gogoproto.stdtime) = true,
+    (gogoproto.nullable) = false
+  ];
+  google.protobuf.Timestamp to = 2 [
+    (gogoproto.stdtime) = true,
+    (gogoproto.nullable) = false
+  ];
+  google.protobuf.Timestamp flushedAt = 3 [
+    (gogoproto.stdtime) = true,
+    (gogoproto.nullable) = false
+  ];
+  google.protobuf.Timestamp lastUpdated = 4 [
+    (gogoproto.stdtime) = true,
+    (gogoproto.nullable) = false
+  ];
   bool closed = 5;
   bool synced = 6;
   // data to be unmarshaled into a MemChunk
@@ -28,15 +40,24 @@ message Series {
   string userID = 1;
   // post mapped fingerprint is necessary because subsequent wal writes will reference it.
   uint64 fingerprint = 2;
-  repeated logproto.LegacyLabelPair labels = 3 [(gogoproto.nullable) = false, (gogoproto.customtype) = "github.com/grafana/loki/pkg/logproto.LabelAdapter"];
+  repeated logproto.LegacyLabelPair labels = 3 [
+    (gogoproto.nullable) = false,
+    (gogoproto.customtype) = "github.com/grafana/loki/pkg/logproto.LabelAdapter"
+  ];
   repeated Chunk chunks = 4 [(gogoproto.nullable) = false];
   // most recently pushed timestamp.
-  google.protobuf.Timestamp to = 5 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
+  google.protobuf.Timestamp to = 5 [
+    (gogoproto.stdtime) = true,
+    (gogoproto.nullable) = false
+  ];
   // most recently pushed line.
   string lastLine = 6;
   // highest counter value for pushes to this stream.
   // Used to skip already applied entries during WAL replay.
   int64 entryCt = 7;
   // highest timestamp pushed to this stream.
- google.protobuf.Timestamp highestTs = 8 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false]; + google.protobuf.Timestamp highestTs = 8 [ + (gogoproto.stdtime) = true, + (gogoproto.nullable) = false + ]; } diff --git a/pkg/ingester/client/ingester.pb.go b/pkg/ingester/client/ingester.pb.go index 79efd548c0b09..76471344e128d 100644 --- a/pkg/ingester/client/ingester.pb.go +++ b/pkg/ingester/client/ingester.pb.go @@ -1359,8 +1359,8 @@ var fileDescriptor_5b6c87318632a5b2 = []byte{ 0xf7, 0xa0, 0xd3, 0x70, 0xfc, 0x0e, 0xe3, 0x82, 0x85, 0x8d, 0x96, 0xeb, 0x30, 0x5f, 0xa4, 0xeb, 0xa5, 0x6e, 0x18, 0x88, 0x00, 0x43, 0xb2, 0xee, 0xee, 0x9a, 0xef, 0x76, 0x1c, 0xb1, 0x1f, 0xed, 0x2e, 0xb5, 0x02, 0xaf, 0xd1, 0x09, 0x3a, 0x41, 0x43, 0x42, 0x76, 0xa3, 0x3d, 0xb9, 0x92, 0x0b, - 0xf9, 0x4b, 0x7d, 0x6a, 0x9a, 0x31, 0xbd, 0x1b, 0x74, 0x94, 0xc3, 0x63, 0x22, 0x74, 0x5a, 0x5c, - 0xfb, 0xce, 0xe5, 0x7c, 0xc9, 0x0f, 0xe5, 0x24, 0x2b, 0x50, 0xb1, 0x19, 0x6d, 0xdb, 0xec, 0x61, + 0xf9, 0x4b, 0x7d, 0x6a, 0x9e, 0x8b, 0xe9, 0xdd, 0xa0, 0xa3, 0x1c, 0xc9, 0x0f, 0xed, 0x34, 0x73, + 0x4e, 0x8f, 0x89, 0xd0, 0x69, 0x71, 0xe5, 0x23, 0x2b, 0x50, 0xb1, 0x19, 0x6d, 0xdb, 0xec, 0x61, 0xc4, 0xb8, 0xc0, 0xcb, 0x30, 0xf5, 0x30, 0x62, 0xa1, 0xc3, 0xf8, 0x3c, 0xba, 0x30, 0xbe, 0x58, 0x59, 0x9e, 0x5f, 0xea, 0x8b, 0x5a, 0xba, 0x17, 0xb1, 0xf0, 0x50, 0x43, 0xed, 0x04, 0x48, 0x56, 0xa1, 0xaa, 0x28, 0x78, 0x37, 0xf0, 0x39, 0xc3, 0x57, 0x61, 0x2a, 0x64, 0x3c, 0x72, 0x45, 0xc2, @@ -1371,7 +1371,7 @@ var fileDescriptor_5b6c87318632a5b2 = []byte{ 0x8f, 0x0f, 0x1e, 0x71, 0x83, 0xee, 0x32, 0x77, 0x53, 0x01, 0xec, 0x14, 0x49, 0x7e, 0x41, 0x70, 0x72, 0xed, 0x1b, 0xe6, 0x75, 0x5d, 0x1a, 0xfe, 0x27, 0x32, 0xaf, 0x0d, 0xc8, 0x3c, 0x3b, 0x4c, 0x26, 0xcf, 0xe8, 0xbc, 0x0d, 0x33, 0xb9, 0x00, 0xe3, 0x0f, 0x01, 0xe4, 0x6e, 0xd9, 0x9c, 0x9e, - 0x5c, 0x4a, 0x8b, 0x20, 0xde, 0x72, 0x5b, 0xfa, 0x6e, 0x94, 0x1e, 0x3d, 0x5d, 0x30, 0xec, 0x0c, + 0x5c, 0x4a, 0x0b, 0x24, 0xde, 0x72, 0x5b, 0xfa, 0x6e, 0x94, 0x1e, 0x3d, 0x5d, 0x30, 0xec, 0x0c, 0x9a, 0xfc, 0x84, 0x60, 0x4e, 0xb2, 0x6d, 0x8b, 0x90, 0x51, 0x2f, 0xe5, 0x5c, 0x85, 0x4a, 0x6b, 0x3f, 0xf2, 0x0f, 0x72, 0xa4, 0xe7, 0xb2, 0xf2, 0xfa, 0xb4, 0xab, 0x31, 0x50, 0x73, 0x67, 0xbf, 0x2a, 0x08, 0x1b, 0x7b, 0x25, 0x61, 0xdb, 0x70, 0xaa, 0x90, 0x8c, 0xd7, 0x70, 0xda, 0x3f, 0x10, @@ -1390,16 +1390,16 @@ var fileDescriptor_5b6c87318632a5b2 = []byte{ 0x8c, 0x97, 0xeb, 0x6d, 0x7c, 0x05, 0x4a, 0x6d, 0x2a, 0xa8, 0x94, 0x59, 0x59, 0x3e, 0x9f, 0x2d, 0x87, 0x81, 0x00, 0xd8, 0x12, 0x4a, 0x6e, 0x03, 0x8e, 0x5d, 0x3c, 0xbf, 0xc3, 0x35, 0x98, 0xe0, 0xb1, 0x41, 0x5f, 0x90, 0x85, 0x22, 0x53, 0x41, 0x91, 0xad, 0xd0, 0xe4, 0x37, 0x04, 0xd6, 0xa6, - 0xea, 0x2c, 0x37, 0x83, 0x30, 0x5f, 0x81, 0x6f, 0xf8, 0x35, 0xfc, 0x08, 0xaa, 0x49, 0x89, 0x37, + 0x6a, 0x1e, 0x37, 0x83, 0x30, 0x5f, 0x81, 0x6f, 0xf8, 0x35, 0xfc, 0x08, 0xaa, 0x49, 0x89, 0x37, 0x39, 0x13, 0x2f, 0x7f, 0x11, 0x2b, 0x09, 0x7c, 0x9b, 0x09, 0x72, 0x1b, 0x16, 0x86, 0xea, 0xd6, - 0x21, 0x59, 0x84, 0x49, 0xd5, 0x34, 0x75, 0x4c, 0xea, 0xfd, 0x47, 0x43, 0x7d, 0x6a, 0x6b, 0x3f, + 0x21, 0x59, 0x84, 0x49, 0xd5, 0x17, 0x75, 0x4c, 0xea, 0xfd, 0x47, 0x43, 0x7d, 0x6a, 0x6b, 0x3f, 0x99, 0x87, 0xd3, 0x9a, 0x6c, 0x93, 0x09, 0x1a, 0x47, 0x39, 0xa9, 0xc4, 0x3b, 0x70, 0x66, 0xc0, 0xa3, 0xe9, 0xe3, 0xa6, 0xa3, 0x6d, 0x69, 0x5f, 0x2d, 0x6c, 0x90, 0x7e, 0x93, 0x22, 0xc9, 0x3f, 0x08, 0x4e, 0x14, 0x5e, 0xd2, 0x38, 0x66, 0x7b, 0x61, 0xe0, 0x35, 0x93, 0x93, 0xf7, 0xcb, 0xa4, 0x16, 0xdb, 0xd7, 0xb5, 0x79, 0xbd, 0x9d, 0xad, 0xa3, 0xb1, 0x5c, 0x1d, 0xb9, 0x30, 0x29, 0xef, 0x54, 0xbf, 0xb1, 0xa4, 
0x52, 0x36, 0x58, 0x87, 0xb6, 0x0e, 0x65, 0x88, 0xee, 0x52, 0x27, 0xbc, 0x71, 0x3d, 0x7e, 0x25, 0xff, 0x7c, 0xba, 0x70, 0x25, 0x3b, 0x8d, 0x84, 0x74, 0x8f, 0xfa, 0xb4, - 0xe1, 0x06, 0x07, 0x4e, 0x23, 0x3b, 0x5a, 0xa8, 0x14, 0xac, 0xb4, 0x69, 0x57, 0xb0, 0xd0, 0xd6, + 0xe1, 0x06, 0x07, 0x4e, 0x23, 0x3b, 0x59, 0xa8, 0x14, 0xac, 0xb4, 0x69, 0x57, 0xb0, 0xd0, 0xd6, 0x7b, 0xe0, 0x06, 0x4c, 0xaa, 0x67, 0x7f, 0xbe, 0x24, 0x77, 0x9b, 0xcd, 0x26, 0x2d, 0xdb, 0x1d, 0x34, 0x8c, 0x7c, 0x8f, 0x60, 0x42, 0x9d, 0xf5, 0x4d, 0x55, 0x93, 0x09, 0xd3, 0xcc, 0x6f, 0x05, 0x6d, 0xc7, 0xef, 0xc8, 0xcb, 0x3c, 0x61, 0xa7, 0x6b, 0x8c, 0xf5, 0x25, 0x8b, 0x6f, 0x6d, 0x55, @@ -1428,7 +1428,7 @@ var fileDescriptor_5b6c87318632a5b2 = []byte{ 0xe2, 0x99, 0x85, 0xbe, 0xed, 0x59, 0xe8, 0xd7, 0x9e, 0x85, 0x1e, 0xf5, 0x2c, 0xf4, 0xb8, 0x67, 0xa1, 0xbf, 0x7a, 0x16, 0xfa, 0xbb, 0x67, 0x19, 0x2f, 0x7a, 0x16, 0xfa, 0xe1, 0xb9, 0x65, 0x3c, 0x7e, 0x6e, 0x19, 0x4f, 0x9e, 0x5b, 0xc6, 0x97, 0x97, 0x47, 0xb5, 0x98, 0xc2, 0x1f, 0xe7, 0xdd, - 0x49, 0x79, 0x91, 0xaf, 0xfe, 0x1b, 0x00, 0x00, 0xff, 0xff, 0x59, 0x14, 0xfe, 0xa8, 0x56, 0x0f, + 0x49, 0x79, 0x91, 0xaf, 0xfe, 0x1b, 0x00, 0x00, 0xff, 0xff, 0xc9, 0xb9, 0xeb, 0xf0, 0x56, 0x0f, 0x00, 0x00, } diff --git a/pkg/ingester/client/ingester.proto b/pkg/ingester/client/ingester.proto index c24cfc67637f1..547c228eb1823 100644 --- a/pkg/ingester/client/ingester.proto +++ b/pkg/ingester/client/ingester.proto @@ -2,30 +2,37 @@ syntax = "proto3"; package ingesterpb; -option go_package = "github.com/grafana/loki/pkg/ingester/client"; - import "github.com/gogo/protobuf/gogoproto/gogo.proto"; -import "pkg/logproto/metrics.proto"; import "pkg/logproto/logproto.proto"; +import "pkg/logproto/metrics.proto"; +option go_package = "github.com/grafana/loki/pkg/ingester/client"; option (gogoproto.marshaler_all) = true; option (gogoproto.unmarshaler_all) = true; service Ingester { - rpc Push(logproto.WriteRequest) returns (logproto.WriteResponse) {}; - rpc Query(QueryRequest) returns (QueryResponse) {}; - rpc QueryStream(QueryRequest) returns (stream QueryStreamResponse) {}; - rpc QueryExemplars(ExemplarQueryRequest) returns (ExemplarQueryResponse) {}; - - rpc LabelValues(LabelValuesRequest) returns (LabelValuesResponse) {}; - rpc LabelNames(LabelNamesRequest) returns (LabelNamesResponse) {}; - rpc UserStats(UserStatsRequest) returns (UserStatsResponse) {}; - rpc AllUserStats(UserStatsRequest) returns (UsersStatsResponse) {}; - rpc MetricsForLabelMatchers(MetricsForLabelMatchersRequest) returns (MetricsForLabelMatchersResponse) {}; - rpc MetricsMetadata(MetricsMetadataRequest) returns (MetricsMetadataResponse) {}; + rpc Push(logproto.WriteRequest) returns (logproto.WriteResponse) {} + + rpc Query(QueryRequest) returns (QueryResponse) {} + + rpc QueryStream(QueryRequest) returns (stream QueryStreamResponse) {} + + rpc QueryExemplars(ExemplarQueryRequest) returns (ExemplarQueryResponse) {} + + rpc LabelValues(LabelValuesRequest) returns (LabelValuesResponse) {} + + rpc LabelNames(LabelNamesRequest) returns (LabelNamesResponse) {} + + rpc UserStats(UserStatsRequest) returns (UserStatsResponse) {} + + rpc AllUserStats(UserStatsRequest) returns (UsersStatsResponse) {} + + rpc MetricsForLabelMatchers(MetricsForLabelMatchersRequest) returns (MetricsForLabelMatchersResponse) {} + + rpc MetricsMetadata(MetricsMetadataRequest) returns (MetricsMetadataResponse) {} // TransferChunks allows leaving ingester (client) to stream chunks directly to joining ingesters (server). 
- rpc TransferChunks(stream TimeSeriesChunk) returns (TransferChunksResponse) {}; + rpc TransferChunks(stream TimeSeriesChunk) returns (TransferChunksResponse) {} } message ReadRequest { @@ -110,8 +117,7 @@ message MetricsForLabelMatchersResponse { repeated logproto.Metric metric = 1; } -message MetricsMetadataRequest { -} +message MetricsMetadataRequest {} message MetricsMetadataResponse { repeated logproto.MetricMetadata metadata = 1; @@ -120,7 +126,10 @@ message MetricsMetadataResponse { message TimeSeriesChunk { string from_ingester_id = 1; string user_id = 2; - repeated logproto.LegacyLabelPair labels = 3 [(gogoproto.nullable) = false, (gogoproto.customtype) = "github.com/grafana/loki/pkg/logproto.LabelAdapter"]; + repeated logproto.LegacyLabelPair labels = 3 [ + (gogoproto.nullable) = false, + (gogoproto.customtype) = "github.com/grafana/loki/pkg/logproto.LabelAdapter" + ]; repeated Chunk chunks = 4 [(gogoproto.nullable) = false]; } @@ -131,8 +140,7 @@ message Chunk { bytes data = 4; } -message TransferChunksResponse { -} +message TransferChunksResponse {} message LabelMatchers { repeated LabelMatcher matchers = 1; diff --git a/pkg/logproto/logproto.pb.go b/pkg/logproto/logproto.pb.go index dfa717e57b0b9..d90b70ebb3bca 100644 --- a/pkg/logproto/logproto.pb.go +++ b/pkg/logproto/logproto.pb.go @@ -1691,111 +1691,111 @@ func init() { func init() { proto.RegisterFile("pkg/logproto/logproto.proto", fileDescriptor_c28a5f14f1f4c79a) } var fileDescriptor_c28a5f14f1f4c79a = []byte{ - // 1654 bytes of a gzipped FileDescriptorProto + // 1653 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xc4, 0x58, 0x49, 0x6f, 0x1b, 0xc9, 0x15, 0x66, 0x71, 0x69, 0x92, 0x8f, 0x8b, 0x88, 0x92, 0x2c, 0x71, 0x38, 0x33, 0x6c, 0x4e, 0x63, 0x30, 0x26, 0xbc, 0x90, 0xb1, 0xb2, 0xd8, 0x96, 0xb3, 0x40, 0xb4, 0x62, 0x5b, 0xb6, 0x12, 0xdb, 0x2d, 0x05, 0x06, 0x0c, 0x04, 0x46, 0x8b, 0x2c, 0x91, 0x0d, 0xb1, 0xd9, 0x74, 0x57, 0xd3, 0x80, 0x80, 0x00, 0xc9, 0x0f, 0x48, 0x00, 0xe7, 0x14, 0xe4, 0x9e, 0x43, 0x90, 0x43, 0x0e, 0xf9, 0x13, 0x71, 0x6e, 0x3e, 0x1a, 0x3e, 0x30, 0x31, 0x7d, 0x09, 0x88, 0x1c, 0xfc, 0x0b, 0x82, 0xa0, 0xb6, - 0x66, 0x91, 0x96, 0x62, 0xd3, 0x97, 0xb9, 0x88, 0xf5, 0x5e, 0xbd, 0xad, 0xbe, 0x7a, 0x4b, 0xb5, + 0x66, 0x91, 0x96, 0x62, 0xd3, 0x97, 0xb9, 0x88, 0xf5, 0x5e, 0xd5, 0xdb, 0xbe, 0x7a, 0x4b, 0xb5, 0xe0, 0xf3, 0xe1, 0x71, 0xb7, 0xd9, 0xf7, 0xbb, 0xc3, 0xc0, 0x0f, 0xfd, 0x68, 0xd1, 0xe0, 0x7f, - 0x71, 0x46, 0xd1, 0x15, 0xb3, 0xeb, 0xfb, 0xdd, 0x3e, 0x69, 0x72, 0xea, 0x70, 0x74, 0xd4, 0x0c, - 0x5d, 0x8f, 0xd0, 0xd0, 0xf1, 0x86, 0x42, 0xb4, 0x72, 0xb9, 0xeb, 0x86, 0xbd, 0xd1, 0x61, 0xa3, - 0xed, 0x7b, 0xcd, 0xae, 0xdf, 0xf5, 0x67, 0x92, 0x8c, 0x12, 0xd6, 0xd9, 0x4a, 0x8a, 0xd7, 0xa4, - 0xdb, 0xa7, 0x7d, 0xcf, 0xef, 0x90, 0x7e, 0x93, 0x86, 0x4e, 0x48, 0xc5, 0x5f, 0x21, 0x61, 0x3d, - 0x82, 0xdc, 0x83, 0x11, 0xed, 0xd9, 0xe4, 0xe9, 0x88, 0xd0, 0x10, 0xdf, 0x81, 0x34, 0x0d, 0x03, - 0xe2, 0x78, 0xb4, 0x8c, 0x6a, 0x89, 0x7a, 0x6e, 0x73, 0xa3, 0x11, 0x05, 0xbb, 0xcf, 0x37, 0xb6, - 0x3b, 0xce, 0x30, 0x24, 0x41, 0xeb, 0xdc, 0xeb, 0xb1, 0x69, 0x08, 0xd6, 0x74, 0x6c, 0x2a, 0x2d, - 0x5b, 0x2d, 0xac, 0x22, 0xe4, 0x85, 0x61, 0x3a, 0xf4, 0x07, 0x94, 0x58, 0x7f, 0x8f, 0x43, 0xfe, - 0xe1, 0x88, 0x04, 0x27, 0xca, 0x55, 0x05, 0x32, 0x94, 0xf4, 0x49, 0x3b, 0xf4, 0x83, 0x32, 0xaa, - 0xa1, 0x7a, 0xd6, 0x8e, 0x68, 0xbc, 0x06, 0xa9, 0xbe, 0xeb, 0xb9, 0x61, 0x39, 0x5e, 0x43, 0xf5, - 0x82, 0x2d, 0x08, 0xbc, 0x05, 0x29, 0x1a, 0x3a, 0x41, 0x58, 0x4e, 0xd4, 0x50, 0x3d, 0xb7, 0x59, - 
0x69, 0x08, 0xb4, 0x1a, 0x0a, 0x83, 0xc6, 0x81, 0x42, 0xab, 0x95, 0x79, 0x31, 0x36, 0x63, 0xcf, - 0xff, 0x69, 0x22, 0x5b, 0xa8, 0xe0, 0x1f, 0x40, 0x82, 0x0c, 0x3a, 0xe5, 0xe4, 0x12, 0x9a, 0x4c, - 0x01, 0x5f, 0x81, 0x6c, 0xc7, 0x0d, 0x48, 0x3b, 0x74, 0xfd, 0x41, 0x39, 0x55, 0x43, 0xf5, 0xe2, - 0xe6, 0xea, 0x0c, 0x92, 0x1d, 0xb5, 0x65, 0xcf, 0xa4, 0xf0, 0x25, 0x30, 0x68, 0xcf, 0x09, 0x3a, - 0xb4, 0x9c, 0xae, 0x25, 0xea, 0xd9, 0xd6, 0xda, 0x74, 0x6c, 0x96, 0x04, 0xe7, 0x92, 0xef, 0xb9, - 0x21, 0xf1, 0x86, 0xe1, 0x89, 0x2d, 0x65, 0xf0, 0x05, 0x48, 0x77, 0x48, 0x9f, 0x84, 0x84, 0x96, - 0x33, 0x1c, 0xf1, 0x92, 0x66, 0x9e, 0x6f, 0xd8, 0x4a, 0xe0, 0x6e, 0x32, 0x63, 0x94, 0xd2, 0xd6, - 0x7f, 0x11, 0xe0, 0x7d, 0xc7, 0x1b, 0xf6, 0xc9, 0x47, 0xe3, 0x19, 0x21, 0x17, 0xff, 0x64, 0xe4, - 0x12, 0xcb, 0x22, 0x37, 0x83, 0x21, 0xb9, 0x1c, 0x0c, 0xa9, 0x0f, 0xc0, 0x60, 0xed, 0x81, 0x21, - 0x58, 0x1f, 0xca, 0xa1, 0xd9, 0x99, 0x13, 0xea, 0x34, 0xa5, 0xd9, 0x69, 0x12, 0x3c, 0x4e, 0xeb, - 0xd7, 0x50, 0x90, 0x38, 0x8a, 0x4c, 0xc5, 0xdb, 0x1f, 0x5d, 0x03, 0xc5, 0x17, 0x63, 0x13, 0xcd, - 0xea, 0x20, 0x4a, 0x7e, 0x7c, 0x91, 0xfb, 0x0e, 0xa9, 0xc4, 0x7b, 0xa5, 0x21, 0x4a, 0x6e, 0x77, - 0xd0, 0x25, 0x94, 0x29, 0x26, 0x19, 0x54, 0xb6, 0x90, 0xb1, 0x7e, 0x05, 0xab, 0x73, 0xd7, 0x29, - 0xc3, 0xb8, 0x06, 0x06, 0x25, 0x81, 0x4b, 0x54, 0x14, 0x1a, 0x20, 0xfb, 0x9c, 0xaf, 0xb9, 0xe7, - 0xb4, 0x2d, 0xe5, 0x97, 0xf3, 0xfe, 0x57, 0x04, 0xf9, 0x3d, 0xe7, 0x90, 0xf4, 0x55, 0x1e, 0x61, - 0x48, 0x0e, 0x1c, 0x8f, 0x48, 0x3c, 0xf9, 0x1a, 0xaf, 0x83, 0xf1, 0xcc, 0xe9, 0x8f, 0x88, 0x30, - 0x99, 0xb1, 0x25, 0xb5, 0x6c, 0x45, 0xa2, 0x4f, 0xae, 0x48, 0x14, 0xe5, 0x95, 0x75, 0x1e, 0x0a, - 0x32, 0x5e, 0x09, 0xd4, 0x2c, 0x38, 0x06, 0x54, 0x56, 0x05, 0x67, 0xfd, 0x1e, 0x41, 0x61, 0xee, - 0xbe, 0xb0, 0x05, 0x46, 0x9f, 0xa9, 0x52, 0x71, 0xb8, 0x16, 0x4c, 0xc7, 0xa6, 0xe4, 0xd8, 0xf2, - 0x97, 0xdd, 0x3e, 0x19, 0x84, 0x1c, 0xf7, 0x38, 0xc7, 0x7d, 0x7d, 0x86, 0xfb, 0x4f, 0x07, 0x61, - 0x70, 0xa2, 0x2e, 0x7f, 0x85, 0xa1, 0xc8, 0x5a, 0x9f, 0x14, 0xb7, 0xd5, 0x02, 0x7f, 0x06, 0xc9, - 0x9e, 0x43, 0x7b, 0x1c, 0x94, 0x64, 0x2b, 0x35, 0x1d, 0x9b, 0xe8, 0xb2, 0xcd, 0x59, 0xd6, 0x33, - 0xc8, 0xeb, 0x46, 0xf0, 0x1d, 0xc8, 0x46, 0x2d, 0x9e, 0x07, 0xf5, 0xff, 0xa1, 0x28, 0x4a, 0x9f, - 0xf1, 0x90, 0x72, 0x40, 0x66, 0xca, 0xf8, 0x0b, 0x48, 0xf6, 0xdd, 0x01, 0xe1, 0x17, 0x94, 0x6d, - 0x65, 0xa6, 0x63, 0x93, 0xd3, 0x36, 0xff, 0x6b, 0x79, 0x60, 0x88, 0x1c, 0xc3, 0x5f, 0x2f, 0x7a, - 0x4c, 0xb4, 0x0c, 0x61, 0x51, 0xb7, 0x66, 0x42, 0x8a, 0xa3, 0xc8, 0xcd, 0xa1, 0x56, 0x76, 0x3a, - 0x36, 0x05, 0xc3, 0x16, 0x3f, 0xcc, 0x9d, 0x76, 0x46, 0xee, 0x8e, 0xd1, 0xf2, 0x98, 0xb7, 0x21, - 0xbf, 0x47, 0xba, 0x4e, 0xfb, 0x44, 0x3a, 0x5d, 0x53, 0xe6, 0x98, 0x43, 0xa4, 0x6c, 0x7c, 0x05, - 0xf9, 0xc8, 0xe3, 0x13, 0x8f, 0xca, 0x42, 0xcd, 0x45, 0xbc, 0x9f, 0x51, 0xeb, 0x8f, 0x08, 0x64, - 0x76, 0x7f, 0xd4, 0xe5, 0xdd, 0x80, 0x34, 0xe5, 0x1e, 0xd5, 0xe5, 0xe9, 0x45, 0xc3, 0x37, 0x66, - 0xd7, 0x26, 0x05, 0x6d, 0xb5, 0xc0, 0x0d, 0x00, 0x51, 0xbf, 0x77, 0x66, 0x07, 0x2b, 0x4e, 0xc7, - 0xa6, 0xc6, 0xb5, 0xb5, 0xb5, 0xf5, 0x07, 0x04, 0xb9, 0x03, 0xc7, 0x8d, 0x0a, 0x67, 0x0d, 0x52, - 0x4f, 0x59, 0x05, 0xcb, 0xca, 0x11, 0x04, 0x6b, 0x51, 0x1d, 0xd2, 0x77, 0x4e, 0x6e, 0xf9, 0x01, - 0xb7, 0x59, 0xb0, 0x23, 0x7a, 0x36, 0xe6, 0x92, 0xa7, 0x8e, 0xb9, 0xd4, 0xd2, 0xcd, 0xfa, 0x6e, - 0x32, 0x13, 0x2f, 0x25, 0xac, 0xdf, 0x22, 0xc8, 0x8b, 0xc8, 0x64, 0x89, 0xdc, 0x00, 0x43, 0x04, - 0x2e, 0x73, 0xec, 0xcc, 0x8e, 0x06, 0x5a, 0x37, 0x93, 0x2a, 0xf8, 0x27, 0x50, 0xec, 0x04, 0xfe, - 0x70, 0x48, 0x3a, 0xfb, 
0xb2, 0x2d, 0xc6, 0x17, 0xdb, 0xe2, 0x8e, 0xbe, 0x6f, 0x2f, 0x88, 0x5b, - 0xff, 0x60, 0x85, 0x28, 0x5a, 0x94, 0x84, 0x2a, 0x3a, 0x22, 0xfa, 0xe4, 0x79, 0x14, 0x5f, 0x76, - 0x1e, 0xad, 0x83, 0xd1, 0x0d, 0xfc, 0xd1, 0x90, 0x96, 0x13, 0xa2, 0x4d, 0x08, 0x6a, 0xb9, 0x39, - 0x65, 0xdd, 0x85, 0xa2, 0x3a, 0xca, 0x19, 0x7d, 0xba, 0xb2, 0xd8, 0xa7, 0x77, 0x3b, 0x64, 0x10, - 0xba, 0x47, 0x6e, 0xd4, 0x79, 0xa5, 0xbc, 0xf5, 0x3b, 0x04, 0xa5, 0x45, 0x11, 0xfc, 0x63, 0x2d, - 0xcd, 0x99, 0xb9, 0x6f, 0xce, 0x36, 0xd7, 0xe0, 0x7d, 0x90, 0xf2, 0x86, 0xa2, 0x4a, 0xa0, 0x72, - 0x1d, 0x72, 0x1a, 0x9b, 0xcd, 0xbb, 0x63, 0xa2, 0x52, 0x92, 0x2d, 0x67, 0xb5, 0x18, 0x17, 0x69, - 0xca, 0x89, 0xad, 0xf8, 0x35, 0xc4, 0x12, 0xba, 0x30, 0x77, 0x93, 0xf8, 0x1a, 0x24, 0x8f, 0x02, - 0xdf, 0x5b, 0xea, 0x9a, 0xb8, 0x06, 0xfe, 0x1e, 0xc4, 0x43, 0x7f, 0xa9, 0x4b, 0x8a, 0x87, 0x3e, - 0xbb, 0x23, 0x79, 0xf8, 0x04, 0x0f, 0x4e, 0x52, 0xd6, 0x5f, 0x10, 0xac, 0x30, 0x1d, 0x81, 0xc0, - 0xcd, 0xde, 0x68, 0x70, 0x8c, 0xeb, 0x50, 0x62, 0x9e, 0x9e, 0xb8, 0x72, 0xac, 0x3d, 0x71, 0x3b, - 0xf2, 0x98, 0x45, 0xc6, 0x57, 0xd3, 0x6e, 0xb7, 0x83, 0x37, 0x20, 0x3d, 0xa2, 0x42, 0x40, 0x9c, - 0xd9, 0x60, 0xe4, 0x6e, 0x07, 0x5f, 0xd4, 0xdc, 0x31, 0xac, 0xb5, 0x97, 0x1d, 0xc7, 0xf0, 0x81, - 0xe3, 0x06, 0x51, 0x6f, 0x39, 0x0f, 0x46, 0x9b, 0x39, 0x16, 0x79, 0xc2, 0xc6, 0x6a, 0x24, 0xcc, - 0x03, 0xb2, 0xe5, 0xb6, 0xf5, 0x7d, 0xc8, 0x46, 0xda, 0xa7, 0x4e, 0xd3, 0x53, 0x6f, 0xc0, 0xba, - 0x01, 0x2b, 0xa2, 0x67, 0x9e, 0xae, 0x9c, 0x3f, 0x4d, 0x39, 0xaf, 0x94, 0x3f, 0x87, 0x94, 0x40, - 0x05, 0x43, 0xb2, 0xe3, 0x84, 0x8e, 0x52, 0x61, 0x6b, 0xab, 0x0c, 0xeb, 0x07, 0x81, 0x33, 0xa0, - 0x47, 0x24, 0xe0, 0x42, 0x51, 0xee, 0x5a, 0xe7, 0x60, 0x95, 0xf5, 0x09, 0x12, 0xd0, 0x9b, 0xfe, - 0x68, 0x10, 0xca, 0xf2, 0xb4, 0x2e, 0xc1, 0xda, 0x3c, 0x5b, 0xa6, 0xfa, 0x1a, 0xa4, 0xda, 0x8c, - 0xc1, 0xad, 0x17, 0x6c, 0x41, 0x58, 0x7f, 0x42, 0x80, 0x6f, 0x93, 0x90, 0x9b, 0xde, 0xdd, 0xa1, - 0xda, 0x7b, 0xd4, 0x73, 0xc2, 0x76, 0x8f, 0x04, 0x54, 0xbd, 0xcd, 0x14, 0xfd, 0x6d, 0xbc, 0x47, - 0xad, 0x2b, 0xb0, 0x3a, 0x17, 0xa5, 0x3c, 0x53, 0x05, 0x32, 0x6d, 0xc9, 0x93, 0xef, 0x87, 0x88, - 0xb6, 0xfe, 0x16, 0x87, 0x8c, 0xb8, 0x5b, 0x72, 0x84, 0xaf, 0x40, 0xee, 0x88, 0xe5, 0x5a, 0x30, - 0x0c, 0x5c, 0x09, 0x41, 0xb2, 0xb5, 0x32, 0x1d, 0x9b, 0x3a, 0xdb, 0xd6, 0x09, 0x7c, 0x79, 0x21, - 0xf1, 0x5a, 0x6b, 0x93, 0xb1, 0x69, 0xfc, 0x82, 0x25, 0xdf, 0x0e, 0x9b, 0x5e, 0x3c, 0x0d, 0x77, - 0xa2, 0x74, 0xbc, 0x27, 0xab, 0x8d, 0x3f, 0x4e, 0x5b, 0x57, 0x59, 0xf8, 0xaf, 0xc7, 0xe6, 0x79, - 0xed, 0x93, 0x6f, 0x18, 0xf8, 0x1e, 0x09, 0x7b, 0x64, 0x44, 0x9b, 0x6d, 0xdf, 0xf3, 0xfc, 0x41, - 0x93, 0x7f, 0xd7, 0xf1, 0x43, 0xb3, 0x11, 0xcc, 0xd4, 0x65, 0x01, 0x1e, 0x40, 0x3a, 0xec, 0x05, - 0xfe, 0xa8, 0xdb, 0xe3, 0xd3, 0x25, 0xd1, 0xda, 0x5a, 0xde, 0x9e, 0xb2, 0x60, 0xab, 0x05, 0xfe, - 0x8a, 0xa1, 0x45, 0xda, 0xc7, 0x74, 0xe4, 0xf1, 0xf1, 0x54, 0x50, 0xcf, 0x9b, 0x88, 0x7d, 0xe1, - 0x1b, 0xc8, 0x46, 0x9f, 0x45, 0x38, 0x07, 0xe9, 0x5b, 0xf7, 0xed, 0x47, 0xdb, 0xf6, 0x4e, 0x29, - 0x86, 0xf3, 0x90, 0x69, 0x6d, 0xdf, 0xbc, 0xc7, 0x29, 0xb4, 0xb9, 0x0d, 0x06, 0xfb, 0x40, 0x24, - 0x01, 0xbe, 0x0a, 0x49, 0xb6, 0xc2, 0xe7, 0x66, 0x15, 0xa5, 0x7d, 0x93, 0x56, 0xd6, 0x17, 0xd9, - 0x32, 0x79, 0x63, 0x9b, 0xff, 0x49, 0x40, 0x9a, 0x3d, 0x9a, 0x59, 0xdf, 0xfc, 0x21, 0xa4, 0xf8, - 0xfb, 0x19, 0x6b, 0xe2, 0xfa, 0xf7, 0x51, 0x65, 0xe3, 0x3d, 0xbe, 0xb2, 0xf3, 0x1d, 0x84, 0x7f, - 0x0e, 0x39, 0xce, 0x94, 0xef, 0x95, 0x2f, 0x16, 0x9f, 0x0d, 0x73, 0x96, 0xbe, 0x3c, 0x63, 0x57, - 0xb3, 0xb7, 0x05, 0x29, 0x5e, 0xc6, 0x7a, 0x34, 
0xfa, 0x2b, 0x5b, 0x8f, 0x66, 0xee, 0x35, 0x6b, - 0xc5, 0xf0, 0x75, 0x48, 0xb2, 0xea, 0xd3, 0xe1, 0xd0, 0x9e, 0x19, 0x3a, 0x1c, 0xfa, 0x8c, 0xe7, - 0x6e, 0x7f, 0x14, 0xbd, 0x96, 0x36, 0x16, 0xc7, 0x86, 0x52, 0x2f, 0xbf, 0xbf, 0x11, 0x79, 0xbe, - 0x2f, 0x9e, 0x0d, 0xaa, 0xee, 0xf1, 0x97, 0xf3, 0xae, 0x16, 0xda, 0x44, 0xa5, 0x7a, 0xd6, 0x76, - 0x64, 0x70, 0x0f, 0x72, 0x5a, 0xcd, 0xe9, 0xb0, 0xbe, 0xdf, 0x30, 0x74, 0x58, 0x4f, 0x29, 0x54, - 0x2b, 0xb6, 0xf9, 0x4b, 0xc8, 0xa8, 0xae, 0x8e, 0x1f, 0x42, 0x71, 0xbe, 0xa7, 0xe1, 0xcf, 0xb4, - 0x68, 0xe6, 0x47, 0x45, 0xa5, 0xa6, 0x6d, 0x9d, 0xde, 0x08, 0x63, 0x75, 0xd4, 0x7a, 0xfc, 0xf2, - 0x4d, 0x35, 0xf6, 0xea, 0x4d, 0x35, 0xf6, 0xee, 0x4d, 0x15, 0xfd, 0x66, 0x52, 0x45, 0x7f, 0x9e, - 0x54, 0xd1, 0x8b, 0x49, 0x15, 0xbd, 0x9c, 0x54, 0xd1, 0xbf, 0x26, 0x55, 0xf4, 0xef, 0x49, 0x35, - 0xf6, 0x6e, 0x52, 0x45, 0xcf, 0xdf, 0x56, 0x63, 0x2f, 0xdf, 0x56, 0x63, 0xaf, 0xde, 0x56, 0x63, - 0x8f, 0xbf, 0xd6, 0xff, 0x23, 0x13, 0x38, 0x47, 0xce, 0xc0, 0x69, 0xf6, 0xfd, 0x63, 0xb7, 0xa9, - 0xff, 0xc7, 0xe7, 0xd0, 0xe0, 0x3f, 0xdf, 0xfd, 0x5f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x4b, 0xf9, - 0x78, 0x9a, 0x08, 0x12, 0x00, 0x00, + 0x71, 0x46, 0xd1, 0x95, 0xcb, 0x5d, 0x37, 0xec, 0x8d, 0x0e, 0x1b, 0x6d, 0xdf, 0x6b, 0x76, 0xfd, + 0xae, 0xdf, 0xe4, 0xec, 0xc3, 0xd1, 0x11, 0xa7, 0x84, 0x30, 0x5b, 0x09, 0xc1, 0x8a, 0xd9, 0xf5, + 0xfd, 0x6e, 0x9f, 0xcc, 0x4e, 0x85, 0xae, 0x47, 0x68, 0xe8, 0x78, 0x43, 0x79, 0xa0, 0x26, 0xcd, + 0x3e, 0xed, 0x7b, 0x7e, 0x87, 0xf4, 0x9b, 0x34, 0x74, 0x42, 0x2a, 0xfe, 0x8a, 0x13, 0xd6, 0x23, + 0xc8, 0x3d, 0x18, 0xd1, 0x9e, 0x4d, 0x9e, 0x8e, 0x08, 0x0d, 0xf1, 0x1d, 0x48, 0xd3, 0x30, 0x20, + 0x8e, 0x47, 0xcb, 0xa8, 0x96, 0xa8, 0xe7, 0x36, 0x37, 0x1a, 0x91, 0xb3, 0xfb, 0x7c, 0x63, 0xbb, + 0xe3, 0x0c, 0x43, 0x12, 0xb4, 0xce, 0xbd, 0x1e, 0x9b, 0x86, 0x60, 0x4d, 0xc7, 0xa6, 0x92, 0xb2, + 0xd5, 0xc2, 0x2a, 0x42, 0x5e, 0x28, 0xa6, 0x43, 0x7f, 0x40, 0x89, 0xf5, 0xf7, 0x38, 0xe4, 0x1f, + 0x8e, 0x48, 0x70, 0xa2, 0x4c, 0x55, 0x20, 0x43, 0x49, 0x9f, 0xb4, 0x43, 0x3f, 0x28, 0xa3, 0x1a, + 0xaa, 0x67, 0xed, 0x88, 0xc6, 0x6b, 0x90, 0xea, 0xbb, 0x9e, 0x1b, 0x96, 0xe3, 0x35, 0x54, 0x2f, + 0xd8, 0x82, 0xc0, 0x5b, 0x90, 0xa2, 0xa1, 0x13, 0x84, 0xe5, 0x44, 0x0d, 0xd5, 0x73, 0x9b, 0x95, + 0x86, 0x08, 0xbf, 0xa1, 0xc2, 0x6f, 0x1c, 0xa8, 0xf0, 0x5b, 0x99, 0x17, 0x63, 0x33, 0xf6, 0xfc, + 0x9f, 0x26, 0xb2, 0x85, 0x08, 0xfe, 0x01, 0x24, 0xc8, 0xa0, 0x53, 0x4e, 0x2e, 0x21, 0xc9, 0x04, + 0xf0, 0x15, 0xc8, 0x76, 0xdc, 0x80, 0xb4, 0x43, 0xd7, 0x1f, 0x94, 0x53, 0x35, 0x54, 0x2f, 0x6e, + 0xae, 0xce, 0x20, 0xd9, 0x51, 0x5b, 0xf6, 0xec, 0x14, 0xbe, 0x04, 0x06, 0xed, 0x39, 0x41, 0x87, + 0x96, 0xd3, 0xb5, 0x44, 0x3d, 0xdb, 0x5a, 0x9b, 0x8e, 0xcd, 0x92, 0xe0, 0x5c, 0xf2, 0x3d, 0x37, + 0x24, 0xde, 0x30, 0x3c, 0xb1, 0xe5, 0x19, 0x7c, 0x01, 0xd2, 0x1d, 0xd2, 0x27, 0x21, 0xa1, 0xe5, + 0x0c, 0x47, 0xbc, 0xa4, 0xa9, 0xe7, 0x1b, 0xb6, 0x3a, 0x70, 0x37, 0x99, 0x31, 0x4a, 0x69, 0xeb, + 0xbf, 0x08, 0xf0, 0xbe, 0xe3, 0x0d, 0xfb, 0xe4, 0xa3, 0xf1, 0x8c, 0x90, 0x8b, 0x7f, 0x32, 0x72, + 0x89, 0x65, 0x91, 0x9b, 0xc1, 0x90, 0x5c, 0x0e, 0x86, 0xd4, 0x07, 0x60, 0xb0, 0xf6, 0xc0, 0x10, + 0xac, 0x0f, 0xe5, 0xd0, 0x2c, 0xe6, 0x84, 0x8a, 0xa6, 0x34, 0x8b, 0x26, 0xc1, 0xfd, 0xb4, 0x7e, + 0x0d, 0x05, 0x89, 0xa3, 0xc8, 0x54, 0xbc, 0xfd, 0xd1, 0x35, 0x50, 0x7c, 0x31, 0x36, 0xd1, 0xac, + 0x0e, 0xa2, 0xe4, 0xc7, 0x17, 0xb9, 0xed, 0x90, 0x4a, 0xbc, 0x57, 0x1a, 0xa2, 0xe4, 0x76, 0x07, + 0x5d, 0x42, 0x99, 0x60, 0x92, 0x41, 0x65, 0x8b, 0x33, 0xd6, 0xaf, 0x60, 0x75, 0xee, 0x3a, 0xa5, + 0x1b, 0xd7, 0xc0, 0xa0, 0x24, 0x70, 
0x89, 0xf2, 0x42, 0x03, 0x64, 0x9f, 0xf3, 0x35, 0xf3, 0x9c, + 0xb6, 0xe5, 0xf9, 0xe5, 0xac, 0xff, 0x15, 0x41, 0x7e, 0xcf, 0x39, 0x24, 0x7d, 0x95, 0x47, 0x18, + 0x92, 0x03, 0xc7, 0x23, 0x12, 0x4f, 0xbe, 0xc6, 0xeb, 0x60, 0x3c, 0x73, 0xfa, 0x23, 0x22, 0x54, + 0x66, 0x6c, 0x49, 0x2d, 0x5b, 0x91, 0xe8, 0x93, 0x2b, 0x12, 0x45, 0x79, 0x65, 0x9d, 0x87, 0x82, + 0xf4, 0x57, 0x02, 0x35, 0x73, 0x8e, 0x01, 0x95, 0x55, 0xce, 0x59, 0xbf, 0x47, 0x50, 0x98, 0xbb, + 0x2f, 0x6c, 0x81, 0xd1, 0x67, 0xa2, 0x54, 0x04, 0xd7, 0x82, 0xe9, 0xd8, 0x94, 0x1c, 0x5b, 0xfe, + 0xb2, 0xdb, 0x27, 0x83, 0x90, 0xe3, 0x1e, 0xe7, 0xb8, 0xaf, 0xcf, 0x70, 0xff, 0xe9, 0x20, 0x0c, + 0x4e, 0xd4, 0xe5, 0xaf, 0x30, 0x14, 0x59, 0xeb, 0x93, 0xc7, 0x6d, 0xb5, 0xc0, 0x9f, 0x41, 0xb2, + 0xe7, 0xd0, 0x1e, 0x07, 0x25, 0xd9, 0x4a, 0x4d, 0xc7, 0x26, 0xba, 0x6c, 0x73, 0x96, 0xf5, 0x0c, + 0xf2, 0xba, 0x12, 0x7c, 0x07, 0xb2, 0x51, 0xcf, 0xe6, 0x4e, 0xfd, 0x7f, 0x28, 0x8a, 0xd2, 0x66, + 0x3c, 0xa4, 0x1c, 0x90, 0x99, 0x30, 0xfe, 0x02, 0x92, 0x7d, 0x77, 0x40, 0xf8, 0x05, 0x65, 0x5b, + 0x99, 0xe9, 0xd8, 0xe4, 0xb4, 0xcd, 0xff, 0x5a, 0x1e, 0x18, 0x22, 0xc7, 0xf0, 0xd7, 0x8b, 0x16, + 0x13, 0x2d, 0x43, 0x68, 0xd4, 0xb5, 0x99, 0x90, 0xe2, 0x28, 0x72, 0x75, 0xa8, 0x95, 0x9d, 0x8e, + 0x4d, 0xc1, 0xb0, 0xc5, 0x0f, 0x33, 0xa7, 0xc5, 0xc8, 0xcd, 0x31, 0x5a, 0x86, 0x79, 0x1b, 0xf2, + 0x7b, 0xa4, 0xeb, 0xb4, 0x4f, 0xa4, 0xd1, 0x35, 0xa5, 0x8e, 0x19, 0x44, 0x4a, 0xc7, 0x57, 0x90, + 0x8f, 0x2c, 0x3e, 0xf1, 0xa8, 0x2c, 0xd4, 0x5c, 0xc4, 0xfb, 0x19, 0xb5, 0xfe, 0x88, 0x40, 0x66, + 0xf7, 0x47, 0x5d, 0xde, 0x0d, 0x48, 0x53, 0x6e, 0x51, 0x5d, 0x9e, 0x5e, 0x34, 0x7c, 0x63, 0x76, + 0x6d, 0xf2, 0xa0, 0xad, 0x16, 0xb8, 0x01, 0x20, 0xea, 0xf7, 0xce, 0x2c, 0xb0, 0xe2, 0x74, 0x6c, + 0x6a, 0x5c, 0x5b, 0x5b, 0x5b, 0x7f, 0x40, 0x90, 0x3b, 0x70, 0xdc, 0xa8, 0x70, 0xd6, 0x20, 0xf5, + 0x94, 0x55, 0xb0, 0xac, 0x1c, 0x41, 0xb0, 0x16, 0xd5, 0x21, 0x7d, 0xe7, 0xe4, 0x96, 0x1f, 0x70, + 0x9d, 0x05, 0x3b, 0xa2, 0x67, 0x63, 0x2e, 0x79, 0xea, 0x98, 0x4b, 0x2d, 0xdd, 0xac, 0xef, 0x26, + 0x33, 0xf1, 0x52, 0xc2, 0xfa, 0x2d, 0x82, 0xbc, 0xf0, 0x4c, 0x96, 0xc8, 0x0d, 0x30, 0x84, 0xe3, + 0x32, 0xc7, 0xce, 0xec, 0x68, 0xa0, 0x75, 0x33, 0x29, 0x82, 0x7f, 0x02, 0xc5, 0x4e, 0xe0, 0x0f, + 0x87, 0xa4, 0xb3, 0x2f, 0xdb, 0x62, 0x7c, 0xb1, 0x2d, 0xee, 0xe8, 0xfb, 0xf6, 0xc2, 0x71, 0xeb, + 0x1f, 0xac, 0x10, 0x45, 0x8b, 0x92, 0x50, 0x45, 0x21, 0xa2, 0x4f, 0x9e, 0x47, 0xf1, 0x65, 0xe7, + 0xd1, 0x3a, 0x18, 0xdd, 0xc0, 0x1f, 0x0d, 0x69, 0x39, 0x21, 0xda, 0x84, 0xa0, 0x96, 0x9b, 0x53, + 0xd6, 0x5d, 0x28, 0xaa, 0x50, 0xce, 0xe8, 0xd3, 0x95, 0xc5, 0x3e, 0xbd, 0xdb, 0x21, 0x83, 0xd0, + 0x3d, 0x72, 0xa3, 0xce, 0x2b, 0xcf, 0x5b, 0xbf, 0x43, 0x50, 0x5a, 0x3c, 0x82, 0x7f, 0xac, 0xa5, + 0x39, 0x53, 0xf7, 0xcd, 0xd9, 0xea, 0x1a, 0xbc, 0x0f, 0x52, 0xde, 0x50, 0x54, 0x09, 0x54, 0xae, + 0x43, 0x4e, 0x63, 0xb3, 0x79, 0x77, 0x4c, 0x54, 0x4a, 0xb2, 0xe5, 0xac, 0x16, 0xe3, 0x22, 0x4d, + 0x39, 0xb1, 0x15, 0xbf, 0x86, 0x58, 0x42, 0x17, 0xe6, 0x6e, 0x12, 0x5f, 0x83, 0xe4, 0x51, 0xe0, + 0x7b, 0x4b, 0x5d, 0x13, 0x97, 0xc0, 0xdf, 0x83, 0x78, 0xe8, 0x2f, 0x75, 0x49, 0xf1, 0xd0, 0x67, + 0x77, 0x24, 0x83, 0x4f, 0x70, 0xe7, 0x24, 0x65, 0xfd, 0x05, 0xc1, 0x0a, 0x93, 0x11, 0x08, 0xdc, + 0xec, 0x8d, 0x06, 0xc7, 0xb8, 0x0e, 0x25, 0x66, 0xe9, 0x89, 0x2b, 0xc7, 0xda, 0x13, 0xb7, 0x23, + 0xc3, 0x2c, 0x32, 0xbe, 0x9a, 0x76, 0xbb, 0x1d, 0xbc, 0x01, 0xe9, 0x11, 0x15, 0x07, 0x44, 0xcc, + 0x06, 0x23, 0x77, 0x3b, 0xf8, 0xa2, 0x66, 0x8e, 0x61, 0xad, 0xbd, 0xec, 0x38, 0x86, 0x0f, 0x1c, + 0x37, 0x88, 0x7a, 0xcb, 0x79, 0x30, 0xda, 0xcc, 0xb0, 0xc8, 
0x13, 0x36, 0x56, 0xa3, 0xc3, 0xdc, + 0x21, 0x5b, 0x6e, 0x5b, 0xdf, 0x87, 0x6c, 0x24, 0x7d, 0xea, 0x34, 0x3d, 0xf5, 0x06, 0xac, 0x1b, + 0xb0, 0x22, 0x7a, 0xe6, 0xe9, 0xc2, 0xf9, 0xd3, 0x84, 0xf3, 0x4a, 0xf8, 0x73, 0x48, 0x09, 0x54, + 0x30, 0x24, 0x3b, 0x4e, 0xe8, 0x28, 0x11, 0xb6, 0xb6, 0xca, 0xb0, 0x7e, 0x10, 0x38, 0x03, 0x7a, + 0x44, 0x02, 0x7e, 0x28, 0xca, 0x5d, 0xeb, 0x1c, 0xac, 0xb2, 0x3e, 0x41, 0x02, 0x7a, 0xd3, 0x1f, + 0x0d, 0x42, 0x59, 0x9e, 0xd6, 0x25, 0x58, 0x9b, 0x67, 0xcb, 0x54, 0x5f, 0x83, 0x54, 0x9b, 0x31, + 0xb8, 0xf6, 0x82, 0x2d, 0x08, 0xeb, 0x4f, 0x08, 0xf0, 0x6d, 0x12, 0x72, 0xd5, 0xbb, 0x3b, 0x54, + 0x7b, 0x8f, 0x7a, 0x4e, 0xd8, 0xee, 0x91, 0x80, 0xaa, 0xb7, 0x99, 0xa2, 0xbf, 0x8d, 0xf7, 0xa8, + 0x75, 0x05, 0x56, 0xe7, 0xbc, 0x94, 0x31, 0x55, 0x20, 0xd3, 0x96, 0x3c, 0xf9, 0x7e, 0x88, 0x68, + 0xeb, 0x6f, 0x71, 0xc8, 0x88, 0xbb, 0x25, 0x47, 0xf8, 0x0a, 0xe4, 0x8e, 0x58, 0xae, 0x05, 0xc3, + 0xc0, 0x95, 0x10, 0x24, 0x5b, 0x2b, 0xd3, 0xb1, 0xa9, 0xb3, 0x6d, 0x9d, 0xc0, 0x97, 0x17, 0x12, + 0xaf, 0xb5, 0x36, 0x19, 0x9b, 0xc6, 0x2f, 0x58, 0xf2, 0xed, 0xb0, 0xe9, 0xc5, 0xd3, 0x70, 0x27, + 0x4a, 0xc7, 0x7b, 0xb2, 0xda, 0xf8, 0xe3, 0xb4, 0x75, 0x95, 0xb9, 0xff, 0x7a, 0x6c, 0x9e, 0xd7, + 0xbe, 0x09, 0x87, 0x81, 0xef, 0x91, 0xb0, 0x47, 0x46, 0xb4, 0xd9, 0xf6, 0x3d, 0xcf, 0x1f, 0x34, + 0xf9, 0x77, 0x1d, 0x0f, 0x9a, 0x8d, 0x60, 0x26, 0x2e, 0x0b, 0xf0, 0x00, 0xd2, 0x61, 0x2f, 0xf0, + 0x47, 0xdd, 0x1e, 0x9f, 0x2e, 0x89, 0xd6, 0xd6, 0xf2, 0xfa, 0x94, 0x06, 0x5b, 0x2d, 0xf0, 0x57, + 0x0c, 0x2d, 0xd2, 0x3e, 0xa6, 0x23, 0x8f, 0x8f, 0xa7, 0x82, 0x7a, 0xde, 0x44, 0xec, 0x0b, 0xdf, + 0x40, 0x36, 0xfa, 0x2c, 0xc2, 0x39, 0x48, 0xdf, 0xba, 0x6f, 0x3f, 0xda, 0xb6, 0x77, 0x4a, 0x31, + 0x9c, 0x87, 0x4c, 0x6b, 0xfb, 0xe6, 0x3d, 0x4e, 0xa1, 0xcd, 0x6d, 0x30, 0xd8, 0x07, 0x22, 0x09, + 0xf0, 0x55, 0x48, 0xb2, 0x15, 0x3e, 0x37, 0xab, 0x28, 0xed, 0x9b, 0xb4, 0xb2, 0xbe, 0xc8, 0x96, + 0xc9, 0x1b, 0xdb, 0xfc, 0x4f, 0x02, 0xd2, 0xec, 0xd1, 0xcc, 0xfa, 0xe6, 0x0f, 0x21, 0xc5, 0xdf, + 0xcf, 0x58, 0x3b, 0xae, 0x7f, 0x1f, 0x55, 0x36, 0xde, 0xe3, 0x2b, 0x3d, 0xdf, 0x41, 0xf8, 0xe7, + 0x90, 0xe3, 0x4c, 0xf9, 0x5e, 0xf9, 0x62, 0xf1, 0xd9, 0x30, 0xa7, 0xe9, 0xcb, 0x33, 0x76, 0x35, + 0x7d, 0x5b, 0x90, 0xe2, 0x65, 0xac, 0x7b, 0xa3, 0xbf, 0xb2, 0x75, 0x6f, 0xe6, 0x5e, 0xb3, 0x56, + 0x0c, 0x5f, 0x87, 0x24, 0xab, 0x3e, 0x1d, 0x0e, 0xed, 0x99, 0xa1, 0xc3, 0xa1, 0xcf, 0x78, 0x6e, + 0xf6, 0x47, 0xd1, 0x6b, 0x69, 0x63, 0x71, 0x6c, 0x28, 0xf1, 0xf2, 0xfb, 0x1b, 0x91, 0xe5, 0xfb, + 0xe2, 0xd9, 0xa0, 0xea, 0x1e, 0x7f, 0x39, 0x6f, 0x6a, 0xa1, 0x4d, 0x54, 0xaa, 0x67, 0x6d, 0x47, + 0x0a, 0xf7, 0x20, 0xa7, 0xd5, 0x9c, 0x0e, 0xeb, 0xfb, 0x0d, 0x43, 0x87, 0xf5, 0x94, 0x42, 0xb5, + 0x62, 0x9b, 0xbf, 0x84, 0x8c, 0xea, 0xea, 0xf8, 0x21, 0x14, 0xe7, 0x7b, 0x1a, 0xfe, 0x4c, 0xf3, + 0x66, 0x7e, 0x54, 0x54, 0x6a, 0xda, 0xd6, 0xe9, 0x8d, 0x30, 0x56, 0x47, 0xad, 0xc7, 0x2f, 0xdf, + 0x54, 0x63, 0xaf, 0xde, 0x54, 0x63, 0xef, 0xde, 0x54, 0xd1, 0x6f, 0x26, 0x55, 0xf4, 0xe7, 0x49, + 0x15, 0xbd, 0x98, 0x54, 0xd1, 0xcb, 0x49, 0x15, 0xfd, 0x6b, 0x52, 0x45, 0xff, 0x9e, 0x54, 0x63, + 0xef, 0x26, 0x55, 0xf4, 0xfc, 0x6d, 0x35, 0xf6, 0xf2, 0x6d, 0x35, 0xf6, 0xea, 0x6d, 0x35, 0xf6, + 0xf8, 0x6b, 0xfd, 0x5f, 0x36, 0x81, 0x73, 0xe4, 0x0c, 0x9c, 0x66, 0xdf, 0x3f, 0x76, 0x9b, 0xfa, + 0x7f, 0x7c, 0x0e, 0x0d, 0xfe, 0xf3, 0xdd, 0xff, 0x05, 0x00, 0x00, 0xff, 0xff, 0xda, 0xa4, 0x4e, + 0xd8, 0x08, 0x12, 0x00, 0x00, } func (x Direction) String() string { diff --git a/pkg/logproto/logproto.proto b/pkg/logproto/logproto.proto index 0c454ad91442e..06d3dc67c7012 100644 
--- a/pkg/logproto/logproto.proto
+++ b/pkg/logproto/logproto.proto
@@ -2,42 +2,56 @@ syntax = "proto3";
 
 package logproto;
 
-option go_package = "github.com/grafana/loki/pkg/logproto";
-
-import "google/protobuf/timestamp.proto";
 import "github.com/gogo/protobuf/gogoproto/gogo.proto";
+import "google/protobuf/timestamp.proto";
 import "pkg/logqlmodel/stats/stats.proto";
 
+option go_package = "github.com/grafana/loki/pkg/logproto";
+
 service Pusher {
-  rpc Push(PushRequest) returns (PushResponse) {};
+  rpc Push(PushRequest) returns (PushResponse) {}
 }
 
 service Querier {
-  rpc Query(QueryRequest) returns (stream QueryResponse) {};
-  rpc QuerySample(SampleQueryRequest) returns (stream SampleQueryResponse) {};
-  rpc Label(LabelRequest) returns (LabelResponse) {};
-  rpc Tail(TailRequest) returns (stream TailResponse) {};
-  rpc Series(SeriesRequest) returns (SeriesResponse) {};
-  rpc TailersCount(TailersCountRequest) returns (TailersCountResponse) {};
-  rpc GetChunkIDs(GetChunkIDsRequest) returns (GetChunkIDsResponse) {}; // GetChunkIDs returns ChunkIDs from the index store holding logs for given selectors and time-range.
+  rpc Query(QueryRequest) returns (stream QueryResponse) {}
+
+  rpc QuerySample(SampleQueryRequest) returns (stream SampleQueryResponse) {}
+
+  rpc Label(LabelRequest) returns (LabelResponse) {}
+
+  rpc Tail(TailRequest) returns (stream TailResponse) {}
+
+  rpc Series(SeriesRequest) returns (SeriesResponse) {}
+
+  rpc TailersCount(TailersCountRequest) returns (TailersCountResponse) {}
+
+  rpc GetChunkIDs(GetChunkIDsRequest) returns (GetChunkIDsResponse) {}
 }
 
 service Ingester {
-  rpc TransferChunks(stream TimeSeriesChunk) returns (TransferChunksResponse) {};
+  rpc TransferChunks(stream TimeSeriesChunk) returns (TransferChunksResponse) {}
 }
 
 message PushRequest {
-  repeated StreamAdapter streams = 1 [(gogoproto.jsontag) = "streams", (gogoproto.customtype) = "Stream"];
+  repeated StreamAdapter streams = 1 [
+    (gogoproto.jsontag) = "streams",
+    (gogoproto.customtype) = "Stream"
+  ];
 }
 
-message PushResponse {
-}
+message PushResponse {}
 
 message QueryRequest {
   string selector = 1;
   uint32 limit = 2;
-  google.protobuf.Timestamp start = 3 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
-  google.protobuf.Timestamp end = 4 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
+  google.protobuf.Timestamp start = 3 [
+    (gogoproto.stdtime) = true,
+    (gogoproto.nullable) = false
+  ];
+  google.protobuf.Timestamp end = 4 [
+    (gogoproto.stdtime) = true,
+    (gogoproto.nullable) = false
+  ];
   Direction direction = 5;
   reserved 6;
   repeated string shards = 7 [(gogoproto.jsontag) = "shards,omitempty"];
@@ -46,8 +60,14 @@ message QueryRequest {
 
 message SampleQueryRequest {
   string selector = 1;
-  google.protobuf.Timestamp start = 2 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
-  google.protobuf.Timestamp end = 3 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
+  google.protobuf.Timestamp start = 2 [
+    (gogoproto.stdtime) = true,
+    (gogoproto.nullable) = false
+  ];
+  google.protobuf.Timestamp end = 3 [
+    (gogoproto.stdtime) = true,
+    (gogoproto.nullable) = false
+  ];
   repeated string shards = 4 [(gogoproto.jsontag) = "shards,omitempty"];
   repeated Delete deletes = 5;
 }
@@ -59,28 +79,37 @@ message Delete {
 }
 
 message QueryResponse {
-  repeated StreamAdapter streams = 1 [(gogoproto.customtype) = "Stream", (gogoproto.nullable) = true];
+  repeated StreamAdapter streams = 1 [
+    (gogoproto.customtype) = "Stream",
+    (gogoproto.nullable) = true
+  ];
   stats.Ingester stats = 2 [(gogoproto.nullable) = false];
 }
 
 message SampleQueryResponse {
-  repeated Series series = 1 [(gogoproto.customtype) = "Series", (gogoproto.nullable) = true];
+  repeated Series series = 1 [
+    (gogoproto.customtype) = "Series",
+    (gogoproto.nullable) = true
+  ];
   stats.Ingester stats = 2 [(gogoproto.nullable) = false];
 }
-
 enum Direction {
   FORWARD = 0;
   BACKWARD = 1;
 }
-
-
 message LabelRequest {
   string name = 1;
   bool values = 2; // True to fetch label values, false for fetch labels names.
-  google.protobuf.Timestamp start = 3 [(gogoproto.stdtime) = true, (gogoproto.nullable) = true];
-  google.protobuf.Timestamp end = 4 [(gogoproto.stdtime) = true, (gogoproto.nullable) = true];
+  google.protobuf.Timestamp start = 3 [
+    (gogoproto.stdtime) = true,
+    (gogoproto.nullable) = true
+  ];
+  google.protobuf.Timestamp end = 4 [
+    (gogoproto.stdtime) = true,
+    (gogoproto.nullable) = true
+  ];
 }
 
 message LabelResponse {
@@ -89,13 +118,20 @@ message LabelResponse {
 
 message StreamAdapter {
   string labels = 1 [(gogoproto.jsontag) = "labels"];
-  repeated EntryAdapter entries = 2 [(gogoproto.nullable) = false, (gogoproto.jsontag) = "entries"];
+  repeated EntryAdapter entries = 2 [
+    (gogoproto.nullable) = false,
+    (gogoproto.jsontag) = "entries"
+  ];
   // hash contains the original hash of the stream.
   uint64 hash = 3 [(gogoproto.jsontag) = "-"];
 }
 
 message EntryAdapter {
-  google.protobuf.Timestamp timestamp = 1 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false, (gogoproto.jsontag) = "ts"];
+  google.protobuf.Timestamp timestamp = 1 [
+    (gogoproto.stdtime) = true,
+    (gogoproto.nullable) = false,
+    (gogoproto.jsontag) = "ts"
+  ];
   string line = 2 [(gogoproto.jsontag) = "line"];
 }
 
@@ -107,13 +143,16 @@ message Sample {
 
 // LegacySample exists for backwards compatibility reasons and is deprecated. Do not use.
 message LegacySample {
-  double value = 1;
+  double value = 1;
   int64 timestamp_ms = 2;
 }
 
 message Series {
   string labels = 1 [(gogoproto.jsontag) = "labels"];
-  repeated Sample samples = 2 [(gogoproto.nullable) = false, (gogoproto.jsontag) = "samples"];
+  repeated Sample samples = 2 [
+    (gogoproto.nullable) = false,
+    (gogoproto.jsontag) = "samples"
+  ];
   uint64 streamHash = 3 [(gogoproto.jsontag) = "streamHash"];
 }
 
@@ -122,7 +161,10 @@ message TailRequest {
   reserved 2;
   uint32 delayFor = 3;
   uint32 limit = 4;
-  google.protobuf.Timestamp start = 5 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
+  google.protobuf.Timestamp start = 5 [
+    (gogoproto.stdtime) = true,
+    (gogoproto.nullable) = false
+  ];
 }
 
 message TailResponse {
@@ -131,8 +173,14 @@
 }
 
 message SeriesRequest {
-  google.protobuf.Timestamp start = 1 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
-  google.protobuf.Timestamp end = 2 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
+  google.protobuf.Timestamp start = 1 [
+    (gogoproto.stdtime) = true,
+    (gogoproto.nullable) = false
+  ];
+  google.protobuf.Timestamp end = 2 [
+    (gogoproto.stdtime) = true,
+    (gogoproto.nullable) = false
+  ];
   repeated string groups = 3;
   repeated string shards = 4 [(gogoproto.jsontag) = "shards,omitempty"];
 }
@@ -142,12 +190,18 @@ message SeriesResponse {
 }
 
 message SeriesIdentifier {
-  map<string, string> labels = 1;
+  map<string, string> labels = 1;
 }
 
 message DroppedStream {
-  google.protobuf.Timestamp from = 1 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
-  google.protobuf.Timestamp to = 2 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
+  google.protobuf.Timestamp from = 1 [
+    (gogoproto.stdtime) = true,
+    (gogoproto.nullable) = false
+  ];
+  google.protobuf.Timestamp to = 2 [
+    (gogoproto.stdtime) = true,
+    (gogoproto.nullable) = false
+  ];
   string labels = 3;
 }
 
@@ -173,13 +227,9 @@ message Chunk {
   bytes data = 1;
 }
 
-message TransferChunksResponse {
-
-}
+message TransferChunksResponse {}
 
-message TailersCountRequest {
-
-}
+message TailersCountRequest {}
 
 message TailersCountResponse {
   uint32 count = 1;
@@ -187,8 +237,14 @@ message GetChunkIDsRequest {
   string matchers = 1;
-  google.protobuf.Timestamp start = 2 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
-  google.protobuf.Timestamp end = 3 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
+  google.protobuf.Timestamp start = 2 [
+    (gogoproto.stdtime) = true,
+    (gogoproto.nullable) = false
+  ];
+  google.protobuf.Timestamp end = 3 [
+    (gogoproto.stdtime) = true,
+    (gogoproto.nullable) = false
+  ];
 }
 
 message GetChunkIDsResponse {
@@ -201,9 +257,20 @@ message GetChunkIDsResponse {
 // resulting Go struct.
 message ChunkRef {
   uint64 fingerprint = 1 [(gogoproto.jsontag) = "fingerprint"];
-  string user_id = 2 [(gogoproto.customname) = "UserID", (gogoproto.jsontag) = "userID"];
-  int64 from = 3 [(gogoproto.jsontag) = "from", (gogoproto.customtype) = "github.com/prometheus/common/model.Time", (gogoproto.nullable) = false];
-  int64 through = 4 [(gogoproto.jsontag) = "through", (gogoproto.customtype) = "github.com/prometheus/common/model.Time", (gogoproto.nullable) = false];
+  string user_id = 2 [
+    (gogoproto.customname) = "UserID",
+    (gogoproto.jsontag) = "userID"
+  ];
+  int64 from = 3 [
+    (gogoproto.jsontag) = "from",
+    (gogoproto.customtype) = "github.com/prometheus/common/model.Time",
+    (gogoproto.nullable) = false
+  ];
+  int64 through = 4 [
+    (gogoproto.jsontag) = "through",
+    (gogoproto.customtype) = "github.com/prometheus/common/model.Time",
+    (gogoproto.nullable) = false
+  ];
 
   // The checksum is not written to the external storage. We use crc32,
   // Castagnoli table. See http://www.evanjones.ca/crc32c.html.
diff --git a/pkg/logproto/metrics.proto b/pkg/logproto/metrics.proto
index 97e85ce5c761a..0c0aea283f4d2 100644
--- a/pkg/logproto/metrics.proto
+++ b/pkg/logproto/metrics.proto
@@ -2,16 +2,18 @@ syntax = "proto3";
 
 package logproto;
 
-option go_package = "github.com/grafana/loki/pkg/logproto";
-
 import "github.com/gogo/protobuf/gogoproto/gogo.proto";
 import "pkg/logproto/logproto.proto";
 
+option go_package = "github.com/grafana/loki/pkg/logproto";
 option (gogoproto.marshaler_all) = true;
 option (gogoproto.unmarshaler_all) = true;
 
 message WriteRequest {
-  repeated TimeSeries timeseries = 1 [(gogoproto.nullable) = false, (gogoproto.customtype) = "PreallocTimeseries"];
+  repeated TimeSeries timeseries = 1 [
+    (gogoproto.nullable) = false,
+    (gogoproto.customtype) = "PreallocTimeseries"
+  ];
   enum SourceEnum {
     API = 0;
     RULE = 1;
@@ -25,7 +27,10 @@ message WriteRequest {
 message WriteResponse {}
 
 message TimeSeries {
-  repeated LegacyLabelPair labels = 1 [(gogoproto.nullable) = false, (gogoproto.customtype) = "LabelAdapter"];
+  repeated LegacyLabelPair labels = 1 [
+    (gogoproto.nullable) = false,
+    (gogoproto.customtype) = "LabelAdapter"
+  ];
   // Sorted by time, oldest sample first.
   repeated LegacySample samples = 2 [(gogoproto.nullable) = false];
   // repeated Exemplar exemplars = 3 [(gogoproto.nullable) = false];
@@ -33,14 +38,14 @@ message TimeSeries {
 
 message MetricMetadata {
   enum MetricType {
-    UNKNOWN = 0;
-    COUNTER = 1;
-    GAUGE = 2;
-    HISTOGRAM = 3;
+    UNKNOWN = 0;
+    COUNTER = 1;
+    GAUGE = 2;
+    HISTOGRAM = 3;
     GAUGEHISTOGRAM = 4;
-    SUMMARY = 5;
-    INFO = 6;
-    STATESET = 7;
+    SUMMARY = 5;
+    INFO = 6;
+    STATESET = 7;
   }
 
   MetricType type = 1;
@@ -50,5 +55,8 @@ message MetricMetadata {
 }
 
 message Metric {
-  repeated LegacyLabelPair labels = 1 [(gogoproto.nullable) = false, (gogoproto.customtype) = "LabelAdapter"];
-}
\ No newline at end of file
+  repeated LegacyLabelPair labels = 1 [
+    (gogoproto.nullable) = false,
+    (gogoproto.customtype) = "LabelAdapter"
+  ];
+}
diff --git a/pkg/logqlmodel/stats/stats.proto b/pkg/logqlmodel/stats/stats.proto
index fdf3f14d2d0fe..00fc806b2d54e 100644
--- a/pkg/logqlmodel/stats/stats.proto
+++ b/pkg/logqlmodel/stats/stats.proto
@@ -4,21 +4,29 @@ package stats;
 
 import "github.com/gogo/protobuf/gogoproto/gogo.proto";
 
+option go_package = "github.com/grafana/loki/pkg/logqlmodel/stats";
 option (gogoproto.marshaler_all) = true;
 option (gogoproto.unmarshaler_all) = true;
 
-option go_package = "github.com/grafana/loki/pkg/logqlmodel/stats";
-
 // Result contains LogQL query statistics.
 message Result {
-  Summary summary = 1 [(gogoproto.nullable) = false, (gogoproto.jsontag) = "summary"];
-  Querier querier = 2 [(gogoproto.nullable) = false, (gogoproto.jsontag) = "querier"];
-  Ingester ingester = 3 [(gogoproto.nullable) = false, (gogoproto.jsontag) = "ingester"];
+  Summary summary = 1 [
+    (gogoproto.nullable) = false,
+    (gogoproto.jsontag) = "summary"
+  ];
+  Querier querier = 2 [
+    (gogoproto.nullable) = false,
+    (gogoproto.jsontag) = "querier"
+  ];
+  Ingester ingester = 3 [
+    (gogoproto.nullable) = false,
+    (gogoproto.jsontag) = "ingester"
+  ];
 }
 
 // Summary is the summary of a query statistics.
 message Summary {
-  // Total bytes processed per second.
+  // Total bytes processed per second.
   int64 bytesProcessedPerSecond = 1 [(gogoproto.jsontag) = "bytesProcessedPerSecond"];
   // Total lines processed per second.
   int64 linesProcessedPerSecond = 2 [(gogoproto.jsontag) = "linesProcessedPerSecond"];
@@ -39,7 +47,10 @@ message Summary {
 }
 
 message Querier {
-  Store store = 1 [(gogoproto.nullable) = false, (gogoproto.jsontag) = "store"];
+  Store store = 1 [
+    (gogoproto.nullable) = false,
+    (gogoproto.jsontag) = "store"
+  ];
 }
 
 message Ingester {
@@ -52,19 +63,24 @@
   // Total lines sent by ingesters.
   int64 totalLinesSent = 4 [(gogoproto.jsontag) = "totalLinesSent"];
 
-  Store store = 5 [(gogoproto.nullable) = false, (gogoproto.jsontag) = "store"];
-
+  Store store = 5 [
+    (gogoproto.nullable) = false,
+    (gogoproto.jsontag) = "store"
+  ];
 }
 
 message Store {
-  // The total of chunk reference fetched from index.
-  int64 totalChunksRef = 1 [(gogoproto.jsontag) = "totalChunksRef"];
-  // Total number of chunks fetched.
-  int64 totalChunksDownloaded = 2 [(gogoproto.jsontag) = "totalChunksDownloaded"];
-  // Time spent fetching chunks in nanoseconds.
+  // The total of chunk reference fetched from index.
+  int64 totalChunksRef = 1 [(gogoproto.jsontag) = "totalChunksRef"];
+  // Total number of chunks fetched.
+  int64 totalChunksDownloaded = 2 [(gogoproto.jsontag) = "totalChunksDownloaded"];
+  // Time spent fetching chunks in nanoseconds.
+  int64 chunksDownloadTime = 3 [(gogoproto.jsontag) = "chunksDownloadTime"];
 
-  Chunk chunk = 4 [(gogoproto.nullable) = false, (gogoproto.jsontag) = "chunk"];
+  Chunk chunk = 4 [
+    (gogoproto.nullable) = false,
+    (gogoproto.jsontag) = "chunk"
+  ];
 }
 
 message Chunk {
diff --git a/pkg/lokifrontend/frontend/v1/frontendv1pb/frontend.proto b/pkg/lokifrontend/frontend/v1/frontendv1pb/frontend.proto
index f2ebd5d683c40..d2142c286b7d9 100644
--- a/pkg/lokifrontend/frontend/v1/frontendv1pb/frontend.proto
+++ b/pkg/lokifrontend/frontend/v1/frontendv1pb/frontend.proto
@@ -4,19 +4,18 @@ syntax = "proto3";
 // in order to not break backward compatibility.
 package frontend;
 
-option go_package = "frontendv1pb";
-
 import "github.com/gogo/protobuf/gogoproto/gogo.proto";
 import "github.com/weaveworks/common/httpgrpc/httpgrpc.proto";
 import "pkg/querier/stats/stats.proto";
 
+option go_package = "frontendv1pb";
 option (gogoproto.marshaler_all) = true;
 option (gogoproto.unmarshaler_all) = true;
 
 service Frontend {
   // After calling this method, client enters a loop, in which it waits for
   // a "FrontendToClient" message and replies with single "ClientToFrontend" message.
-  rpc Process(stream ClientToFrontend) returns (stream FrontendToClient) {};
+  rpc Process(stream ClientToFrontend) returns (stream FrontendToClient) {}
 
   // The client notifies the query-frontend that it started a graceful shutdown.
   rpc NotifyClientShutdown(NotifyClientShutdownRequest) returns (NotifyClientShutdownResponse);
diff --git a/pkg/lokifrontend/frontend/v2/frontendv2pb/frontend.proto b/pkg/lokifrontend/frontend/v2/frontendv2pb/frontend.proto
index 861849e4b5fef..d40865ca03a3c 100644
--- a/pkg/lokifrontend/frontend/v2/frontendv2pb/frontend.proto
+++ b/pkg/lokifrontend/frontend/v2/frontendv2pb/frontend.proto
@@ -2,27 +2,26 @@ syntax = "proto3";
 
 package frontendv2pb;
 
-option go_package = "frontendv2pb";
-
 import "github.com/gogo/protobuf/gogoproto/gogo.proto";
 import "github.com/weaveworks/common/httpgrpc/httpgrpc.proto";
 import "pkg/querier/stats/stats.proto";
 
+option go_package = "frontendv2pb";
 option (gogoproto.marshaler_all) = true;
 option (gogoproto.unmarshaler_all) = true;
 
 // Frontend interface exposed to Queriers. Used by queriers to report back the result of the query.
 service FrontendForQuerier {
-  rpc QueryResult (QueryResultRequest) returns (QueryResultResponse) { };
+  rpc QueryResult(QueryResultRequest) returns (QueryResultResponse) {}
 }
 
 message QueryResultRequest {
-  uint64 queryID = 1;
-  httpgrpc.HTTPResponse httpResponse = 2;
-  stats.Stats stats = 3;
+  uint64 queryID = 1;
+  httpgrpc.HTTPResponse httpResponse = 2;
+  stats.Stats stats = 3;
 
-  // There is no userID field here, because Querier puts userID into the context when
-  // calling QueryResult, and that is where Frontend expects to find it.
+// There is no userID field here, because Querier puts userID into the context when
+// calling QueryResult, and that is where Frontend expects to find it.
} -message QueryResultResponse { } +message QueryResultResponse {} diff --git a/pkg/querier/queryrange/queryrange.pb.go b/pkg/querier/queryrange/queryrange.pb.go index 8faa96547f9fa..3765740897345 100644 --- a/pkg/querier/queryrange/queryrange.pb.go +++ b/pkg/querier/queryrange/queryrange.pb.go @@ -691,65 +691,65 @@ func init() { } var fileDescriptor_51b9d53b40d11902 = []byte{ - // 918 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xdc, 0x54, 0x4f, 0x6f, 0x1b, 0x45, - 0x14, 0xf7, 0xf8, 0xff, 0x4e, 0x68, 0x80, 0x49, 0x69, 0x57, 0x46, 0xda, 0xb5, 0x7c, 0x00, 0x23, - 0xe8, 0x5a, 0xa4, 0xc0, 0x01, 0x01, 0xa2, 0xab, 0x80, 0xa8, 0x54, 0x21, 0xb4, 0xb5, 0xb8, 0xa2, - 0x71, 0x3c, 0xb1, 0x57, 0xd9, 0xdd, 0xd9, 0xcc, 0x8c, 0x2b, 0xe5, 0xc6, 0x17, 0x40, 0xea, 0x67, - 0x00, 0x24, 0x10, 0x9f, 0x82, 0x63, 0x8e, 0x3e, 0x56, 0x95, 0x58, 0x88, 0x73, 0x01, 0x9f, 0xfa, - 0x11, 0xd0, 0xcc, 0xec, 0xae, 0xc7, 0x25, 0x21, 0x71, 0x73, 0x41, 0x5c, 0xec, 0x79, 0x6f, 0xde, - 0x6f, 0xf6, 0xbd, 0xdf, 0xfb, 0xbd, 0x07, 0xdf, 0x4c, 0x0f, 0x27, 0x83, 0xa3, 0x19, 0x61, 0x21, - 0x61, 0xea, 0xff, 0x98, 0xe1, 0x64, 0x42, 0x8c, 0xa3, 0x97, 0x32, 0x2a, 0x28, 0x82, 0x2b, 0x4f, - 0xe7, 0xce, 0x24, 0x14, 0xd3, 0xd9, 0xc8, 0xdb, 0xa7, 0xf1, 0x60, 0x42, 0x27, 0x74, 0xa0, 0x42, - 0x46, 0xb3, 0x03, 0x65, 0x29, 0x43, 0x9d, 0x34, 0xb4, 0xf3, 0xba, 0xfc, 0x46, 0x44, 0x27, 0xfa, - 0xa2, 0x38, 0xe4, 0x97, 0xdd, 0xfc, 0xf2, 0x28, 0x8a, 0xe9, 0x98, 0x44, 0x03, 0x2e, 0xb0, 0xe0, - 0xfa, 0x37, 0x8f, 0xf8, 0xe0, 0xd2, 0x14, 0x47, 0x98, 0xff, 0x33, 0xe3, 0x8e, 0x3b, 0xa1, 0x74, - 0x12, 0x91, 0x55, 0x72, 0x22, 0x8c, 0x09, 0x17, 0x38, 0x4e, 0x75, 0x40, 0x6f, 0x5e, 0x85, 0x5b, - 0x0f, 0xe8, 0x61, 0x18, 0x90, 0xa3, 0x19, 0xe1, 0x02, 0xdd, 0x84, 0x0d, 0xf5, 0x88, 0x0d, 0xba, - 0xa0, 0x6f, 0x05, 0xda, 0x90, 0xde, 0x28, 0x8c, 0x43, 0x61, 0x57, 0xbb, 0xa0, 0x7f, 0x23, 0xd0, - 0x06, 0x42, 0xb0, 0xce, 0x05, 0x49, 0xed, 0x5a, 0x17, 0xf4, 0x6b, 0x81, 0x3a, 0xa3, 0x0e, 0x6c, - 0x87, 0x89, 0x20, 0xec, 0x11, 0x8e, 0x6c, 0x4b, 0xf9, 0x4b, 0x1b, 0x7d, 0x02, 0x5b, 0x5c, 0x60, - 0x26, 0x86, 0xdc, 0xae, 0x77, 0x41, 0x7f, 0x6b, 0xb7, 0xe3, 0xe9, 0xf4, 0xbc, 0x22, 0x3d, 0x6f, - 0x58, 0xa4, 0xe7, 0xb7, 0x4f, 0x32, 0xb7, 0xf2, 0xf8, 0x77, 0x17, 0x04, 0x05, 0x08, 0x7d, 0x08, - 0x1b, 0x24, 0x19, 0x0f, 0xb9, 0xdd, 0xd8, 0x00, 0xad, 0x21, 0xe8, 0x5d, 0x68, 0x8d, 0x43, 0x46, - 0xf6, 0x45, 0x48, 0x13, 0xbb, 0xd9, 0x05, 0xfd, 0xed, 0xdd, 0x1d, 0xaf, 0x6c, 0xc3, 0x5e, 0x71, - 0x15, 0xac, 0xa2, 0x64, 0x79, 0x29, 0x16, 0x53, 0xbb, 0xa5, 0x98, 0x50, 0x67, 0xd4, 0x83, 0x4d, - 0x3e, 0xc5, 0x6c, 0xcc, 0xed, 0x76, 0xb7, 0xd6, 0xb7, 0x7c, 0xb8, 0xcc, 0xdc, 0xdc, 0x13, 0xe4, - 0xff, 0xbd, 0xbf, 0x00, 0x44, 0x92, 0xd2, 0xfb, 0x09, 0x17, 0x38, 0x11, 0x2f, 0xc2, 0xec, 0x47, - 0xb0, 0x29, 0x1b, 0x35, 0xe4, 0x8a, 0xdb, 0xab, 0x96, 0x9a, 0x63, 0xd6, 0x6b, 0xad, 0x6f, 0x54, - 0x6b, 0xe3, 0xdc, 0x5a, 0x9b, 0x17, 0xd6, 0xfa, 0x7d, 0x1d, 0xbe, 0xa4, 0xe5, 0xc3, 0x53, 0x9a, - 0x70, 0x22, 0x41, 0x0f, 0x05, 0x16, 0x33, 0xae, 0xcb, 0xcc, 0x41, 0xca, 0x13, 0xe4, 0x37, 0xe8, - 0x53, 0x58, 0xdf, 0xc3, 0x02, 0xab, 0x92, 0xb7, 0x76, 0x6f, 0x7a, 0x86, 0x6a, 0xe5, 0x5b, 0xf2, - 0xce, 0xbf, 0x25, 0xab, 0x5a, 0x66, 0xee, 0xf6, 0x18, 0x0b, 0xfc, 0x0e, 0x8d, 0x43, 0x41, 0xe2, - 0x54, 0x1c, 0x07, 0x0a, 0x89, 0xde, 0x87, 0xd6, 0x67, 0x8c, 0x51, 0x36, 0x3c, 0x4e, 0x89, 0xa2, - 0xc8, 0xf2, 0x6f, 0x2f, 0x33, 0x77, 0x87, 0x14, 0x4e, 0x03, 0xb1, 0x8a, 0x44, 0x6f, 0xc1, 0x86, - 0x32, 0x14, 0x29, 0x96, 0xbf, 0xb3, 0xcc, 0xdc, 0x97, 0x15, 0xc4, 0x08, 0xd7, 0x11, 
0xeb, 0x1c, - 0x36, 0xae, 0xc4, 0x61, 0xd9, 0xca, 0xa6, 0xd9, 0x4a, 0x1b, 0xb6, 0x1e, 0x11, 0xc6, 0xe5, 0x33, - 0x2d, 0xe5, 0x2f, 0x4c, 0x74, 0x0f, 0x42, 0x49, 0x4c, 0xc8, 0x45, 0xb8, 0x2f, 0xf5, 0x24, 0xc9, - 0xb8, 0xe1, 0xe9, 0xa9, 0x0f, 0x08, 0x9f, 0x45, 0xc2, 0x47, 0x39, 0x0b, 0x46, 0x60, 0x60, 0x9c, - 0xd1, 0x0f, 0x00, 0xb6, 0xbe, 0x20, 0x78, 0x4c, 0x18, 0xb7, 0xad, 0x6e, 0xad, 0xbf, 0xb5, 0xdb, - 0xf7, 0xd6, 0x57, 0x82, 0xf7, 0x15, 0xa3, 0x31, 0x11, 0x53, 0x32, 0xe3, 0x45, 0x8f, 0x34, 0xc0, - 0xff, 0xe6, 0x69, 0xe6, 0x7e, 0x6d, 0x2e, 0x31, 0x86, 0x0f, 0x70, 0x82, 0x07, 0x11, 0x3d, 0x0c, - 0x07, 0x57, 0x5a, 0x37, 0x17, 0xbe, 0xbd, 0xcc, 0x5c, 0x70, 0x27, 0x28, 0x32, 0xeb, 0xfd, 0x06, - 0xe0, 0xab, 0xb2, 0xb1, 0x0f, 0xe5, 0x7b, 0xdc, 0x98, 0x87, 0x18, 0x8b, 0xfd, 0xa9, 0x0d, 0xa4, - 0xba, 0x02, 0x6d, 0x98, 0x3b, 0xa2, 0x7a, 0xad, 0x1d, 0x51, 0xdb, 0x7c, 0x47, 0x14, 0x43, 0x50, - 0x3f, 0x77, 0x08, 0x1a, 0x17, 0x0e, 0xc1, 0xaf, 0x55, 0x3d, 0xf0, 0x45, 0x7d, 0x1b, 0x8c, 0xc2, - 0xe7, 0xe5, 0x28, 0xd4, 0x54, 0xb6, 0xa5, 0xc2, 0xf4, 0x5b, 0xf7, 0xc7, 0x24, 0x11, 0xe1, 0x41, - 0x48, 0xd8, 0x25, 0x03, 0x61, 0xa8, 0xac, 0xb6, 0xae, 0x32, 0x53, 0x22, 0xf5, 0xff, 0xac, 0x44, - 0x7e, 0x02, 0xf0, 0x35, 0x49, 0xe1, 0x03, 0x3c, 0x22, 0xd1, 0x97, 0x38, 0x5e, 0xc9, 0xc4, 0x10, - 0x04, 0xb8, 0x96, 0x20, 0xaa, 0x2f, 0x2e, 0x88, 0xda, 0x4a, 0x10, 0xbd, 0x1f, 0xab, 0xf0, 0xd6, - 0xf3, 0x99, 0x6e, 0xd0, 0xf0, 0x37, 0x8c, 0x86, 0x5b, 0x3e, 0xfa, 0xdf, 0x36, 0xf4, 0x17, 0x00, - 0xdb, 0xc5, 0x32, 0x47, 0x1e, 0x84, 0x7a, 0xa1, 0xa9, 0x7d, 0xad, 0xc9, 0xd9, 0x96, 0x6b, 0x8d, - 0x95, 0xde, 0xc0, 0x88, 0x40, 0x09, 0x6c, 0x6a, 0x2b, 0x9f, 0x8b, 0xdb, 0xc6, 0x5c, 0x08, 0x46, - 0x70, 0x7c, 0x6f, 0x8c, 0x53, 0x41, 0x98, 0xff, 0xb1, 0xec, 0xd8, 0xd3, 0xcc, 0x7d, 0xfb, 0xdf, - 0x6a, 0x7a, 0x0e, 0x2b, 0x9b, 0xa2, 0xbf, 0x1b, 0xe4, 0x5f, 0xe9, 0x7d, 0x07, 0xe0, 0x2b, 0x32, - 0x59, 0x59, 0x5b, 0xd9, 0xcd, 0x3d, 0xd8, 0x66, 0xf9, 0x39, 0x57, 0x5e, 0xef, 0x72, 0x9e, 0xfd, - 0xfa, 0x49, 0xe6, 0x82, 0xa0, 0x44, 0xa2, 0xbb, 0x6b, 0x4b, 0xbe, 0x7a, 0xde, 0x92, 0x97, 0x90, - 0x8a, 0xb9, 0xd6, 0xfd, 0xf7, 0xe6, 0xa7, 0x4e, 0xe5, 0xc9, 0xa9, 0x53, 0x79, 0x76, 0xea, 0x80, - 0x6f, 0x17, 0x0e, 0xf8, 0x79, 0xe1, 0x80, 0x93, 0x85, 0x03, 0xe6, 0x0b, 0x07, 0xfc, 0xb1, 0x70, - 0xc0, 0x9f, 0x0b, 0xa7, 0xf2, 0x6c, 0xe1, 0x80, 0xc7, 0x67, 0x4e, 0x65, 0x7e, 0xe6, 0x54, 0x9e, - 0x9c, 0x39, 0x95, 0x51, 0x53, 0x55, 0x79, 0xf7, 0xef, 0x00, 0x00, 0x00, 0xff, 0xff, 0xb2, 0xd3, - 0xfe, 0x78, 0xcf, 0x0a, 0x00, 0x00, + // 915 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xdc, 0x54, 0x4f, 0x6f, 0x23, 0x35, + 0x14, 0x8f, 0xf3, 0x7f, 0x5c, 0xb6, 0x80, 0xbb, 0xec, 0x8e, 0x8a, 0x34, 0x13, 0xe5, 0x00, 0x41, + 0xb0, 0x13, 0xd1, 0x05, 0x0e, 0x08, 0x10, 0x3b, 0x2a, 0x88, 0x95, 0x56, 0x08, 0x79, 0x23, 0xae, + 0xc8, 0x69, 0xdc, 0x64, 0xd4, 0x99, 0xf1, 0xd4, 0x76, 0x56, 0xea, 0x8d, 0x2f, 0x80, 0xb4, 0x9f, + 0x01, 0x90, 0x40, 0x7c, 0x0a, 0x8e, 0x3d, 0xf6, 0xb8, 0x5a, 0x89, 0x81, 0xa6, 0x17, 0xc8, 0x69, + 0x3f, 0x02, 0xb2, 0x3d, 0x93, 0x38, 0x4b, 0x4b, 0x9b, 0xed, 0x05, 0x71, 0x49, 0xfc, 0xec, 0xf7, + 0xf3, 0xf8, 0xf7, 0x7b, 0xbf, 0xf7, 0xe0, 0x9b, 0xd9, 0xc1, 0xb8, 0x7f, 0x38, 0xa5, 0x3c, 0xa2, + 0x5c, 0xff, 0x1f, 0x71, 0x92, 0x8e, 0xa9, 0xb5, 0x0c, 0x32, 0xce, 0x24, 0x43, 0x70, 0xb9, 0xb3, + 0x7d, 0x67, 0x1c, 0xc9, 0xc9, 0x74, 0x18, 0xec, 0xb1, 0xa4, 0x3f, 0x66, 0x63, 0xd6, 0xd7, 0x29, + 0xc3, 0xe9, 0xbe, 0x8e, 0x74, 0xa0, 0x57, 0x06, 0xba, 0xed, 0x8f, 0x19, 0x1b, 0xc7, 0x74, 0x99, + 0x25, 0xa3, 0x84, 0x0a, 
0x49, 0x92, 0xac, 0x48, 0x78, 0x5d, 0x3d, 0x22, 0x66, 0x63, 0x83, 0x2c, + 0x17, 0xc5, 0x61, 0xa7, 0x38, 0x3c, 0x8c, 0x13, 0x36, 0xa2, 0x71, 0x5f, 0x48, 0x22, 0x85, 0xf9, + 0x2d, 0x32, 0x3e, 0xb8, 0x94, 0xc3, 0x90, 0x88, 0x7f, 0x52, 0xea, 0x9e, 0x54, 0xe1, 0xc6, 0x03, + 0x76, 0x10, 0x61, 0x7a, 0x38, 0xa5, 0x42, 0xa2, 0x9b, 0xb0, 0xa1, 0x73, 0x5c, 0xd0, 0x01, 0x3d, + 0x07, 0x9b, 0x40, 0xed, 0xc6, 0x51, 0x12, 0x49, 0xb7, 0xda, 0x01, 0xbd, 0x1b, 0xd8, 0x04, 0x08, + 0xc1, 0xba, 0x90, 0x34, 0x73, 0x6b, 0x1d, 0xd0, 0xab, 0x61, 0xbd, 0x46, 0xdb, 0xb0, 0x1d, 0xa5, + 0x92, 0xf2, 0x47, 0x24, 0x76, 0x1d, 0xbd, 0xbf, 0x88, 0xd1, 0x27, 0xb0, 0x25, 0x24, 0xe1, 0x72, + 0x20, 0xdc, 0x7a, 0x07, 0xf4, 0x36, 0x76, 0xb6, 0x03, 0xa3, 0x4a, 0x50, 0xaa, 0x12, 0x0c, 0x4a, + 0x55, 0xc2, 0xf6, 0x71, 0xee, 0x57, 0x1e, 0xff, 0xee, 0x03, 0x5c, 0x82, 0xd0, 0x87, 0xb0, 0x41, + 0xd3, 0xd1, 0x40, 0xb8, 0x8d, 0x35, 0xd0, 0x06, 0x82, 0xde, 0x85, 0xce, 0x28, 0xe2, 0x74, 0x4f, + 0x46, 0x2c, 0x75, 0x9b, 0x1d, 0xd0, 0xdb, 0xdc, 0xd9, 0x0a, 0x16, 0x2a, 0xef, 0x96, 0x47, 0x78, + 0x99, 0xa5, 0xe8, 0x65, 0x44, 0x4e, 0xdc, 0x96, 0x56, 0x42, 0xaf, 0x51, 0x17, 0x36, 0xc5, 0x84, + 0xf0, 0x91, 0x70, 0xdb, 0x9d, 0x5a, 0xcf, 0x09, 0xe1, 0x3c, 0xf7, 0x8b, 0x1d, 0x5c, 0xfc, 0x77, + 0xff, 0x02, 0x10, 0x29, 0x49, 0xef, 0xa7, 0x42, 0x92, 0x54, 0xbe, 0x88, 0xb2, 0x1f, 0xc1, 0xa6, + 0xf2, 0xc7, 0x40, 0x68, 0x6d, 0xaf, 0x4a, 0xb5, 0xc0, 0xac, 0x72, 0xad, 0xaf, 0xc5, 0xb5, 0x71, + 0x2e, 0xd7, 0xe6, 0x85, 0x5c, 0xbf, 0xaf, 0xc3, 0x97, 0x8c, 0x7d, 0x44, 0xc6, 0x52, 0x41, 0x15, + 0xe8, 0xa1, 0x24, 0x72, 0x2a, 0x0c, 0xcd, 0x02, 0xa4, 0x77, 0x70, 0x71, 0x82, 0x3e, 0x85, 0xf5, + 0x5d, 0x22, 0x89, 0xa6, 0xbc, 0xb1, 0x73, 0x33, 0xb0, 0x4c, 0xa9, 0xee, 0x52, 0x67, 0xe1, 0x2d, + 0xc5, 0x6a, 0x9e, 0xfb, 0x9b, 0x23, 0x22, 0xc9, 0x3b, 0x2c, 0x89, 0x24, 0x4d, 0x32, 0x79, 0x84, + 0x35, 0x12, 0xbd, 0x0f, 0x9d, 0xcf, 0x38, 0x67, 0x7c, 0x70, 0x94, 0x51, 0x2d, 0x91, 0x13, 0xde, + 0x9e, 0xe7, 0xfe, 0x16, 0x2d, 0x37, 0x2d, 0xc4, 0x32, 0x13, 0xbd, 0x05, 0x1b, 0x3a, 0xd0, 0xa2, + 0x38, 0xe1, 0xd6, 0x3c, 0xf7, 0x5f, 0xd6, 0x10, 0x2b, 0xdd, 0x64, 0xac, 0x6a, 0xd8, 0xb8, 0x92, + 0x86, 0x8b, 0x52, 0x36, 0xed, 0x52, 0xba, 0xb0, 0xf5, 0x88, 0x72, 0xa1, 0xae, 0x69, 0xe9, 0xfd, + 0x32, 0x44, 0xf7, 0x20, 0x54, 0xc2, 0x44, 0x42, 0x46, 0x7b, 0xca, 0x4f, 0x4a, 0x8c, 0x1b, 0x81, + 0x69, 0x6a, 0x4c, 0xc5, 0x34, 0x96, 0x21, 0x2a, 0x54, 0xb0, 0x12, 0xb1, 0xb5, 0x46, 0x3f, 0x00, + 0xd8, 0xfa, 0x82, 0x92, 0x11, 0xe5, 0xc2, 0x75, 0x3a, 0xb5, 0xde, 0xc6, 0x4e, 0x2f, 0x58, 0xed, + 0xf8, 0xe0, 0x2b, 0xce, 0x12, 0x2a, 0x27, 0x74, 0x2a, 0xca, 0x1a, 0x19, 0x40, 0xf8, 0xcd, 0xd3, + 0xdc, 0xff, 0xda, 0x1e, 0x62, 0x9c, 0xec, 0x93, 0x94, 0xf4, 0x63, 0x76, 0x10, 0xf5, 0xaf, 0x34, + 0x4d, 0x2e, 0xbc, 0x7b, 0x9e, 0xfb, 0xe0, 0x0e, 0x2e, 0x5f, 0xd6, 0xfd, 0x0d, 0xc0, 0x57, 0x55, + 0x61, 0x1f, 0xaa, 0xfb, 0x84, 0xd5, 0x0f, 0x09, 0x91, 0x7b, 0x13, 0x17, 0x28, 0x77, 0x61, 0x13, + 0xd8, 0x33, 0xa2, 0x7a, 0xad, 0x19, 0x51, 0x5b, 0x7f, 0x46, 0x94, 0x4d, 0x50, 0x3f, 0xb7, 0x09, + 0x1a, 0x17, 0x36, 0xc1, 0xaf, 0x55, 0xd3, 0xf0, 0x25, 0xbf, 0x35, 0x5a, 0xe1, 0xf3, 0x45, 0x2b, + 0xd4, 0xf4, 0x6b, 0x17, 0x0e, 0x33, 0x77, 0xdd, 0x1f, 0xd1, 0x54, 0x46, 0xfb, 0x11, 0xe5, 0x97, + 0x34, 0x84, 0xe5, 0xb2, 0xda, 0xaa, 0xcb, 0x6c, 0x8b, 0xd4, 0xff, 0xb3, 0x16, 0xf9, 0x09, 0xc0, + 0xd7, 0x94, 0x84, 0x0f, 0xc8, 0x90, 0xc6, 0x5f, 0x92, 0x64, 0x69, 0x13, 0xcb, 0x10, 0xe0, 0x5a, + 0x86, 0xa8, 0xbe, 0xb8, 0x21, 0x6a, 0x4b, 0x43, 0x74, 0x7f, 0xac, 0xc2, 0x5b, 0xcf, 0xbf, 0x74, + 0x8d, 0x82, 0xbf, 0x61, 0x15, 0xdc, 0x09, 0xd1, 
0xff, 0xb6, 0xa0, 0xbf, 0x00, 0xd8, 0x2e, 0x87, + 0x39, 0x0a, 0x20, 0x34, 0x03, 0x4d, 0xcf, 0x6b, 0x23, 0xce, 0xa6, 0x1a, 0x6b, 0x7c, 0xb1, 0x8b, + 0xad, 0x0c, 0x94, 0xc2, 0xa6, 0x89, 0x8a, 0xbe, 0xb8, 0x6d, 0xf5, 0x85, 0xe4, 0x94, 0x24, 0xf7, + 0x46, 0x24, 0x93, 0x94, 0x87, 0x1f, 0xab, 0x8a, 0x3d, 0xcd, 0xfd, 0xb7, 0xff, 0x8d, 0xd3, 0x73, + 0x58, 0x55, 0x14, 0xf3, 0x5d, 0x5c, 0x7c, 0xa5, 0xfb, 0x1d, 0x80, 0xaf, 0xa8, 0xc7, 0x2a, 0x6e, + 0x8b, 0x6a, 0xee, 0xc2, 0x36, 0x2f, 0xd6, 0x85, 0xf3, 0xba, 0x97, 0xeb, 0x1c, 0xd6, 0x8f, 0x73, + 0x1f, 0xe0, 0x05, 0x12, 0xdd, 0x5d, 0x19, 0xf2, 0xd5, 0xf3, 0x86, 0xbc, 0x82, 0x54, 0xec, 0xb1, + 0x1e, 0xbe, 0x77, 0x72, 0xea, 0x55, 0x9e, 0x9c, 0x7a, 0x95, 0x67, 0xa7, 0x1e, 0xf8, 0x76, 0xe6, + 0x81, 0x9f, 0x67, 0x1e, 0x38, 0x9e, 0x79, 0xe0, 0x64, 0xe6, 0x81, 0x3f, 0x66, 0x1e, 0xf8, 0x73, + 0xe6, 0x55, 0x9e, 0xcd, 0x3c, 0xf0, 0xf8, 0xcc, 0xab, 0x9c, 0x9c, 0x79, 0x95, 0x27, 0x67, 0x5e, + 0x65, 0xd8, 0xd4, 0x2c, 0xef, 0xfe, 0x1d, 0x00, 0x00, 0xff, 0xff, 0xa6, 0x19, 0xca, 0xfc, 0xcf, + 0x0a, 0x00, 0x00, } func (this *LokiRequest) Equal(that interface{}) bool { diff --git a/pkg/querier/queryrange/queryrange.proto b/pkg/querier/queryrange/queryrange.proto index 21d47641a28ca..48593f659dad4 100644 --- a/pkg/querier/queryrange/queryrange.proto +++ b/pkg/querier/queryrange/queryrange.proto @@ -3,10 +3,10 @@ syntax = "proto3"; package queryrange; import "github.com/gogo/protobuf/gogoproto/gogo.proto"; +import "google/protobuf/timestamp.proto"; import "pkg/logproto/logproto.proto"; import "pkg/logqlmodel/stats/stats.proto"; import "pkg/querier/queryrange/queryrangebase/queryrange.proto"; -import "google/protobuf/timestamp.proto"; option (gogoproto.marshaler_all) = true; option (gogoproto.unmarshaler_all) = true; @@ -16,8 +16,14 @@ message LokiRequest { uint32 limit = 2; int64 step = 3; int64 interval = 9; - google.protobuf.Timestamp startTs = 4 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false]; - google.protobuf.Timestamp endTs = 5 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false]; + google.protobuf.Timestamp startTs = 4 [ + (gogoproto.stdtime) = true, + (gogoproto.nullable) = false + ]; + google.protobuf.Timestamp endTs = 5 [ + (gogoproto.stdtime) = true, + (gogoproto.nullable) = false + ]; logproto.Direction direction = 6; string path = 7; repeated string shards = 8 [(gogoproto.jsontag) = "shards"]; @@ -26,7 +32,10 @@ message LokiRequest { message LokiInstantRequest { string query = 1; uint32 limit = 2; - google.protobuf.Timestamp timeTs = 3 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false]; + google.protobuf.Timestamp timeTs = 3 [ + (gogoproto.stdtime) = true, + (gogoproto.nullable) = false + ]; logproto.Direction direction = 4; string path = 5; repeated string shards = 6 [(gogoproto.jsontag) = "shards"]; @@ -34,35 +43,61 @@ message LokiInstantRequest { message LokiResponse { string Status = 1 [(gogoproto.jsontag) = "status"]; - LokiData Data = 2 [(gogoproto.nullable) = false, (gogoproto.jsontag) = "data,omitempty"]; + LokiData Data = 2 [ + (gogoproto.nullable) = false, + (gogoproto.jsontag) = "data,omitempty" + ]; string ErrorType = 3 [(gogoproto.jsontag) = "errorType,omitempty"]; string Error = 4 [(gogoproto.jsontag) = "error,omitempty"]; logproto.Direction direction = 5; uint32 limit = 6; uint32 version = 7; - stats.Result statistics = 8 [(gogoproto.nullable) = false, (gogoproto.jsontag) = "statistics"]; - repeated queryrangebase.PrometheusResponseHeader Headers = 9 [(gogoproto.jsontag) = "-", (gogoproto.customtype) = 
"github.com/grafana/loki/pkg/querier/queryrange/queryrangebase.PrometheusResponseHeader"]; + stats.Result statistics = 8 [ + (gogoproto.nullable) = false, + (gogoproto.jsontag) = "statistics" + ]; + repeated queryrangebase.PrometheusResponseHeader Headers = 9 [ + (gogoproto.jsontag) = "-", + (gogoproto.customtype) = "github.com/grafana/loki/pkg/querier/queryrange/queryrangebase.PrometheusResponseHeader" + ]; } message LokiSeriesRequest { repeated string match = 1; - google.protobuf.Timestamp startTs = 2 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false]; - google.protobuf.Timestamp endTs = 3 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false]; + google.protobuf.Timestamp startTs = 2 [ + (gogoproto.stdtime) = true, + (gogoproto.nullable) = false + ]; + google.protobuf.Timestamp endTs = 3 [ + (gogoproto.stdtime) = true, + (gogoproto.nullable) = false + ]; string path = 4; repeated string shards = 5 [(gogoproto.jsontag) = "shards"]; - } message LokiSeriesResponse { string Status = 1 [(gogoproto.jsontag) = "status"]; - repeated logproto.SeriesIdentifier Data = 2 [(gogoproto.nullable) = false, (gogoproto.jsontag) = "data,omitempty"]; + repeated logproto.SeriesIdentifier Data = 2 [ + (gogoproto.nullable) = false, + (gogoproto.jsontag) = "data,omitempty" + ]; uint32 version = 3; - repeated queryrangebase.PrometheusResponseHeader Headers = 4 [(gogoproto.jsontag) = "-", (gogoproto.customtype) = "github.com/grafana/loki/pkg/querier/queryrange/queryrangebase.PrometheusResponseHeader"]; + repeated queryrangebase.PrometheusResponseHeader Headers = 4 [ + (gogoproto.jsontag) = "-", + (gogoproto.customtype) = "github.com/grafana/loki/pkg/querier/queryrange/queryrangebase.PrometheusResponseHeader" + ]; } message LokiLabelNamesRequest { - google.protobuf.Timestamp startTs = 1 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false]; - google.protobuf.Timestamp endTs = 2 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false]; + google.protobuf.Timestamp startTs = 1 [ + (gogoproto.stdtime) = true, + (gogoproto.nullable) = false + ]; + google.protobuf.Timestamp endTs = 2 [ + (gogoproto.stdtime) = true, + (gogoproto.nullable) = false + ]; string path = 3; } @@ -70,15 +105,21 @@ message LokiLabelNamesResponse { string Status = 1 [(gogoproto.jsontag) = "status"]; repeated string Data = 2 [(gogoproto.jsontag) = "data,omitempty"]; uint32 version = 3; - repeated queryrangebase.PrometheusResponseHeader Headers = 4 [(gogoproto.jsontag) = "-", (gogoproto.customtype) = "github.com/grafana/loki/pkg/querier/queryrange/queryrangebase.PrometheusResponseHeader"]; + repeated queryrangebase.PrometheusResponseHeader Headers = 4 [ + (gogoproto.jsontag) = "-", + (gogoproto.customtype) = "github.com/grafana/loki/pkg/querier/queryrange/queryrangebase.PrometheusResponseHeader" + ]; } message LokiData { string ResultType = 1 [(gogoproto.jsontag) = "resultType"]; - repeated logproto.StreamAdapter Result = 2 [(gogoproto.nullable) = false, (gogoproto.jsontag) = "result", (gogoproto.customtype) = "github.com/grafana/loki/pkg/logproto.Stream"]; + repeated logproto.StreamAdapter Result = 2 [ + (gogoproto.nullable) = false, + (gogoproto.jsontag) = "result", + (gogoproto.customtype) = "github.com/grafana/loki/pkg/logproto.Stream" + ]; } - // LokiPromResponse wraps a Prometheus response with statistics. 
message LokiPromResponse { queryrangebase.PrometheusResponse response = 1 [(gogoproto.nullable) = true]; diff --git a/pkg/querier/queryrange/queryrangebase/queryrange.pb.go b/pkg/querier/queryrange/queryrangebase/queryrange.pb.go index dd5ff378e1a56..5b7689d17f58d 100644 --- a/pkg/querier/queryrange/queryrangebase/queryrange.pb.go +++ b/pkg/querier/queryrange/queryrangebase/queryrange.pb.go @@ -592,8 +592,8 @@ var fileDescriptor_4cc6a0c1d6b614c4 = []byte{ 0xbf, 0xf7, 0xf8, 0x84, 0x3e, 0xcf, 0x4e, 0x83, 0xd9, 0xab, 0x1c, 0x58, 0x08, 0x4c, 0xfe, 0x5f, 0x32, 0x9a, 0x04, 0xd0, 0x12, 0xe7, 0x94, 0xb7, 0x55, 0x37, 0x63, 0xa9, 0x48, 0xf1, 0xe8, 0x2a, 0x60, 0xef, 0x41, 0x10, 0x8a, 0x93, 0x7c, 0xee, 0x2e, 0xd2, 0x78, 0x16, 0xa4, 0x41, 0x3a, 0x93, - 0xb0, 0x79, 0xfe, 0x52, 0x6a, 0x52, 0x91, 0x92, 0x0a, 0xdf, 0xb3, 0x82, 0x34, 0x0d, 0x22, 0xd8, - 0xa0, 0xfc, 0x9c, 0x51, 0x11, 0xa6, 0x49, 0xed, 0x1f, 0x6f, 0xfb, 0x69, 0xb2, 0xac, 0x5d, 0xef, + 0xb0, 0x79, 0xfe, 0x52, 0x6a, 0x52, 0x91, 0x92, 0x0a, 0xdf, 0x1b, 0x07, 0x69, 0x1a, 0x44, 0xb0, + 0x41, 0xd1, 0x64, 0x59, 0xbb, 0xac, 0x6d, 0x97, 0x9f, 0x33, 0x2a, 0xc2, 0x34, 0xa9, 0xfd, 0xef, 0x57, 0x1d, 0x47, 0x69, 0xa0, 0x72, 0x36, 0x82, 0x72, 0x3a, 0xc7, 0xe8, 0xde, 0x73, 0x96, 0xc6, 0x20, 0x4e, 0x20, 0xe7, 0x04, 0x5e, 0xe5, 0xc0, 0xc5, 0x13, 0xa0, 0x3e, 0x30, 0x3c, 0x46, 0xdd, 0xaf, 0x68, 0x0c, 0xa6, 0x36, 0xd1, 0xa6, 0x43, 0xaf, 0x57, 0x16, 0xb6, 0xf6, 0x80, 0x48, 0x13, @@ -601,8 +601,8 @@ var fileDescriptor_4cc6a0c1d6b614c4 = []byte{ 0x3b, 0xe8, 0xbd, 0x6b, 0x59, 0x31, 0x46, 0xdd, 0x8c, 0x8a, 0x13, 0x95, 0x8f, 0x48, 0x19, 0xdf, 0x41, 0x3d, 0x2e, 0x28, 0x13, 0xe6, 0xce, 0x44, 0x9b, 0xea, 0x44, 0x29, 0xf8, 0x16, 0xd2, 0x21, 0xf1, 0x4d, 0x5d, 0xda, 0x2a, 0xb1, 0x8a, 0xe5, 0x02, 0x32, 0xb3, 0x2b, 0x4d, 0x52, 0xc6, 0x0f, - 0x51, 0x5f, 0x84, 0x31, 0xa4, 0xb9, 0x30, 0x7b, 0x13, 0x6d, 0xba, 0xbb, 0x3f, 0x76, 0x15, 0x09, - 0x6e, 0x43, 0x82, 0x7b, 0x58, 0x93, 0xe4, 0x0d, 0xce, 0x0a, 0xbb, 0xf3, 0xf3, 0x1f, 0xb6, 0x46, + 0x51, 0x5f, 0x84, 0x31, 0xa4, 0xb9, 0x30, 0x7b, 0x13, 0x6d, 0xba, 0xbb, 0x3f, 0x76, 0x15, 0x13, + 0x6e, 0xc3, 0x84, 0x7b, 0x58, 0x33, 0xe1, 0x0d, 0xce, 0x0a, 0xbb, 0xf3, 0xf3, 0x1f, 0xb6, 0x46, 0x9a, 0x98, 0xaa, 0xb4, 0x1c, 0x89, 0x69, 0xc8, 0x7e, 0x94, 0x82, 0x8f, 0xd0, 0x68, 0x41, 0x17, 0x27, 0x61, 0x12, 0x7c, 0x9d, 0x55, 0x91, 0xdc, 0xec, 0xcb, 0xdc, 0x96, 0x7b, 0x75, 0x7e, 0xee, 0xa3, 0x2b, 0x28, 0xaf, 0x5b, 0x15, 0x20, 0x5b, 0xb1, 0xf8, 0x09, 0xea, 0x2b, 0x32, 0xb9, 0x39, @@ -636,8 +636,8 @@ var fileDescriptor_4cc6a0c1d6b614c4 = []byte{ 0x58, 0x9d, 0xb7, 0x17, 0x96, 0xf6, 0xc3, 0xca, 0xd2, 0x7e, 0x59, 0x59, 0xda, 0xd9, 0xca, 0xd2, 0xce, 0x57, 0x96, 0xf6, 0xe7, 0xca, 0xd2, 0xfe, 0x5a, 0x59, 0x9d, 0xb7, 0x2b, 0x4b, 0xfb, 0xe9, 0xd2, 0xea, 0x9c, 0x5f, 0x5a, 0x9d, 0x37, 0x97, 0x56, 0xe7, 0xdb, 0x87, 0xef, 0xda, 0x84, 0xff, - 0xfc, 0xbd, 0x9b, 0x1b, 0xf2, 0x39, 0x9f, 0xfe, 0x1d, 0x00, 0x00, 0xff, 0xff, 0xb3, 0x10, 0x2b, - 0x2c, 0x1f, 0x07, 0x00, 0x00, + 0xfc, 0xbd, 0x9b, 0x1b, 0xf2, 0x39, 0x9f, 0xfe, 0x1d, 0x00, 0x00, 0xff, 0xff, 0x93, 0x01, 0x6e, + 0x11, 0x1f, 0x07, 0x00, 0x00, } func (this *PrometheusRequestHeader) Equal(that interface{}) bool { diff --git a/pkg/querier/queryrange/queryrangebase/queryrange.proto b/pkg/querier/queryrange/queryrangebase/queryrange.proto index f49d3e0a60dc7..7d1666fb86b17 100644 --- a/pkg/querier/queryrange/queryrangebase/queryrange.proto +++ b/pkg/querier/queryrange/queryrangebase/queryrange.proto @@ -2,13 +2,12 @@ syntax = "proto3"; package queryrangebase; -option go_package = 
"github.com/grafana/loki/pkg/querier/queryrange/queryrangebase"; - import "github.com/gogo/protobuf/gogoproto/gogo.proto"; -import "google/protobuf/duration.proto"; import "google/protobuf/any.proto"; +import "google/protobuf/duration.proto"; import "pkg/logproto/logproto.proto"; +option go_package = "github.com/grafana/loki/pkg/querier/queryrange/queryrangebase"; option (gogoproto.marshaler_all) = true; option (gogoproto.unmarshaler_all) = true; @@ -21,7 +20,10 @@ message PrometheusRequest { int64 start = 2; int64 end = 3; int64 step = 4; - google.protobuf.Duration timeout = 5 [(gogoproto.stdduration) = true, (gogoproto.nullable) = false]; + google.protobuf.Duration timeout = 5 [ + (gogoproto.stdduration) = true, + (gogoproto.nullable) = false + ]; string query = 6; CachingOptions cachingOptions = 7 [(gogoproto.nullable) = false]; repeated PrometheusRequestHeader Headers = 8 [(gogoproto.jsontag) = "-"]; @@ -34,7 +36,10 @@ message PrometheusResponseHeader { message PrometheusResponse { string Status = 1 [(gogoproto.jsontag) = "status"]; - PrometheusData Data = 2 [(gogoproto.nullable) = false, (gogoproto.jsontag) = "data,omitempty"]; + PrometheusData Data = 2 [ + (gogoproto.nullable) = false, + (gogoproto.jsontag) = "data,omitempty" + ]; string ErrorType = 3 [(gogoproto.jsontag) = "errorType,omitempty"]; string Error = 4 [(gogoproto.jsontag) = "error,omitempty"]; repeated PrometheusResponseHeader Headers = 5 [(gogoproto.jsontag) = "-"]; @@ -42,23 +47,36 @@ message PrometheusResponse { message PrometheusData { string ResultType = 1 [(gogoproto.jsontag) = "resultType"]; - repeated SampleStream Result = 2 [(gogoproto.nullable) = false, (gogoproto.jsontag) = "result"]; + repeated SampleStream Result = 2 [ + (gogoproto.nullable) = false, + (gogoproto.jsontag) = "result" + ]; } message SampleStream { - repeated logproto.LegacyLabelPair labels = 1 [(gogoproto.nullable) = false, (gogoproto.jsontag) = "metric", (gogoproto.customtype) = "github.com/grafana/loki/pkg/logproto.LabelAdapter"]; - repeated logproto.LegacySample samples = 2 [(gogoproto.nullable) = false, (gogoproto.jsontag) = "values"]; + repeated logproto.LegacyLabelPair labels = 1 [ + (gogoproto.nullable) = false, + (gogoproto.jsontag) = "metric", + (gogoproto.customtype) = "github.com/grafana/loki/pkg/logproto.LabelAdapter" + ]; + repeated logproto.LegacySample samples = 2 [ + (gogoproto.nullable) = false, + (gogoproto.jsontag) = "values" + ]; } -message CachedResponse { - string key = 1 [(gogoproto.jsontag) = "key"]; +message CachedResponse { + string key = 1 [(gogoproto.jsontag) = "key"]; - // List of cached responses; non-overlapping and in order. - repeated Extent extents = 2 [(gogoproto.nullable) = false, (gogoproto.jsontag) = "extents"]; + // List of cached responses; non-overlapping and in order. + repeated Extent extents = 2 [ + (gogoproto.nullable) = false, + (gogoproto.jsontag) = "extents" + ]; } -message Extent { - int64 start = 1 [(gogoproto.jsontag) = "start"]; +message Extent { + int64 start = 1 [(gogoproto.jsontag) = "start"]; int64 end = 2 [(gogoproto.jsontag) = "end"]; // reserved the previous key to ensure cache transition reserved 3; diff --git a/pkg/querier/stats/stats.proto b/pkg/querier/stats/stats.proto index 79f74334d1683..11a1753d320c8 100644 --- a/pkg/querier/stats/stats.proto +++ b/pkg/querier/stats/stats.proto @@ -11,7 +11,10 @@ option (gogoproto.unmarshaler_all) = true; message Stats { // The sum of all wall time spent in the querier to execute the query. 
- google.protobuf.Duration wall_time = 1 [(gogoproto.stdduration) = true, (gogoproto.nullable) = false]; + google.protobuf.Duration wall_time = 1 [ + (gogoproto.stdduration) = true, + (gogoproto.nullable) = false + ]; // The number of series fetched for the query uint64 fetched_series_count = 2; // The number of bytes of the chunks fetched for the query diff --git a/pkg/ruler/base/ruler.proto b/pkg/ruler/base/ruler.proto index 782dd5002b6a0..8e1541037a161 100644 --- a/pkg/ruler/base/ruler.proto +++ b/pkg/ruler/base/ruler.proto @@ -2,6 +2,7 @@ // This service is used to retrieve the current state of rules running across // all Rulers in a cluster. syntax = "proto3"; + package base; import "github.com/gogo/protobuf/gogoproto/gogo.proto"; @@ -14,7 +15,7 @@ option (gogoproto.marshaler_all) = true; option (gogoproto.unmarshaler_all) = true; service Ruler { - rpc Rules(RulesRequest) returns (RulesResponse) {}; + rpc Rules(RulesRequest) returns (RulesResponse) {} } message RulesRequest {} @@ -27,8 +28,14 @@ message RulesResponse { message GroupStateDesc { rules.RuleGroupDesc group = 1; repeated RuleStateDesc active_rules = 2; - google.protobuf.Timestamp evaluationTimestamp = 3 [(gogoproto.nullable) = false, (gogoproto.stdtime) = true]; - google.protobuf.Duration evaluationDuration = 4 [(gogoproto.nullable) = false,(gogoproto.stdduration) = true]; + google.protobuf.Timestamp evaluationTimestamp = 3 [ + (gogoproto.nullable) = false, + (gogoproto.stdtime) = true + ]; + google.protobuf.Duration evaluationDuration = 4 [ + (gogoproto.nullable) = false, + (gogoproto.stdduration) = true + ]; } // RuleStateDesc is a proto representation of a Prometheus Rule @@ -38,8 +45,14 @@ message RuleStateDesc { string health = 3; string lastError = 4; repeated AlertStateDesc alerts = 5; - google.protobuf.Timestamp evaluationTimestamp = 6 [(gogoproto.nullable) = false, (gogoproto.stdtime) = true]; - google.protobuf.Duration evaluationDuration = 7 [(gogoproto.nullable) = false,(gogoproto.stdduration) = true]; + google.protobuf.Timestamp evaluationTimestamp = 6 [ + (gogoproto.nullable) = false, + (gogoproto.stdtime) = true + ]; + google.protobuf.Duration evaluationDuration = 7 [ + (gogoproto.nullable) = false, + (gogoproto.stdduration) = true + ]; } message AlertStateDesc { @@ -53,14 +66,24 @@ message AlertStateDesc { (gogoproto.customtype) = "github.com/grafana/loki/pkg/logproto.LabelAdapter" ]; double value = 4; - google.protobuf.Timestamp active_at = 5 - [(gogoproto.nullable) = false, (gogoproto.stdtime) = true]; - google.protobuf.Timestamp fired_at = 6 - [(gogoproto.nullable) = false, (gogoproto.stdtime) = true]; - google.protobuf.Timestamp resolved_at = 7 - [(gogoproto.nullable) = false, (gogoproto.stdtime) = true]; - google.protobuf.Timestamp last_sent_at = 8 - [(gogoproto.nullable) = false, (gogoproto.stdtime) = true]; - google.protobuf.Timestamp valid_until = 9 - [(gogoproto.nullable) = false, (gogoproto.stdtime) = true]; -} \ No newline at end of file + google.protobuf.Timestamp active_at = 5 [ + (gogoproto.nullable) = false, + (gogoproto.stdtime) = true + ]; + google.protobuf.Timestamp fired_at = 6 [ + (gogoproto.nullable) = false, + (gogoproto.stdtime) = true + ]; + google.protobuf.Timestamp resolved_at = 7 [ + (gogoproto.nullable) = false, + (gogoproto.stdtime) = true + ]; + google.protobuf.Timestamp last_sent_at = 8 [ + (gogoproto.nullable) = false, + (gogoproto.stdtime) = true + ]; + google.protobuf.Timestamp valid_until = 9 [ + (gogoproto.nullable) = false, + (gogoproto.stdtime) = true + ]; +} diff --git 
a/pkg/ruler/rulespb/rules.pb.go b/pkg/ruler/rulespb/rules.pb.go index f4c4360ef0ed0..95f06f7f1c695 100644 --- a/pkg/ruler/rulespb/rules.pb.go +++ b/pkg/ruler/rulespb/rules.pb.go @@ -207,13 +207,13 @@ var fileDescriptor_dd3ef3757f506fba = []byte{ 0x21, 0x2e, 0x5c, 0xe2, 0xef, 0xcb, 0xf3, 0xf3, 0x7b, 0xdf, 0x73, 0x02, 0x1f, 0xe4, 0x67, 0x49, 0x20, 0x2b, 0xce, 0xa4, 0x7e, 0x16, 0x79, 0x64, 0x56, 0x92, 0x4b, 0x51, 0x0a, 0xd4, 0xd5, 0xcd, 0xc1, 0xe3, 0x24, 0x2d, 0xdf, 0x56, 0x11, 0x89, 0xc5, 0x79, 0x90, 0x88, 0x44, 0x04, 0x1a, 0x8d, - 0xaa, 0xb9, 0xee, 0x74, 0xa3, 0x2b, 0xc3, 0x3a, 0xc0, 0x89, 0x10, 0x09, 0x67, 0x9b, 0x5d, 0xb3, - 0x4a, 0xd2, 0x32, 0x15, 0x59, 0x83, 0x0f, 0x7e, 0xc7, 0x69, 0xb6, 0x68, 0xa0, 0x7b, 0xca, 0x0f, + 0xaa, 0xb9, 0xee, 0x74, 0xa3, 0x2b, 0xc3, 0x3a, 0x18, 0x24, 0x42, 0x24, 0x9c, 0x6d, 0x76, 0xd1, + 0x6c, 0xd1, 0x40, 0xf8, 0x77, 0x68, 0x56, 0x49, 0x5a, 0xa6, 0x22, 0x6b, 0xf0, 0x7b, 0xca, 0x0f, 0x17, 0x89, 0x39, 0xb3, 0x2d, 0x0c, 0x78, 0xf8, 0x03, 0xc0, 0xbd, 0xb0, 0xe2, 0xec, 0xa5, 0x14, 0x55, 0x7e, 0xc2, 0x8a, 0x18, 0x21, 0x68, 0x67, 0xf4, 0x9c, 0xf9, 0x60, 0x04, 0xc6, 0x5e, 0xa8, 0x6b, 0x74, 0x1f, 0x7a, 0x6a, 0x2d, 0x72, 0x1a, 0x33, 0x7f, 0x47, 0x03, 0x9b, 0x17, 0xe8, 0x05, - 0x74, 0xd3, 0xac, 0x64, 0xf2, 0x82, 0x72, 0xbf, 0x33, 0x02, 0xe3, 0xde, 0xd1, 0x80, 0x18, 0x3b, - 0xa4, 0xb5, 0x43, 0x4e, 0x1a, 0xbb, 0x13, 0xf7, 0xf2, 0x7a, 0x68, 0x7d, 0xfc, 0x32, 0x04, 0xe1, + 0x74, 0xd3, 0xac, 0x64, 0xf2, 0x82, 0x72, 0xbf, 0x33, 0x02, 0xe3, 0xde, 0xd1, 0x80, 0x18, 0x4f, + 0xa4, 0xf5, 0x44, 0x4e, 0x1a, 0x4f, 0x13, 0xf7, 0xf2, 0x7a, 0x68, 0x7d, 0xfc, 0x32, 0x04, 0xe1, 0x2d, 0x09, 0x3d, 0x84, 0x26, 0x14, 0xdf, 0x1e, 0x75, 0xc6, 0xbd, 0xa3, 0x3b, 0xc4, 0xe4, 0xa5, 0x7c, 0x29, 0x4b, 0xa1, 0x41, 0x95, 0xb3, 0xaa, 0x60, 0xd2, 0x77, 0x8c, 0x33, 0x55, 0x23, 0x02, 0x77, 0x45, 0xae, 0x0e, 0x2e, 0x7c, 0x4f, 0x93, 0xf7, 0xff, 0x90, 0x3e, 0xce, 0x16, 0x61, 0xbb, @@ -229,7 +229,7 @@ var fileDescriptor_dd3ef3757f506fba = []byte{ 0xdd, 0xac, 0x30, 0x78, 0x5f, 0x63, 0xf0, 0xa9, 0xc6, 0xe0, 0xb2, 0xc6, 0x60, 0x59, 0x63, 0xf0, 0xb5, 0xc6, 0xe0, 0x5b, 0x8d, 0xad, 0x9b, 0x1a, 0x83, 0x0f, 0x6b, 0x6c, 0x2d, 0xd7, 0xd8, 0xba, 0x5a, 0x63, 0xeb, 0xf5, 0xa3, 0xbf, 0x69, 0xff, 0xf2, 0xbb, 0x45, 0x8e, 0xf6, 0xf1, 0xf4, 0x67, - 0x00, 0x00, 0x00, 0xff, 0xff, 0x2f, 0x37, 0x59, 0x44, 0x8a, 0x03, 0x00, 0x00, + 0x00, 0x00, 0x00, 0xff, 0xff, 0x60, 0xe4, 0xd8, 0x0e, 0x8a, 0x03, 0x00, 0x00, } func (this *RuleGroupDesc) Equal(that interface{}) bool { diff --git a/pkg/ruler/rulespb/rules.proto b/pkg/ruler/rulespb/rules.proto index cc10170810264..d2218fb7cac14 100644 --- a/pkg/ruler/rulespb/rules.proto +++ b/pkg/ruler/rulespb/rules.proto @@ -1,15 +1,13 @@ - syntax = "proto3"; package rules; -option go_package = "github.com/grafana/loki/pkg/ruler/rulespb"; - import "github.com/gogo/protobuf/gogoproto/gogo.proto"; -import "google/protobuf/duration.proto"; import "google/protobuf/any.proto"; +import "google/protobuf/duration.proto"; import "pkg/logproto/logproto.proto"; +option go_package = "github.com/grafana/loki/pkg/ruler/rulespb"; option (gogoproto.marshaler_all) = true; option (gogoproto.unmarshaler_all) = true; @@ -18,8 +16,10 @@ message RuleGroupDesc { reserved 5, 7, 8; string name = 1; string namespace = 2; - google.protobuf.Duration interval = 3 - [(gogoproto.nullable) = false, (gogoproto.stdduration) = true]; + google.protobuf.Duration interval = 3 [ + (gogoproto.nullable) = false, + (gogoproto.stdduration) = true + ]; repeated RuleDesc rules = 4; string user = 6; // The options field can be used to extend 
Ruler functionality without @@ -35,7 +35,10 @@ message RuleDesc { string expr = 1; string record = 2; string alert = 3; - google.protobuf.Duration for = 4 [(gogoproto.nullable) = false,(gogoproto.stdduration) = true]; + google.protobuf.Duration for = 4 [ + (gogoproto.nullable) = false, + (gogoproto.stdduration) = true + ]; repeated logproto.LegacyLabelPair labels = 5 [ (gogoproto.nullable) = false, (gogoproto.customtype) = "github.com/grafana/loki/pkg/logproto.LabelAdapter" @@ -44,4 +47,4 @@ message RuleDesc { (gogoproto.nullable) = false, (gogoproto.customtype) = "github.com/grafana/loki/pkg/logproto.LabelAdapter" ]; -} \ No newline at end of file +} diff --git a/pkg/scheduler/schedulerpb/scheduler.proto b/pkg/scheduler/schedulerpb/scheduler.proto index 3ae6437567a47..e90b5c912b2ef 100644 --- a/pkg/scheduler/schedulerpb/scheduler.proto +++ b/pkg/scheduler/schedulerpb/scheduler.proto @@ -2,11 +2,10 @@ syntax = "proto3"; package schedulerpb; -option go_package = "schedulerpb"; - import "github.com/gogo/protobuf/gogoproto/gogo.proto"; import "github.com/weaveworks/common/httpgrpc/httpgrpc.proto"; +option go_package = "schedulerpb"; option (gogoproto.marshaler_all) = true; option (gogoproto.unmarshaler_all) = true; @@ -18,7 +17,7 @@ service SchedulerForQuerier { // // Long-running loop is used to detect broken connection between scheduler and querier. This is important // for scheduler to keep a list of connected queriers up-to-date. - rpc QuerierLoop(stream QuerierToScheduler) returns (stream SchedulerToQuerier) { }; + rpc QuerierLoop(stream QuerierToScheduler) returns (stream SchedulerToQuerier) {} // The querier notifies the query-scheduler that it started a graceful shutdown. rpc NotifyQuerierShutdown(NotifyQuerierShutdownRequest) returns (NotifyQuerierShutdownResponse); @@ -55,7 +54,7 @@ service SchedulerForFrontend { // Long-running loop is used to detect broken connection between frontend and scheduler. This is important for both // parties... if connection breaks, frontend can cancel (and possibly retry on different scheduler) all pending // requests sent to this scheduler, while scheduler can cancel queued requests from given frontend. - rpc FrontendLoop(stream FrontendToScheduler) returns (stream SchedulerToFrontend) { }; + rpc FrontendLoop(stream FrontendToScheduler) returns (stream SchedulerToFrontend) {} } enum FrontendToSchedulerType { diff --git a/pkg/storage/chunk/client/grpc/grpc.proto b/pkg/storage/chunk/client/grpc/grpc.proto index 3eecd35847b8d..2c819cbd919c6 100644 --- a/pkg/storage/chunk/client/grpc/grpc.proto +++ b/pkg/storage/chunk/client/grpc/grpc.proto @@ -5,138 +5,136 @@ package grpc; import "google/protobuf/empty.proto"; service grpc_store { - /// index-client - - /// WriteIndex writes batch of indexes to the index tables. - rpc WriteIndex(WriteIndexRequest) returns (google.protobuf.Empty); - /// QueryIndex reads the indexes required for given query & sends back the batch of rows - /// in rpc streams - rpc QueryIndex(QueryIndexRequest) returns (stream QueryIndexResponse); - /// DeleteIndex deletes the batch of index entries from the index tables - rpc DeleteIndex(DeleteIndexRequest) returns (google.protobuf.Empty); - - /// storage-client - - /// PutChunks saves the batch of chunks into the chunk tables. 
-    rpc PutChunks(PutChunksRequest) returns (google.protobuf.Empty);
-    /// GetChunks requests for batch of chunks and the batch of chunks are sent back in rpc streams
-    /// batching needs to be performed at server level as per requirement instead of sending single chunk per stream.
-    /// In GetChunks rpc request send buf as nil
-    rpc GetChunks(GetChunksRequest) returns (stream GetChunksResponse);
-    /// DeleteChunks deletes the chunks based on chunkID.
-    rpc DeleteChunks(ChunkID) returns (google.protobuf.Empty);
-
-    /// table-client
-
-    /// Lists all the tables that exists in the database.
-    rpc ListTables(google.protobuf.Empty) returns (ListTablesResponse);
-    /// Creates a table with provided name & attributes.
-    rpc CreateTable(CreateTableRequest) returns (google.protobuf.Empty);
-    // Deletes a table using table name provided.
-    rpc DeleteTable(DeleteTableRequest) returns (google.protobuf.Empty);
-    // Describes a table information for the provided table.
-    rpc DescribeTable(DescribeTableRequest) returns (DescribeTableResponse);
-    // Update a table with newly provided table information.
-    rpc UpdateTable(UpdateTableRequest) returns (google.protobuf.Empty);
+  /// index-client
+
+  /// WriteIndex writes a batch of index entries to the index tables.
+  rpc WriteIndex(WriteIndexRequest) returns (google.protobuf.Empty);
+  /// QueryIndex reads the indexes required for the given query and sends back the batches of rows
+  /// in rpc streams.
+  rpc QueryIndex(QueryIndexRequest) returns (stream QueryIndexResponse);
+  /// DeleteIndex deletes the batch of index entries from the index tables.
+  rpc DeleteIndex(DeleteIndexRequest) returns (google.protobuf.Empty);
+
+  /// storage-client
+
+  /// PutChunks saves the batch of chunks into the chunk tables.
+  rpc PutChunks(PutChunksRequest) returns (google.protobuf.Empty);
+  /// GetChunks requests a batch of chunks, and the chunks are sent back in rpc streams;
+  /// batching must be performed at the server level as required, instead of sending a single chunk per stream.
+  /// In the GetChunks rpc request, send buf as nil.
+  rpc GetChunks(GetChunksRequest) returns (stream GetChunksResponse);
+  /// DeleteChunks deletes the chunks based on chunkID.
+  rpc DeleteChunks(ChunkID) returns (google.protobuf.Empty);
+
+  /// table-client
+
+  /// Lists all the tables that exist in the database.
+  rpc ListTables(google.protobuf.Empty) returns (ListTablesResponse);
+  /// Creates a table with the provided name & attributes.
+  rpc CreateTable(CreateTableRequest) returns (google.protobuf.Empty);
+  // Deletes a table using the provided table name.
+  rpc DeleteTable(DeleteTableRequest) returns (google.protobuf.Empty);
+  // Describes the table information for the provided table.
+  rpc DescribeTable(DescribeTableRequest) returns (DescribeTableResponse);
+  // Updates a table with the newly provided table information.
+ rpc UpdateTable(UpdateTableRequest) returns (google.protobuf.Empty); } message PutChunksRequest { - repeated Chunk chunks = 1; + repeated Chunk chunks = 1; } message GetChunksRequest { - repeated Chunk chunks = 1; + repeated Chunk chunks = 1; } message GetChunksResponse { - repeated Chunk chunks = 1; + repeated Chunk chunks = 1; } message Chunk { - bytes encoded = 1; - string key = 2; - string tableName = 3; + bytes encoded = 1; + string key = 2; + string tableName = 3; } message ChunkID { - string chunkID = 1; + string chunkID = 1; } message DeleteTableRequest { - string tableName = 1; + string tableName = 1; } message DescribeTableRequest { - string tableName = 1; + string tableName = 1; } message WriteBatch { - repeated IndexEntry writes = 1; - repeated IndexEntry deletes = 2; + repeated IndexEntry writes = 1; + repeated IndexEntry deletes = 2; } message WriteIndexRequest { - repeated IndexEntry writes = 1; + repeated IndexEntry writes = 1; } message DeleteIndexRequest { - repeated IndexEntry deletes = 1; + repeated IndexEntry deletes = 1; } message QueryIndexResponse { - repeated Row rows = 1; + repeated Row rows = 1; } message Row { - bytes rangeValue = 1; - bytes value = 2; + bytes rangeValue = 1; + bytes value = 2; } message IndexEntry { - string tableName = 1; - string hashValue = 2; - bytes rangeValue = 3; - bytes value = 4; + string tableName = 1; + string hashValue = 2; + bytes rangeValue = 3; + bytes value = 4; } message QueryIndexRequest { - string tableName = 1; - string hashValue = 2; - bytes rangeValuePrefix = 3; - bytes rangeValueStart = 4; - bytes valueEqual = 5; - bool immutable = 6; + string tableName = 1; + string hashValue = 2; + bytes rangeValuePrefix = 3; + bytes rangeValueStart = 4; + bytes valueEqual = 5; + bool immutable = 6; } message UpdateTableRequest { - TableDesc current = 1; - TableDesc expected = 2; + TableDesc current = 1; + TableDesc expected = 2; } message DescribeTableResponse { - TableDesc desc = 1; - bool isActive = 2; + TableDesc desc = 1; + bool isActive = 2; } message CreateTableRequest { - TableDesc desc = 1; + TableDesc desc = 1; } message TableDesc { - string name = 1; - bool useOnDemandIOMode = 2; - int64 provisionedRead = 3; - int64 provisionedWrite = 4; - map tags = 5; + string name = 1; + bool useOnDemandIOMode = 2; + int64 provisionedRead = 3; + int64 provisionedWrite = 4; + map tags = 5; } message ListTablesResponse { - repeated string tableNames = 1; + repeated string tableNames = 1; } message Labels { - string name = 1; - string value = 2; + string name = 1; + string value = 2; } - - diff --git a/pkg/storage/stores/series/index/caching_index_client.proto b/pkg/storage/stores/series/index/caching_index_client.proto index 38e746ae70ebf..a89ddf99e77d0 100644 --- a/pkg/storage/stores/series/index/caching_index_client.proto +++ b/pkg/storage/stores/series/index/caching_index_client.proto @@ -8,18 +8,24 @@ option (gogoproto.marshaler_all) = true; option (gogoproto.unmarshaler_all) = true; message CacheEntry { - bytes Column = 1 [(gogoproto.customtype) = "Bytes", (gogoproto.nullable) = false]; - bytes Value = 2 [(gogoproto.customtype) = "Bytes", (gogoproto.nullable) = false]; + bytes Column = 1 [ + (gogoproto.customtype) = "Bytes", + (gogoproto.nullable) = false + ]; + bytes Value = 2 [ + (gogoproto.customtype) = "Bytes", + (gogoproto.nullable) = false + ]; } message ReadBatch { - repeated CacheEntry entries = 1 [(gogoproto.nullable) = false]; - string key = 2; + repeated CacheEntry entries = 1 [(gogoproto.nullable) = false]; + string key = 
2; - // The time at which the key expires. - int64 expiry = 3; + // The time at which the key expires. + int64 expiry = 3; - // The number of entries; used for cardinality limiting. - // entries will be empty when this is set. - int32 cardinality = 4; + // The number of entries; used for cardinality limiting. + // entries will be empty when this is set. + int32 cardinality = 4; } diff --git a/pkg/storage/stores/shipper/indexgateway/indexgatewaypb/gateway.pb.go b/pkg/storage/stores/shipper/indexgateway/indexgatewaypb/gateway.pb.go index 65afacd002469..1b5149e1251ad 100644 --- a/pkg/storage/stores/shipper/indexgateway/indexgatewaypb/gateway.pb.go +++ b/pkg/storage/stores/shipper/indexgateway/indexgatewaypb/gateway.pb.go @@ -506,7 +506,7 @@ func init() { } var fileDescriptor_33a7bd4603d312b2 = []byte{ - // 705 bytes of a gzipped FileDescriptorProto + // 704 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x55, 0xcd, 0x6e, 0xd3, 0x4e, 0x10, 0xf7, 0x26, 0xe9, 0x47, 0xa6, 0xf9, 0x7f, 0xb0, 0x45, 0x10, 0x19, 0xba, 0x29, 0x46, 0xa2, 0x11, 0x12, 0x31, 0x2a, 0xbd, 0x21, 0x2e, 0x2d, 0x50, 0x55, 0x94, 0x0a, 0x16, 0xa8, 0xe0, 0x84, @@ -515,43 +515,42 @@ var fileDescriptor_33a7bd4603d312b2 = []byte{ 0x63, 0x3b, 0x1f, 0x6d, 0x91, 0x4a, 0x4f, 0xd9, 0xf9, 0xcd, 0x6f, 0x66, 0xe7, 0x37, 0xe3, 0xd9, 0xc0, 0xc3, 0xd6, 0xa6, 0x6d, 0xfa, 0x81, 0x54, 0x96, 0x2d, 0xf0, 0x57, 0xf8, 0xa6, 0xef, 0x34, 0x5a, 0x2d, 0xa1, 0xcc, 0x46, 0x73, 0x43, 0x6c, 0xdb, 0x56, 0x20, 0xda, 0xd6, 0xce, 0x80, 0xd1, - 0xaa, 0x9a, 0xbd, 0x53, 0xa5, 0xa5, 0x64, 0x20, 0xe9, 0xbf, 0x83, 0x5e, 0xfd, 0x5a, 0x37, 0xab, - 0x2b, 0x6d, 0xf4, 0x26, 0x87, 0x88, 0xac, 0xdf, 0xb1, 0x1b, 0x81, 0x13, 0x56, 0x2b, 0x35, 0xe9, - 0x99, 0xb6, 0xb4, 0xa5, 0x89, 0x70, 0x35, 0xac, 0xa3, 0x15, 0x85, 0x74, 0x4f, 0x11, 0xdd, 0xf8, - 0x98, 0x81, 0xd2, 0xaa, 0x55, 0x15, 0xee, 0xba, 0xe5, 0x86, 0xc2, 0x7f, 0x2c, 0xd5, 0x53, 0x11, - 0xa8, 0x46, 0x6d, 0xcd, 0xf2, 0x04, 0x17, 0x5b, 0xa1, 0xf0, 0x03, 0x5a, 0x82, 0x29, 0x0f, 0xc1, - 0xb7, 0x4d, 0xcb, 0x13, 0x45, 0x32, 0x4b, 0xca, 0x79, 0x0e, 0x5e, 0xc2, 0xa3, 0x33, 0x00, 0x6e, - 0x37, 0x47, 0xe4, 0xcf, 0xa0, 0x3f, 0x8f, 0x08, 0xba, 0x97, 0x20, 0x57, 0x57, 0xd2, 0x2b, 0x66, - 0x67, 0x49, 0x39, 0xbb, 0x68, 0xee, 0x1e, 0x94, 0xb4, 0x1f, 0x07, 0xa5, 0xb9, 0xbe, 0x42, 0x5b, - 0x4a, 0x7a, 0x22, 0x70, 0x44, 0xe8, 0x9b, 0x35, 0xe9, 0x79, 0xb2, 0x69, 0x7a, 0x72, 0x43, 0xb8, - 0x95, 0x97, 0x0d, 0x4f, 0x70, 0x0c, 0xa6, 0x2b, 0x30, 0x11, 0x38, 0x4a, 0x86, 0xb6, 0x53, 0xcc, - 0x9d, 0x2f, 0x4f, 0x1c, 0x4f, 0x75, 0x98, 0xf4, 0xac, 0xa0, 0xe6, 0x08, 0xe5, 0x17, 0xc7, 0xb0, - 0xd8, 0xc4, 0x36, 0xbe, 0x13, 0x60, 0xab, 0x71, 0xe5, 0xe7, 0x6c, 0x47, 0xac, 0x37, 0x73, 0x41, - 0x7a, 0xb3, 0x7f, 0xa7, 0xd7, 0x98, 0x83, 0x7f, 0x50, 0x12, 0x17, 0x7e, 0x4b, 0x36, 0x7d, 0x41, - 0xaf, 0xc0, 0xf8, 0x7b, 0x1c, 0x77, 0x91, 0xcc, 0x66, 0xcb, 0x79, 0xde, 0xb3, 0x8c, 0x6f, 0x04, - 0xe8, 0xb2, 0x08, 0x96, 0x9c, 0xb0, 0xb9, 0xc9, 0x45, 0x3d, 0x16, 0x1c, 0xeb, 0x21, 0x17, 0xa4, - 0x27, 0x73, 0x81, 0xf3, 0xcb, 0x0e, 0xcd, 0xef, 0x01, 0x4c, 0x0f, 0x28, 0xe8, 0x29, 0xbe, 0x05, - 0x39, 0x25, 0xea, 0x91, 0xde, 0xa9, 0x79, 0x5a, 0x49, 0x96, 0x26, 0x61, 0xa2, 0xdf, 0x78, 0x03, - 0xf4, 0x79, 0x28, 0xd4, 0xce, 0x4a, 0x77, 0xe3, 0x92, 0x68, 0x1d, 0x26, 0x11, 0x7d, 0x22, 0x76, - 0x7a, 0xe3, 0x4e, 0x6c, 0x3a, 0x07, 0x39, 0x25, 0xdb, 0x7e, 0x31, 0x83, 0x99, 0xa7, 0x2b, 0x83, - 0xbb, 0x5a, 0xe1, 0xb2, 0xcd, 0x91, 0x60, 0xdc, 0x87, 0x2c, 0x97, 0x6d, 0xca, 0x00, 0x94, 0xd5, - 0xb4, 0x05, 0xee, 0x1b, 0x66, 0x2b, 0xf0, 
0x3e, 0x84, 0x5e, 0x86, 0x31, 0x9c, 0x06, 0x76, 0xa9, - 0xc0, 0x23, 0xc3, 0x58, 0x81, 0x4b, 0xfd, 0x75, 0x45, 0x73, 0x59, 0x80, 0x89, 0x2e, 0xd8, 0x10, - 0xb1, 0x2e, 0x7d, 0xf8, 0x76, 0xa4, 0x63, 0x20, 0x8f, 0xa9, 0xc6, 0x57, 0x02, 0x90, 0xe2, 0xf4, - 0x3a, 0xe4, 0x03, 0xab, 0xea, 0x8a, 0xb5, 0xf4, 0x5b, 0x4e, 0x81, 0xae, 0xd7, 0xb1, 0x7c, 0x67, - 0x3d, 0xa9, 0x28, 0xcf, 0x53, 0x80, 0xde, 0x86, 0xff, 0xd3, 0xca, 0x9f, 0x29, 0x51, 0x6f, 0x6c, - 0xe3, 0x40, 0x0a, 0x7c, 0x04, 0xa7, 0x65, 0xf8, 0x2f, 0xc5, 0x5e, 0x04, 0x96, 0x0a, 0x70, 0x8f, - 0x0b, 0x7c, 0x18, 0xee, 0x76, 0x08, 0x45, 0x3f, 0xda, 0x0a, 0x2d, 0x17, 0x17, 0xb4, 0xc0, 0xfb, - 0x90, 0xf9, 0xcf, 0x59, 0x28, 0xa0, 0x80, 0xe5, 0x48, 0x27, 0x7d, 0x05, 0x90, 0x36, 0x87, 0xde, - 0x18, 0x6e, 0xc2, 0x48, 0xe3, 0x74, 0xe3, 0x2c, 0x4a, 0x34, 0xf3, 0xbb, 0x84, 0xbe, 0x86, 0xa9, - 0xbe, 0x4f, 0x89, 0x8e, 0x04, 0x8d, 0x6e, 0x8a, 0x7e, 0xf3, 0x4c, 0x4e, 0x94, 0xd9, 0xd0, 0xe8, - 0x3b, 0xb8, 0x7a, 0xca, 0x1b, 0x43, 0x2b, 0xc3, 0x19, 0xce, 0x7e, 0x8c, 0xf4, 0x99, 0x13, 0xf9, - 0x7d, 0x77, 0xb9, 0x50, 0x3c, 0xed, 0x7d, 0xa7, 0xe6, 0x89, 0xc1, 0xa7, 0xff, 0x13, 0xfc, 0xf1, - 0xb6, 0xc5, 0x85, 0xbd, 0x43, 0xa6, 0xed, 0x1f, 0x32, 0xed, 0xf8, 0x90, 0x91, 0x0f, 0x1d, 0x46, - 0xbe, 0x74, 0x18, 0xd9, 0xed, 0x30, 0xb2, 0xd7, 0x61, 0xe4, 0x67, 0x87, 0x91, 0x5f, 0x1d, 0xa6, - 0x1d, 0x77, 0x18, 0xf9, 0x74, 0xc4, 0xb4, 0xbd, 0x23, 0xa6, 0xed, 0x1f, 0x31, 0xad, 0x3a, 0x8e, - 0xbb, 0x78, 0xef, 0x77, 0x00, 0x00, 0x00, 0xff, 0xff, 0x20, 0x48, 0x2d, 0xb9, 0x2f, 0x07, 0x00, - 0x00, + 0xaa, 0x9a, 0xbd, 0x53, 0xa5, 0xa5, 0x64, 0x20, 0xe9, 0xbf, 0x83, 0x5e, 0xfd, 0x8e, 0xdd, 0x08, + 0x9c, 0xb0, 0x5a, 0xa9, 0x49, 0xcf, 0xb4, 0xa5, 0x2d, 0x4d, 0xa4, 0x55, 0xc3, 0x3a, 0x5a, 0x68, + 0xe0, 0x29, 0x0a, 0xd7, 0xaf, 0x75, 0x8b, 0x70, 0xa5, 0x1d, 0x39, 0xe2, 0x43, 0xe4, 0x34, 0x3e, + 0x66, 0xa0, 0xb4, 0x6a, 0x55, 0x85, 0xbb, 0x6e, 0xb9, 0xa1, 0xf0, 0x1f, 0x4b, 0xf5, 0x54, 0x04, + 0xaa, 0x51, 0x5b, 0xb3, 0x3c, 0xc1, 0xc5, 0x56, 0x28, 0xfc, 0x80, 0x96, 0x60, 0xca, 0x43, 0xf0, + 0x6d, 0xd3, 0xf2, 0x44, 0x91, 0xcc, 0x92, 0x72, 0x9e, 0x83, 0x97, 0xf0, 0xe8, 0x0c, 0x80, 0xdb, + 0xcd, 0x11, 0xf9, 0x33, 0xe8, 0xcf, 0x23, 0x82, 0xee, 0x25, 0xc8, 0xd5, 0x95, 0xf4, 0x8a, 0xd9, + 0x59, 0x52, 0xce, 0x2e, 0x9a, 0xbb, 0x07, 0x25, 0xed, 0xc7, 0x41, 0x69, 0xae, 0x4f, 0x45, 0x4b, + 0x49, 0x4f, 0x04, 0x8e, 0x08, 0x7d, 0xb3, 0x26, 0x3d, 0x4f, 0x36, 0x4d, 0x4f, 0x6e, 0x08, 0xb7, + 0xf2, 0xb2, 0xe1, 0x09, 0x8e, 0xc1, 0x74, 0x05, 0x26, 0x02, 0x47, 0xc9, 0xd0, 0x76, 0x8a, 0xb9, + 0xf3, 0xe5, 0x89, 0xe3, 0xa9, 0x0e, 0x93, 0x9e, 0x15, 0xd4, 0x1c, 0xa1, 0xfc, 0xe2, 0x18, 0x16, + 0x9b, 0xd8, 0xc6, 0x77, 0x02, 0x6c, 0x35, 0xae, 0xfc, 0x9c, 0xed, 0x88, 0xf5, 0x66, 0x2e, 0x48, + 0x6f, 0xf6, 0xef, 0xf4, 0x1a, 0x73, 0xf0, 0x0f, 0x4a, 0xe2, 0xc2, 0x6f, 0xc9, 0xa6, 0x2f, 0xe8, + 0x15, 0x18, 0x7f, 0x8f, 0xe3, 0x2e, 0x92, 0xd9, 0x6c, 0x39, 0xcf, 0x7b, 0x96, 0xf1, 0x8d, 0x00, + 0x5d, 0x16, 0xc1, 0x92, 0x13, 0x36, 0x37, 0xb9, 0xa8, 0xc7, 0x82, 0x63, 0x3d, 0xe4, 0x82, 0xf4, + 0x64, 0x2e, 0x70, 0x7e, 0xd9, 0xa1, 0xf9, 0x3d, 0x80, 0xe9, 0x01, 0x05, 0x3d, 0xc5, 0xb7, 0x20, + 0xa7, 0x44, 0x3d, 0xd2, 0x3b, 0x35, 0x4f, 0x2b, 0xc9, 0x16, 0x24, 0x4c, 0xf4, 0x1b, 0x6f, 0x80, + 0x3e, 0x0f, 0x85, 0xda, 0x59, 0xe9, 0x6e, 0x5c, 0x12, 0xad, 0xc3, 0x24, 0xa2, 0x4f, 0xc4, 0x4e, + 0x6f, 0xdc, 0x89, 0x4d, 0xe7, 0x20, 0xa7, 0x64, 0xdb, 0x2f, 0x66, 0x30, 0xf3, 0x74, 0x65, 0x70, + 0x57, 0x2b, 0x5c, 0xb6, 0x39, 0x12, 0x8c, 0xfb, 0x90, 0xe5, 0xb2, 0x4d, 0x19, 0x80, 0xb2, 0x9a, + 0xb6, 0xc0, 0x7d, 0xc3, 0x6c, 0x05, 0xde, 0x87, 0xd0, 0xcb, 
0x30, 0x86, 0xd3, 0xc0, 0x2e, 0x15, + 0x78, 0x64, 0x18, 0x2b, 0x70, 0xa9, 0xbf, 0xae, 0x68, 0x2e, 0x0b, 0x30, 0xd1, 0x05, 0x1b, 0x22, + 0xd6, 0xa5, 0x0f, 0xdf, 0x8e, 0x74, 0x0c, 0xe4, 0x31, 0xd5, 0xf8, 0x4a, 0x00, 0x52, 0x9c, 0x5e, + 0x87, 0x7c, 0x60, 0x55, 0x5d, 0xb1, 0x96, 0x7e, 0xcb, 0x29, 0xd0, 0xf5, 0x3a, 0x96, 0xef, 0xac, + 0x27, 0x15, 0xe5, 0x79, 0x0a, 0xd0, 0xdb, 0xf0, 0x7f, 0x5a, 0xf9, 0x33, 0x25, 0xea, 0x8d, 0x6d, + 0x1c, 0x48, 0x81, 0x8f, 0xe0, 0xb4, 0x0c, 0xff, 0xa5, 0xd8, 0x8b, 0xc0, 0x52, 0x01, 0xee, 0x71, + 0x81, 0x0f, 0xc3, 0xdd, 0x0e, 0xa1, 0xe8, 0x47, 0x5b, 0xa1, 0xe5, 0xe2, 0x82, 0x16, 0x78, 0x1f, + 0x32, 0xff, 0x39, 0x0b, 0x05, 0x14, 0xb0, 0x1c, 0xe9, 0xa4, 0xaf, 0x00, 0xd2, 0xe6, 0xd0, 0x1b, + 0xc3, 0x4d, 0x18, 0x69, 0x9c, 0x6e, 0x9c, 0x45, 0x89, 0x66, 0x7e, 0x97, 0xd0, 0xd7, 0x30, 0xd5, + 0xf7, 0x29, 0xd1, 0x91, 0xa0, 0xd1, 0x4d, 0xd1, 0x6f, 0x9e, 0xc9, 0x89, 0x32, 0x1b, 0x1a, 0x7d, + 0x07, 0x57, 0x4f, 0x79, 0x63, 0x68, 0x65, 0x38, 0xc3, 0xd9, 0x8f, 0x91, 0x3e, 0x73, 0x22, 0xbf, + 0xef, 0x2e, 0x17, 0x8a, 0xa7, 0xbd, 0xef, 0xd4, 0x3c, 0x31, 0xf8, 0xf4, 0x7f, 0x82, 0x3f, 0xde, + 0xb6, 0xb8, 0xb0, 0x77, 0xc8, 0xb4, 0xfd, 0x43, 0xa6, 0x1d, 0x1f, 0x32, 0xf2, 0xa1, 0xc3, 0xc8, + 0x97, 0x0e, 0x23, 0xbb, 0x1d, 0x46, 0xf6, 0x3a, 0x8c, 0xfc, 0xec, 0x30, 0xf2, 0xab, 0xc3, 0xb4, + 0xe3, 0x0e, 0x23, 0x9f, 0x8e, 0x98, 0xb6, 0x77, 0xc4, 0xb4, 0xfd, 0x23, 0xa6, 0x55, 0xc7, 0x71, + 0x17, 0xef, 0xfd, 0x0e, 0x00, 0x00, 0xff, 0xff, 0x75, 0xf7, 0x88, 0x4a, 0x2f, 0x07, 0x00, 0x00, } func (this *LabelValuesForMetricNameRequest) Equal(that interface{}) bool { diff --git a/pkg/storage/stores/shipper/indexgateway/indexgatewaypb/gateway.proto b/pkg/storage/stores/shipper/indexgateway/indexgatewaypb/gateway.proto index 24ab75d135608..183e027d31444 100644 --- a/pkg/storage/stores/shipper/indexgateway/indexgatewaypb/gateway.proto +++ b/pkg/storage/stores/shipper/indexgateway/indexgatewaypb/gateway.proto @@ -2,66 +2,85 @@ syntax = "proto3"; package indexgatewaypb; -import "pkg/logproto/logproto.proto"; import "github.com/gogo/protobuf/gogoproto/gogo.proto"; - +import "pkg/logproto/logproto.proto"; service IndexGateway { - /// QueryIndex reads the indexes required for given query & sends back the batch of rows - /// in rpc streams - rpc QueryIndex(QueryIndexRequest) returns (stream QueryIndexResponse); - /// GetChunkRef returns chunk reference that match the provided label matchers - rpc GetChunkRef(GetChunkRefRequest) returns (GetChunkRefResponse) {}; - rpc LabelNamesForMetricName(LabelNamesForMetricNameRequest) returns (LabelResponse) {}; - rpc LabelValuesForMetricName(LabelValuesForMetricNameRequest) returns (LabelResponse) {}; + /// QueryIndex reads the indexes required for given query & sends back the batch of rows + /// in rpc streams + rpc QueryIndex(QueryIndexRequest) returns (stream QueryIndexResponse); + /// GetChunkRef returns chunk reference that match the provided label matchers + rpc GetChunkRef(GetChunkRefRequest) returns (GetChunkRefResponse) {} + + rpc LabelNamesForMetricName(LabelNamesForMetricNameRequest) returns (LabelResponse) {} + + rpc LabelValuesForMetricName(LabelValuesForMetricNameRequest) returns (LabelResponse) {} } message LabelValuesForMetricNameRequest { - string metric_name = 1; - string label_name = 2; - int64 from = 3 [ (gogoproto.customtype) = "github.com/prometheus/common/model.Time", (gogoproto.nullable) = false]; - int64 through = 4 [ (gogoproto.customtype) = "github.com/prometheus/common/model.Time", (gogoproto.nullable) = false]; - string matchers = 5; + 
string metric_name = 1; + string label_name = 2; + int64 from = 3 [ + (gogoproto.customtype) = "github.com/prometheus/common/model.Time", + (gogoproto.nullable) = false + ]; + int64 through = 4 [ + (gogoproto.customtype) = "github.com/prometheus/common/model.Time", + (gogoproto.nullable) = false + ]; + string matchers = 5; } message LabelNamesForMetricNameRequest { - string metric_name = 1; - int64 from = 2 [ (gogoproto.customtype) = "github.com/prometheus/common/model.Time", (gogoproto.nullable) = false]; - int64 through = 3 [ (gogoproto.customtype) = "github.com/prometheus/common/model.Time", (gogoproto.nullable) = false]; + string metric_name = 1; + int64 from = 2 [ + (gogoproto.customtype) = "github.com/prometheus/common/model.Time", + (gogoproto.nullable) = false + ]; + int64 through = 3 [ + (gogoproto.customtype) = "github.com/prometheus/common/model.Time", + (gogoproto.nullable) = false + ]; } message LabelResponse { - repeated string values = 1; + repeated string values = 1; } message GetChunkRefRequest { - int64 from = 1 [ (gogoproto.customtype) = "github.com/prometheus/common/model.Time", (gogoproto.nullable) = false]; - int64 through = 2 [ (gogoproto.customtype) = "github.com/prometheus/common/model.Time", (gogoproto.nullable) = false]; - string matchers = 3; + int64 from = 1 [ + (gogoproto.customtype) = "github.com/prometheus/common/model.Time", + (gogoproto.nullable) = false + ]; + int64 through = 2 [ + (gogoproto.customtype) = "github.com/prometheus/common/model.Time", + (gogoproto.nullable) = false + ]; + string matchers = 3; } message GetChunkRefResponse { - repeated logproto.ChunkRef refs = 1; + repeated logproto.ChunkRef refs = 1; } message QueryIndexResponse { - string QueryKey = 1; - repeated Row rows = 2; + string QueryKey = 1; + repeated Row rows = 2; } message Row { - bytes rangeValue = 1; - bytes value = 2; + bytes rangeValue = 1; + bytes value = 2; } message QueryIndexRequest { - repeated IndexQuery Queries = 1; + repeated IndexQuery Queries = 1; } message IndexQuery { - string tableName = 1; - string hashValue = 2; - bytes rangeValuePrefix = 3; - bytes rangeValueStart = 4; - bytes valueEqual = 5; + string tableName = 1; + string hashValue = 2; + bytes rangeValuePrefix = 3; + bytes rangeValueStart = 4; + bytes valueEqual = 5; } From 0c721b49d64a24518763ad6b56fea7364ef9bb37 Mon Sep 17 00:00:00 2001 From: JordanRushing Date: Fri, 29 Apr 2022 08:02:15 -0500 Subject: [PATCH 032/223] Add IndexGateway Query Readiness Duration panel to `Loki - Reads Resources` dashboard in production/loki-mixin (#6014) * Add IndexGateway Query Readiness Duration panel to `Loki - Reads Resources` dashboard in production/loki-mixin Signed-off-by: JordanRushing * Satisfy `make lint-jsonnet` by fixing query used in IndexGateway Query Readiness dashboard Signed-off-by: JordanRushing --- .../loki-mixin/dashboards/loki-reads-resources.libsonnet | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/production/loki-mixin/dashboards/loki-reads-resources.libsonnet b/production/loki-mixin/dashboards/loki-reads-resources.libsonnet index 20eec23e287b6..25e94fe887859 100644 --- a/production/loki-mixin/dashboards/loki-reads-resources.libsonnet +++ b/production/loki-mixin/dashboards/loki-reads-resources.libsonnet @@ -115,6 +115,13 @@ local utils = import 'mixin-utils/utils.libsonnet'; .addPanel( $.containerDiskSpaceUtilizationPanel('Disk Space Utilization', 'index-gateway'), ) + .addPanel( + $.panel('Query Readiness Duration') + + $.queryPanel( + 
['loki_boltdb_shipper_query_readiness_duration_seconds{%s}' % $.namespaceMatcher()], ['duration'] + ) + + { yaxes: $.yaxes('s') }, + ) ) .addRow( $.row('Ingester') From 4891e28bff86840722fff24768a71d011c8e34cb Mon Sep 17 00:00:00 2001 From: Christian Haudum Date: Fri, 29 Apr 2022 15:02:36 +0200 Subject: [PATCH 033/223] Decrease log level for "failed to put to memcached" (#6018) Since failing to put to memcache does not have an operational impact on the write path, this log message should not be an error. Make it a warning instead. Signed-off-by: Christian Haudum --- pkg/storage/chunk/cache/memcached.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/storage/chunk/cache/memcached.go b/pkg/storage/chunk/cache/memcached.go index 39acf24ce0c4d..8da4be22fa856 100644 --- a/pkg/storage/chunk/cache/memcached.go +++ b/pkg/storage/chunk/cache/memcached.go @@ -217,7 +217,7 @@ func (c *Memcached) Store(ctx context.Context, keys []string, bufs [][]byte) err return c.memcache.Set(&item) }) if cacheErr != nil { - level.Error(c.logger).Log("msg", "failed to put to memcached", "name", c.name, "err", err) + level.Warn(c.logger).Log("msg", "failed to put to memcached", "name", c.name, "err", err) err = cacheErr } } From 5d51e5a54f42dc9dc920bd05d5d01ce144940a9b Mon Sep 17 00:00:00 2001 From: Cyril Tovena Date: Fri, 29 Apr 2022 16:51:59 +0200 Subject: [PATCH 034/223] Improve batching in boltdb index (#6044) * Improve batching in boltdb index * review feedback --- .../chunk/client/local/boltdb_index_client.go | 166 ++++++++---------- .../client/local/boltdb_index_client_test.go | 47 +++++ 2 files changed, 125 insertions(+), 88 deletions(-) diff --git a/pkg/storage/chunk/client/local/boltdb_index_client.go b/pkg/storage/chunk/client/local/boltdb_index_client.go index c65a498596826..67dc97d6df028 100644 --- a/pkg/storage/chunk/client/local/boltdb_index_client.go +++ b/pkg/storage/chunk/client/local/boltdb_index_client.go @@ -258,69 +258,104 @@ func (b *BoltIndexClient) QueryDB(ctx context.Context, db *bbolt.DB, bucketName } func (b *BoltIndexClient) QueryWithCursor(_ context.Context, c *bbolt.Cursor, query index.Query, callback index.QueryPagesCallback) error { - var start []byte - if len(query.RangeValuePrefix) > 0 { - start = []byte(query.HashValue + separator + string(query.RangeValuePrefix)) - } else if len(query.RangeValueStart) > 0 { - start = []byte(query.HashValue + separator + string(query.RangeValueStart)) - } else { - start = []byte(query.HashValue + separator) - } + batch := batchPool.Get().(*cursorBatch) + defer batchPool.Put(batch) + + batch.reset(c, &query) + callback(query, batch) + return nil +} - rowPrefix := []byte(query.HashValue + separator) +var batchPool = sync.Pool{ + New: func() interface{} { + return &cursorBatch{ + start: bytes.NewBuffer(make([]byte, 0, 1024)), + rowPrefix: bytes.NewBuffer(make([]byte, 0, 1024)), + } + }, +} - // sync.WaitGroup is needed to wait for the caller to finish processing all the index entries being streamed - wg := sync.WaitGroup{} - batch := newReadBatch() - defer func() { - batch.done() - wg.Wait() - }() +type cursorBatch struct { + cursor *bbolt.Cursor + query *index.Query + start *bytes.Buffer + rowPrefix *bytes.Buffer + seeked bool - callbackDone := false + currRangeValue []byte + currValue []byte +} + +func (c *cursorBatch) Iterator() index.ReadBatchIterator { + return c +} + +func (c *cursorBatch) nextItem() ([]byte, []byte) { + if !c.seeked { + if len(c.query.RangeValuePrefix) > 0 { + c.start.WriteString(c.query.HashValue) + 
c.start.WriteString(separator) + c.start.Write(c.query.RangeValuePrefix) + } else if len(c.query.RangeValueStart) > 0 { + c.start.WriteString(c.query.HashValue) + c.start.WriteString(separator) + c.start.Write(c.query.RangeValueStart) + } else { + c.start.WriteString(c.query.HashValue) + c.start.WriteString(separator) + } + c.rowPrefix.WriteString(c.query.HashValue) + c.rowPrefix.WriteString(separator) + c.seeked = true + return c.cursor.Seek(c.start.Bytes()) + } + return c.cursor.Next() +} - for k, v := c.Seek(start); k != nil; k, v = c.Next() { - if !bytes.HasPrefix(k, rowPrefix) { +func (c *cursorBatch) Next() bool { + for k, v := c.nextItem(); k != nil; k, v = c.nextItem() { + if !bytes.HasPrefix(k, c.rowPrefix.Bytes()) { break } - if len(query.RangeValuePrefix) > 0 && !bytes.HasPrefix(k, start) { + if len(c.query.RangeValuePrefix) > 0 && !bytes.HasPrefix(k, c.start.Bytes()) { break } - if len(query.ValueEqual) > 0 && !bytes.Equal(v, query.ValueEqual) { + if len(c.query.ValueEqual) > 0 && !bytes.Equal(v, c.query.ValueEqual) { continue } - // we need to do callback only once to pass the batch iterator - if !callbackDone { - wg.Add(1) - // do the callback in a goroutine to stream back the index entries - go func() { - // wait for callback to finish processing the batch and return - defer wg.Done() - callback(query, batch) - }() - callbackDone = true - } - // make a copy since k, v are only valid for the life of the transaction. // See: https://godoc.org/github.com/boltdb/bolt#Cursor.Seek - rangeValue := make([]byte, len(k)-len(rowPrefix)) - copy(rangeValue, k[len(rowPrefix):]) + rangeValue := make([]byte, len(k)-c.rowPrefix.Len()) + copy(rangeValue, k[c.rowPrefix.Len():]) value := make([]byte, len(v)) copy(value, v) - err := batch.send(singleResponse{ - rangeValue: rangeValue, - value: value, - }) - if err != nil { - return errors.Wrap(err, "failed to send row while processing boltdb index query") - } + c.currRangeValue = rangeValue + c.currValue = value + return true } + return false +} - return nil +func (c *cursorBatch) RangeValue() []byte { + return c.currRangeValue +} + +func (c *cursorBatch) Value() []byte { + return c.currValue +} + +func (c *cursorBatch) reset(cur *bbolt.Cursor, q *index.Query) { + c.currRangeValue = nil + c.currValue = nil + c.seeked = false + c.cursor = cur + c.query = q + c.rowPrefix.Reset() + c.start.Reset() } type TableWrites struct { @@ -359,51 +394,6 @@ func (b *BoltWriteBatch) Add(tableName, hashValue string, rangeValue []byte, val writes.puts[key] = value } -type singleResponse struct { - rangeValue []byte - value []byte -} - -type readBatch struct { - respChan chan singleResponse - curr singleResponse -} - -func newReadBatch() *readBatch { - return &readBatch{respChan: make(chan singleResponse)} -} - -func (r *readBatch) Iterator() index.ReadBatchIterator { - return r -} - -func (r *readBatch) Next() bool { - var ok bool - r.curr, ok = <-r.respChan - return ok -} - -func (r *readBatch) RangeValue() []byte { - return r.curr.rangeValue -} - -func (r *readBatch) Value() []byte { - return r.curr.value -} - -func (r *readBatch) done() { - close(r.respChan) -} - -func (r *readBatch) send(resp singleResponse) error { - select { - case r.respChan <- resp: - return nil - case <-time.After(10 * time.Second): - return errors.New("timed out sending response") - } -} - // Open the database. // Set Timeout to avoid obtaining file lock wait indefinitely. 
func OpenBoltdbFile(path string) (*bbolt.DB, error) { diff --git a/pkg/storage/chunk/client/local/boltdb_index_client_test.go b/pkg/storage/chunk/client/local/boltdb_index_client_test.go index 82254a44c1757..6e23bdbbaade3 100644 --- a/pkg/storage/chunk/client/local/boltdb_index_client_test.go +++ b/pkg/storage/chunk/client/local/boltdb_index_client_test.go @@ -259,3 +259,50 @@ func TestBoltDB_Writes(t *testing.T) { }) } } + +func Benchmark_Query(b *testing.B) { + tableName := "test" + dirname := b.TempDir() + + indexClient, err := NewBoltDBIndexClient(BoltDBConfig{ + Directory: dirname, + }) + require.NoError(b, err) + + tableClient, err := NewTableClient(dirname) + require.NoError(b, err) + + err = tableClient.CreateTable(context.Background(), config.TableDesc{ + Name: tableName, + }) + require.NoError(b, err) + + batch := indexClient.NewWriteBatch() + batch.Add(tableName, fmt.Sprintf("hash%s", "test"), []byte(fmt.Sprintf("range%s", "value")), nil) + + err = indexClient.BatchWrite(context.Background(), batch) + require.NoError(b, err) + + // try to create the same file which is already existing + err = tableClient.CreateTable(context.Background(), config.TableDesc{ + Name: tableName, + }) + require.NoError(b, err) + + // make sure file content is not modified + entry := index.Query{ + TableName: tableName, + HashValue: fmt.Sprintf("hash%s", "test"), + } + b.ResetTimer() + b.ReportAllocs() + for i := 0; i < b.N; i++ { + err = indexClient.query(context.Background(), entry, func(_ index.Query, read index.ReadBatchResult) bool { + iter := read.Iterator() + for iter.Next() { + } + return true + }) + require.NoError(b, err) + } +} From 6b6521f791dec60b371009c1412d4dd8a2462eb7 Mon Sep 17 00:00:00 2001 From: Susana Ferreira Date: Fri, 29 Apr 2022 16:55:37 +0200 Subject: [PATCH 035/223] Add metrics to range mapper (#6030) * Refactor ShardingMetrics to MapperMetrics * Add mapper metrics to range mapper --- pkg/logql/downstream.go | 4 +- pkg/logql/downstream_test.go | 11 +- pkg/logql/mapper_metrics.go | 81 ++++++++++++++ pkg/logql/rangemapper.go | 67 ++++++++---- pkg/logql/rangemapper_test.go | 8 +- pkg/logql/shardmapper.go | 109 ++++--------------- pkg/logql/shardmapper_test.go | 12 +- pkg/querier/queryrange/metrics.go | 16 ++- pkg/querier/queryrange/querysharding.go | 16 +-- pkg/querier/queryrange/querysharding_test.go | 2 +- pkg/querier/queryrange/roundtrip.go | 10 +- pkg/querier/queryrange/split_by_range.go | 23 ++-- 12 files changed, 206 insertions(+), 153 deletions(-) create mode 100644 pkg/logql/mapper_metrics.go diff --git a/pkg/logql/downstream.go b/pkg/logql/downstream.go index 568fa22f65134..1703a034a07a2 100644 --- a/pkg/logql/downstream.go +++ b/pkg/logql/downstream.go @@ -46,17 +46,15 @@ type DownstreamEngine struct { timeout time.Duration downstreamable Downstreamable limits Limits - metrics *ShardingMetrics } // NewDownstreamEngine constructs a *DownstreamEngine -func NewDownstreamEngine(opts EngineOpts, downstreamable Downstreamable, metrics *ShardingMetrics, limits Limits, logger log.Logger) *DownstreamEngine { +func NewDownstreamEngine(opts EngineOpts, downstreamable Downstreamable, limits Limits, logger log.Logger) *DownstreamEngine { opts.applyDefault() return &DownstreamEngine{ logger: logger, timeout: opts.Timeout, downstreamable: downstreamable, - metrics: metrics, limits: limits, } } diff --git a/pkg/logql/downstream_test.go b/pkg/logql/downstream_test.go index c8ffebfcf11fa..b584d721b22c9 100644 --- a/pkg/logql/downstream_test.go +++ b/pkg/logql/downstream_test.go @@ -14,7 
+14,8 @@ import ( "github.com/grafana/loki/pkg/logproto" ) -var nilMetrics = NewShardingMetrics(nil) +var nilShardMetrics = NewShardMapperMetrics(nil) +var nilRangeMetrics = NewRangeMapperMetrics(nil) func TestMappingEquivalence(t *testing.T) { var ( @@ -61,7 +62,7 @@ func TestMappingEquivalence(t *testing.T) { opts := EngineOpts{} regular := NewEngine(opts, q, NoLimits, log.NewNopLogger()) - sharded := NewDownstreamEngine(opts, MockDownstreamer{regular}, nilMetrics, NoLimits, log.NewNopLogger()) + sharded := NewDownstreamEngine(opts, MockDownstreamer{regular}, NoLimits, log.NewNopLogger()) t.Run(tc.query, func(t *testing.T) { params := NewLiteralParams( @@ -77,7 +78,7 @@ func TestMappingEquivalence(t *testing.T) { qry := regular.Query(params) ctx := user.InjectOrgID(context.Background(), "fake") - mapper, err := NewShardMapper(shards, nilMetrics) + mapper, err := NewShardMapper(shards, nilShardMetrics) require.Nil(t, err) _, mapped, err := mapper.Parse(tc.query) require.Nil(t, err) @@ -260,7 +261,7 @@ func TestRangeMappingEquivalence(t *testing.T) { opts := EngineOpts{} regularEngine := NewEngine(opts, q, NoLimits, log.NewNopLogger()) - downstreamEngine := NewDownstreamEngine(opts, MockDownstreamer{regularEngine}, nilMetrics, NoLimits, log.NewNopLogger()) + downstreamEngine := NewDownstreamEngine(opts, MockDownstreamer{regularEngine}, NoLimits, log.NewNopLogger()) t.Run(tc.query, func(t *testing.T) { ctx := user.InjectOrgID(context.Background(), "fake") @@ -282,7 +283,7 @@ func TestRangeMappingEquivalence(t *testing.T) { require.Nil(t, err) // Downstream engine - split by range - rangeMapper, err := NewRangeMapper(tc.splitByInterval) + rangeMapper, err := NewRangeMapper(tc.splitByInterval, nilRangeMetrics) require.Nil(t, err) noop, rangeExpr, err := rangeMapper.Parse(tc.query) require.Nil(t, err) diff --git a/pkg/logql/mapper_metrics.go b/pkg/logql/mapper_metrics.go new file mode 100644 index 0000000000000..1e9e8d0df6d13 --- /dev/null +++ b/pkg/logql/mapper_metrics.go @@ -0,0 +1,81 @@ +package logql + +import ( + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" +) + +// expression type used in metrics +const ( + StreamsKey = "streams" + MetricsKey = "metrics" +) + +// parsing evaluation result used in metrics +const ( + SuccessKey = "success" + FailureKey = "failure" + NoopKey = "noop" +) + +// MapperMetrics is the metrics wrapper used in logql mapping (shard and range) +type MapperMetrics struct { + DownstreamQueries *prometheus.CounterVec // downstream queries total, partitioned by streams/metrics + ParsedQueries *prometheus.CounterVec // parsed ASTs total, partitioned by success/failure/noop + DownstreamFactor prometheus.Histogram // per request downstream factor +} + +func newMapperMetrics(registerer prometheus.Registerer, mapper string) *MapperMetrics { + return &MapperMetrics{ + DownstreamQueries: promauto.With(registerer).NewCounterVec(prometheus.CounterOpts{ + Namespace: "loki", + Name: "query_frontend_shards_total", + Help: "Number of downstream queries by expression type", + ConstLabels: prometheus.Labels{"mapper": mapper}, + }, []string{"type"}), + ParsedQueries: promauto.With(registerer).NewCounterVec(prometheus.CounterOpts{ + Namespace: "loki", + Name: "query_frontend_sharding_parsed_queries_total", + Help: "Number of parsed queries by evaluation type", + ConstLabels: prometheus.Labels{"mapper": mapper}, + }, []string{"type"}), + DownstreamFactor: promauto.With(registerer).NewHistogram(prometheus.HistogramOpts{ + 
Namespace: "loki", + Name: "query_frontend_shard_factor", + Help: "Number of downstream queries per request", + Buckets: prometheus.LinearBuckets(0, 16, 4), + ConstLabels: prometheus.Labels{"mapper": mapper}, + }), + } +} + +// downstreamRecorder wraps a vector & histogram, providing an easy way to increment downstream counts. +// and unify them into histogram entries. +// NOT SAFE FOR CONCURRENT USE! We avoid introducing mutex locking here +// because AST mapping is single threaded. +type downstreamRecorder struct { + done bool + total int + *MapperMetrics +} + +// downstreamRecorder constructs a recorder using the underlying metrics. +func (m *MapperMetrics) downstreamRecorder() *downstreamRecorder { + return &downstreamRecorder{ + MapperMetrics: m, + } +} + +// Add increments both the downstream count and tracks it for the eventual histogram entry. +func (r *downstreamRecorder) Add(x int, key string) { + r.total += x + r.DownstreamQueries.WithLabelValues(key).Add(float64(x)) +} + +// Finish idemptotently records a histogram entry with the total downstream factor. +func (r *downstreamRecorder) Finish() { + if !r.done { + r.done = true + r.DownstreamFactor.Observe(float64(r.total)) + } +} diff --git a/pkg/logql/rangemapper.go b/pkg/logql/rangemapper.go index f7ac61d824880..86cf2d00274ed 100644 --- a/pkg/logql/rangemapper.go +++ b/pkg/logql/rangemapper.go @@ -6,6 +6,7 @@ import ( "github.com/go-kit/log/level" "github.com/pkg/errors" + "github.com/prometheus/client_golang/prometheus" "github.com/grafana/loki/pkg/logql/syntax" util_log "github.com/grafana/loki/pkg/util/log" @@ -51,19 +52,25 @@ var splittableRangeVectorOp = map[string]struct{}{ // using the same rules as above. type RangeMapper struct { splitByInterval time.Duration + metrics *MapperMetrics } // NewRangeMapper creates a new RangeMapper instance with the given duration as // split interval. The interval must be greater than 0. -func NewRangeMapper(interval time.Duration) (RangeMapper, error) { +func NewRangeMapper(interval time.Duration, metrics *MapperMetrics) (RangeMapper, error) { if interval <= 0 { return RangeMapper{}, fmt.Errorf("cannot create RangeMapper with splitByInterval <= 0; got %s", interval) } return RangeMapper{ splitByInterval: interval, + metrics: metrics, }, nil } +func NewRangeMapperMetrics(registerer prometheus.Registerer) *MapperMetrics { + return newMapperMetrics(registerer, "range") +} + // Parse parses the given LogQL query string into a sample expression and // applies the rewrite rules for splitting it into a sample expression that can // be executed by the downstream engine. 
@@ -75,23 +82,36 @@ func (m RangeMapper) Parse(query string) (bool, syntax.Expr, error) { return true, nil, err } + recorder := m.metrics.downstreamRecorder() + if !isSplittableByRange(origExpr) { + m.metrics.ParsedQueries.WithLabelValues(NoopKey).Inc() return true, origExpr, nil } - modExpr, err := m.Map(origExpr, nil) + modExpr, err := m.Map(origExpr, nil, recorder) if err != nil { + m.metrics.ParsedQueries.WithLabelValues(FailureKey).Inc() return true, nil, err } - return origExpr.String() == modExpr.String(), modExpr, err + noop := origExpr.String() == modExpr.String() + if noop { + m.metrics.ParsedQueries.WithLabelValues(NoopKey).Inc() + } else { + m.metrics.ParsedQueries.WithLabelValues(SuccessKey).Inc() + } + + recorder.Finish() // only record metrics for successful mappings + + return noop, modExpr, err } // Map rewrites sample expression expr and returns the resultant sample expression to be executed by the downstream engine // It is called recursively on the expression tree. // The function takes an optional vector aggregation as second argument, that // is pushed down to the downstream expression. -func (m RangeMapper) Map(expr syntax.SampleExpr, vectorAggrPushdown *syntax.VectorAggregationExpr) (syntax.SampleExpr, error) { +func (m RangeMapper) Map(expr syntax.SampleExpr, vectorAggrPushdown *syntax.VectorAggregationExpr, recorder *downstreamRecorder) (syntax.SampleExpr, error) { // immediately clone the passed expr to avoid mutating the original expr, err := clone(expr) if err != nil { @@ -100,15 +120,15 @@ func (m RangeMapper) Map(expr syntax.SampleExpr, vectorAggrPushdown *syntax.Vect switch e := expr.(type) { case *syntax.VectorAggregationExpr: - return m.mapVectorAggregationExpr(e) + return m.mapVectorAggregationExpr(e, recorder) case *syntax.RangeAggregationExpr: - return m.mapRangeAggregationExpr(e, vectorAggrPushdown), nil + return m.mapRangeAggregationExpr(e, vectorAggrPushdown, recorder), nil case *syntax.BinOpExpr: - lhsMapped, err := m.Map(e.SampleExpr, vectorAggrPushdown) + lhsMapped, err := m.Map(e.SampleExpr, vectorAggrPushdown, recorder) if err != nil { return nil, err } - rhsMapped, err := m.Map(e.RHS, vectorAggrPushdown) + rhsMapped, err := m.Map(e.RHS, vectorAggrPushdown, recorder) if err != nil { return nil, err } @@ -158,7 +178,7 @@ func hasLabelExtractionStage(expr syntax.SampleExpr) bool { // Example: // rate({app="foo"}[2m]) // => (sum without (count_over_time({app="foo"}[1m]) ++ count_over_time({app="foo"}[1m]) offset 1m) / 120) -func (m RangeMapper) sumOverFullRange(expr *syntax.RangeAggregationExpr, overrideDownstream *syntax.VectorAggregationExpr, operation string, rangeInterval time.Duration) syntax.SampleExpr { +func (m RangeMapper) sumOverFullRange(expr *syntax.RangeAggregationExpr, overrideDownstream *syntax.VectorAggregationExpr, operation string, rangeInterval time.Duration, recorder *downstreamRecorder) syntax.SampleExpr { var downstreamExpr syntax.SampleExpr = &syntax.RangeAggregationExpr{ Left: expr.Left, Operation: operation, @@ -174,7 +194,7 @@ func (m RangeMapper) sumOverFullRange(expr *syntax.RangeAggregationExpr, overrid } return &syntax.BinOpExpr{ SampleExpr: &syntax.VectorAggregationExpr{ - Left: m.mapConcatSampleExpr(downstreamExpr, rangeInterval), + Left: m.mapConcatSampleExpr(downstreamExpr, rangeInterval, recorder), Grouping: &syntax.Grouping{ Without: true, }, @@ -194,7 +214,7 @@ func (m RangeMapper) sumOverFullRange(expr *syntax.RangeAggregationExpr, overrid // => min without (bytes_over_time({job="bar"} [1m]) ++ 
bytes_over_time({job="bar"} [1m] offset 1m)) // min by (app) (bytes_over_time({job="bar"} [2m]) // => min without (min by (app) (bytes_over_time({job="bar"} [1m])) ++ min by (app) (bytes_over_time({job="bar"} [1m] offset 1m))) -func (m RangeMapper) vectorAggrWithRangeDownstreams(expr *syntax.RangeAggregationExpr, vectorAggrPushdown *syntax.VectorAggregationExpr, op string, rangeInterval time.Duration) syntax.SampleExpr { +func (m RangeMapper) vectorAggrWithRangeDownstreams(expr *syntax.RangeAggregationExpr, vectorAggrPushdown *syntax.VectorAggregationExpr, op string, rangeInterval time.Duration, recorder *downstreamRecorder) syntax.SampleExpr { grouping := expr.Grouping if expr.Grouping == nil { grouping = &syntax.Grouping{ @@ -206,13 +226,13 @@ func (m RangeMapper) vectorAggrWithRangeDownstreams(expr *syntax.RangeAggregatio downstream = vectorAggrPushdown } return &syntax.VectorAggregationExpr{ - Left: m.mapConcatSampleExpr(downstream, rangeInterval), + Left: m.mapConcatSampleExpr(downstream, rangeInterval, recorder), Grouping: grouping, Operation: op, } } -// appendDownstream adds expression expr with a range interval 'interval' and offset 'offset' to the downstreams list. +// appendDownstream adds expression expr with a range interval 'interval' and offset 'offset' to the downstreams list. // Returns the updated downstream ConcatSampleExpr. func appendDownstream(downstreams *ConcatSampleExpr, expr syntax.SampleExpr, interval time.Duration, offset time.Duration) *ConcatSampleExpr { sampleExpr, _ := clone(expr) @@ -237,7 +257,7 @@ func appendDownstream(downstreams *ConcatSampleExpr, expr syntax.SampleExpr, int // mapConcatSampleExpr transform expr in multiple downstream subexpressions split by offset range interval // rangeInterval should be greater than m.splitByInterval, otherwise the resultant expression // will have an unnecessary aggregation operation -func (m RangeMapper) mapConcatSampleExpr(expr syntax.SampleExpr, rangeInterval time.Duration) syntax.SampleExpr { +func (m RangeMapper) mapConcatSampleExpr(expr syntax.SampleExpr, rangeInterval time.Duration, recorder *downstreamRecorder) syntax.SampleExpr { splitCount := int(rangeInterval / m.splitByInterval) if splitCount == 0 { @@ -249,16 +269,19 @@ func (m RangeMapper) mapConcatSampleExpr(expr syntax.SampleExpr, rangeInterval t for split = 0; split < splitCount; split++ { downstreams = appendDownstream(downstreams, expr, m.splitByInterval, time.Duration(split)*m.splitByInterval) } + recorder.Add(splitCount, MetricsKey) + // Add the remainder offset interval if rangeInterval%m.splitByInterval != 0 { offset := time.Duration(split) * m.splitByInterval downstreams = appendDownstream(downstreams, expr, rangeInterval-offset, offset) + recorder.Add(1, MetricsKey) } return downstreams } -func (m RangeMapper) mapVectorAggregationExpr(expr *syntax.VectorAggregationExpr) (syntax.SampleExpr, error) { +func (m RangeMapper) mapVectorAggregationExpr(expr *syntax.VectorAggregationExpr, recorder *downstreamRecorder) (syntax.SampleExpr, error) { rangeInterval := getRangeInterval(expr) // in case the interval is smaller than the configured split interval, @@ -277,7 +300,7 @@ func (m RangeMapper) mapVectorAggregationExpr(expr *syntax.VectorAggregationExpr } // Split the vector aggregation's inner expression - lhsMapped, err := m.Map(expr.Left, vectorAggrPushdown) + lhsMapped, err := m.Map(expr.Left, vectorAggrPushdown, recorder) if err != nil { return nil, err } @@ -297,7 +320,7 @@ func (m RangeMapper) mapVectorAggregationExpr(expr 
*syntax.VectorAggregationExpr // contains the initial query which will be the downstream expression with a split range interval. // Example: `sum by (a) (bytes_over_time)` // Is mapped to `sum by (a) (sum without downstream++downstream++...)` -func (m RangeMapper) mapRangeAggregationExpr(expr *syntax.RangeAggregationExpr, vectorAggrPushdown *syntax.VectorAggregationExpr) syntax.SampleExpr { +func (m RangeMapper) mapRangeAggregationExpr(expr *syntax.RangeAggregationExpr, vectorAggrPushdown *syntax.VectorAggregationExpr, recorder *downstreamRecorder) syntax.SampleExpr { rangeInterval := getRangeInterval(expr) // in case the interval is smaller than the configured split interval, @@ -313,15 +336,15 @@ func (m RangeMapper) mapRangeAggregationExpr(expr *syntax.RangeAggregationExpr, } switch expr.Operation { case syntax.OpRangeTypeBytes, syntax.OpRangeTypeCount, syntax.OpRangeTypeSum: - return m.vectorAggrWithRangeDownstreams(expr, vectorAggrPushdown, syntax.OpTypeSum, rangeInterval) + return m.vectorAggrWithRangeDownstreams(expr, vectorAggrPushdown, syntax.OpTypeSum, rangeInterval, recorder) case syntax.OpRangeTypeMax: - return m.vectorAggrWithRangeDownstreams(expr, vectorAggrPushdown, syntax.OpTypeMax, rangeInterval) + return m.vectorAggrWithRangeDownstreams(expr, vectorAggrPushdown, syntax.OpTypeMax, rangeInterval, recorder) case syntax.OpRangeTypeMin: - return m.vectorAggrWithRangeDownstreams(expr, vectorAggrPushdown, syntax.OpTypeMin, rangeInterval) + return m.vectorAggrWithRangeDownstreams(expr, vectorAggrPushdown, syntax.OpTypeMin, rangeInterval, recorder) case syntax.OpRangeTypeRate: - return m.sumOverFullRange(expr, vectorAggrPushdown, syntax.OpRangeTypeCount, rangeInterval) + return m.sumOverFullRange(expr, vectorAggrPushdown, syntax.OpRangeTypeCount, rangeInterval, recorder) case syntax.OpRangeTypeBytesRate: - return m.sumOverFullRange(expr, vectorAggrPushdown, syntax.OpRangeTypeBytes, rangeInterval) + return m.sumOverFullRange(expr, vectorAggrPushdown, syntax.OpRangeTypeBytes, rangeInterval, recorder) default: // this should not be reachable. // If an operation is splittable it should have an optimization listed. 
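As a usage sketch of the new constructor wiring, written as if inside package logql (illustrative only; a nil registerer, as used by the tests below, skips metric registration):

```go
// Build a RangeMapper with its metrics and map a query; Parse reports
// whether the mapping was a noop alongside the rewritten expression.
func mapWithMetrics(query string) (bool, syntax.Expr, error) {
	mapper, err := NewRangeMapper(time.Minute, NewRangeMapperMetrics(nil))
	if err != nil { // returned when the split interval is <= 0
		return false, nil, err
	}
	return mapper.Parse(query)
}
```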
diff --git a/pkg/logql/rangemapper_test.go b/pkg/logql/rangemapper_test.go index 9c77a03f22b0e..e3ed1ac726252 100644 --- a/pkg/logql/rangemapper_test.go +++ b/pkg/logql/rangemapper_test.go @@ -8,7 +8,7 @@ import ( ) func Test_SplitRangeInterval(t *testing.T) { - rvm, err := NewRangeMapper(2 * time.Second) + rvm, err := NewRangeMapper(2*time.Second, nilShardMetrics) require.NoError(t, err) for _, tc := range []struct { @@ -50,7 +50,7 @@ func Test_SplitRangeInterval(t *testing.T) { } func Test_SplitRangeVectorMapping(t *testing.T) { - rvm, err := NewRangeMapper(time.Minute) + rvm, err := NewRangeMapper(time.Minute, nilShardMetrics) require.NoError(t, err) for _, tc := range []struct { @@ -972,7 +972,7 @@ func Test_SplitRangeVectorMapping(t *testing.T) { } func Test_SplitRangeVectorMapping_Noop(t *testing.T) { - rvm, err := NewRangeMapper(time.Minute) + rvm, err := NewRangeMapper(time.Minute, nilShardMetrics) require.NoError(t, err) for _, tc := range []struct { @@ -1050,7 +1050,7 @@ func Test_SplitRangeVectorMapping_Noop(t *testing.T) { } func Test_FailQuery(t *testing.T) { - rvm, err := NewRangeMapper(2 * time.Minute) + rvm, err := NewRangeMapper(2*time.Minute, nilShardMetrics) require.NoError(t, err) _, _, err = rvm.Parse(`{app="foo"} |= "err"`) require.Error(t, err) diff --git a/pkg/logql/shardmapper.go b/pkg/logql/shardmapper.go index 0cb9ef1af5f1d..2d06c8d8735ab 100644 --- a/pkg/logql/shardmapper.go +++ b/pkg/logql/shardmapper.go @@ -6,86 +6,20 @@ import ( "github.com/go-kit/log/level" "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" - "github.com/prometheus/client_golang/prometheus/promauto" "github.com/grafana/loki/pkg/logql/syntax" "github.com/grafana/loki/pkg/querier/astmapper" util_log "github.com/grafana/loki/pkg/util/log" ) -// keys used in metrics -const ( - StreamsKey = "streams" - MetricsKey = "metrics" - SuccessKey = "success" - FailureKey = "failure" - NoopKey = "noop" -) - -// ShardingMetrics is the metrics wrapper used in shard mapping -type ShardingMetrics struct { - Shards *prometheus.CounterVec // sharded queries total, partitioned by (streams/metric) - ShardFactor prometheus.Histogram // per request shard factor - parsed *prometheus.CounterVec // parsed ASTs total, partitioned by (success/failure/noop) -} - -func NewShardingMetrics(registerer prometheus.Registerer) *ShardingMetrics { - return &ShardingMetrics{ - Shards: promauto.With(registerer).NewCounterVec(prometheus.CounterOpts{ - Namespace: "loki", - Name: "query_frontend_shards_total", - }, []string{"type"}), - parsed: promauto.With(registerer).NewCounterVec(prometheus.CounterOpts{ - Namespace: "loki", - Name: "query_frontend_sharding_parsed_queries_total", - }, []string{"type"}), - ShardFactor: promauto.With(registerer).NewHistogram(prometheus.HistogramOpts{ - Namespace: "loki", - Name: "query_frontend_shard_factor", - Help: "Number of shards per request", - Buckets: prometheus.LinearBuckets(0, 16, 4), // 16 is the default shard factor for later schemas - }), - } -} - -// shardRecorder constructs a recorder using the underlying metrics. -func (m *ShardingMetrics) shardRecorder() *shardRecorder { - return &shardRecorder{ - ShardingMetrics: m, - } -} - -// shardRecorder wraps a vector & histogram, providing an easy way to increment sharding counts. -// and unify them into histogram entries. -// NOT SAFE FOR CONCURRENT USE! We avoid introducing mutex locking here -// because AST mapping is single threaded. 
-type shardRecorder struct { - done bool - total int - *ShardingMetrics -} - -// Add increments both the shard count and tracks it for the eventual histogram entry. -func (r *shardRecorder) Add(x int, key string) { - r.total += x - r.Shards.WithLabelValues(key).Add(float64(x)) -} - -// Finish idemptotently records a histogram entry with the total shard factor. -func (r *shardRecorder) Finish() { - if !r.done { - r.done = true - r.ShardFactor.Observe(float64(r.total)) - } -} - -func badASTMapping(got syntax.Expr) error { - return fmt.Errorf("bad AST mapping: expected SampleExpr, but got (%T)", got) +type ShardMapper struct { + shards int + metrics *MapperMetrics } -func NewShardMapper(shards int, metrics *ShardingMetrics) (ShardMapper, error) { +func NewShardMapper(shards int, metrics *MapperMetrics) (ShardMapper, error) { if shards < 2 { - return ShardMapper{}, fmt.Errorf("Cannot create ShardMapper with <2 shards. Received %d", shards) + return ShardMapper{}, fmt.Errorf("cannot create ShardMapper with <2 shards. Received %d", shards) } return ShardMapper{ shards: shards, @@ -93,9 +27,8 @@ func NewShardMapper(shards int, metrics *ShardingMetrics) (ShardMapper, error) { }, nil } -type ShardMapper struct { - shards int - metrics *ShardingMetrics +func NewShardMapperMetrics(registerer prometheus.Registerer) *MapperMetrics { + return newMapperMetrics(registerer, "shard") } func (m ShardMapper) Parse(query string) (noop bool, expr syntax.Expr, err error) { @@ -104,11 +37,11 @@ func (m ShardMapper) Parse(query string) (noop bool, expr syntax.Expr, err error return false, nil, err } - recorder := m.metrics.shardRecorder() + recorder := m.metrics.downstreamRecorder() mapped, err := m.Map(parsed, recorder) if err != nil { - m.metrics.parsed.WithLabelValues(FailureKey).Inc() + m.metrics.ParsedQueries.WithLabelValues(FailureKey).Inc() return false, nil, err } @@ -116,9 +49,9 @@ func (m ShardMapper) Parse(query string) (noop bool, expr syntax.Expr, err error mappedStr := mapped.String() noop = originalStr == mappedStr if noop { - m.metrics.parsed.WithLabelValues(NoopKey).Inc() + m.metrics.ParsedQueries.WithLabelValues(NoopKey).Inc() } else { - m.metrics.parsed.WithLabelValues(SuccessKey).Inc() + m.metrics.ParsedQueries.WithLabelValues(SuccessKey).Inc() } recorder.Finish() // only record metrics for successful mappings @@ -126,7 +59,7 @@ func (m ShardMapper) Parse(query string) (noop bool, expr syntax.Expr, err error return noop, mapped, err } -func (m ShardMapper) Map(expr syntax.Expr, r *shardRecorder) (syntax.Expr, error) { +func (m ShardMapper) Map(expr syntax.Expr, r *downstreamRecorder) (syntax.Expr, error) { // immediately clone the passed expr to avoid mutating the original expr, err := syntax.Clone(expr) if err != nil { @@ -169,7 +102,7 @@ func (m ShardMapper) Map(expr syntax.Expr, r *shardRecorder) (syntax.Expr, error } } -func (m ShardMapper) mapLogSelectorExpr(expr syntax.LogSelectorExpr, r *shardRecorder) syntax.LogSelectorExpr { +func (m ShardMapper) mapLogSelectorExpr(expr syntax.LogSelectorExpr, r *downstreamRecorder) syntax.LogSelectorExpr { var head *ConcatLogSelectorExpr for i := m.shards - 1; i >= 0; i-- { head = &ConcatLogSelectorExpr{ @@ -188,7 +121,7 @@ func (m ShardMapper) mapLogSelectorExpr(expr syntax.LogSelectorExpr, r *shardRec return head } -func (m ShardMapper) mapSampleExpr(expr syntax.SampleExpr, r *shardRecorder) syntax.SampleExpr { +func (m ShardMapper) mapSampleExpr(expr syntax.SampleExpr, r *downstreamRecorder) syntax.SampleExpr { var head *ConcatSampleExpr for i := 
m.shards - 1; i >= 0; i-- { head = &ConcatSampleExpr{ @@ -209,7 +142,7 @@ func (m ShardMapper) mapSampleExpr(expr syntax.SampleExpr, r *shardRecorder) syn // technically, std{dev,var} are also parallelizable if there is no cross-shard merging // in descendent nodes in the AST. This optimization is currently avoided for simplicity. -func (m ShardMapper) mapVectorAggregationExpr(expr *syntax.VectorAggregationExpr, r *shardRecorder) (syntax.SampleExpr, error) { +func (m ShardMapper) mapVectorAggregationExpr(expr *syntax.VectorAggregationExpr, r *downstreamRecorder) (syntax.SampleExpr, error) { // if this AST contains unshardable operations, don't shard this at this level, // but attempt to shard a child node. if !expr.Shardable() { @@ -285,7 +218,7 @@ func (m ShardMapper) mapVectorAggregationExpr(expr *syntax.VectorAggregationExpr } } -func (m ShardMapper) mapLabelReplaceExpr(expr *syntax.LabelReplaceExpr, r *shardRecorder) (syntax.SampleExpr, error) { +func (m ShardMapper) mapLabelReplaceExpr(expr *syntax.LabelReplaceExpr, r *downstreamRecorder) (syntax.SampleExpr, error) { subMapped, err := m.Map(expr.Left, r) if err != nil { return nil, err @@ -295,10 +228,10 @@ func (m ShardMapper) mapLabelReplaceExpr(expr *syntax.LabelReplaceExpr, r *shard return &cpy, nil } -func (m ShardMapper) mapRangeAggregationExpr(expr *syntax.RangeAggregationExpr, r *shardRecorder) syntax.SampleExpr { +func (m ShardMapper) mapRangeAggregationExpr(expr *syntax.RangeAggregationExpr, r *downstreamRecorder) syntax.SampleExpr { if hasLabelModifier(expr) { - // if an expr can modify labels this means multiple shards can returns the same labelset. - // When this happens the merge strategy needs to be different than a simple concatenation. + // if an expr can modify labels this means multiple shards can return the same labelset. + // When this happens the merge strategy needs to be different from a simple concatenation. // For instance for rates we need to sum data from different shards but same series. // Since we currently support only concatenation as merge strategy, we skip those queries. 
return expr @@ -329,3 +262,7 @@ func hasLabelModifier(expr *syntax.RangeAggregationExpr) bool { } return false } + +func badASTMapping(got syntax.Expr) error { + return fmt.Errorf("bad AST mapping: expected SampleExpr, but got (%T)", got) +} diff --git a/pkg/logql/shardmapper_test.go b/pkg/logql/shardmapper_test.go index 7030656b10615..c9c30824f159c 100644 --- a/pkg/logql/shardmapper_test.go +++ b/pkg/logql/shardmapper_test.go @@ -51,7 +51,7 @@ func TestShardedStringer(t *testing.T) { } func TestMapSampleExpr(t *testing.T) { - m, err := NewShardMapper(2, nilMetrics) + m, err := NewShardMapper(2, nilShardMetrics) require.Nil(t, err) for _, tc := range []struct { @@ -106,13 +106,13 @@ func TestMapSampleExpr(t *testing.T) { }, } { t.Run(tc.in.String(), func(t *testing.T) { - require.Equal(t, tc.out, m.mapSampleExpr(tc.in, nilMetrics.shardRecorder())) + require.Equal(t, tc.out, m.mapSampleExpr(tc.in, nilShardMetrics.downstreamRecorder())) }) } } func TestMappingStrings(t *testing.T) { - m, err := NewShardMapper(2, nilMetrics) + m, err := NewShardMapper(2, nilShardMetrics) require.Nil(t, err) for _, tc := range []struct { in string @@ -260,7 +260,7 @@ func TestMappingStrings(t *testing.T) { ast, err := syntax.ParseExpr(tc.in) require.Nil(t, err) - mapped, err := m.Map(ast, nilMetrics.shardRecorder()) + mapped, err := m.Map(ast, nilShardMetrics.downstreamRecorder()) require.Nil(t, err) require.Equal(t, removeWhiteSpace(tc.out), removeWhiteSpace(mapped.String())) @@ -269,7 +269,7 @@ func TestMappingStrings(t *testing.T) { } func TestMapping(t *testing.T) { - m, err := NewShardMapper(2, nilMetrics) + m, err := NewShardMapper(2, nilShardMetrics) require.Nil(t, err) for _, tc := range []struct { @@ -1109,7 +1109,7 @@ func TestMapping(t *testing.T) { ast, err := syntax.ParseExpr(tc.in) require.Equal(t, tc.err, err) - mapped, err := m.Map(ast, nilMetrics.shardRecorder()) + mapped, err := m.Map(ast, nilShardMetrics.downstreamRecorder()) require.Equal(t, tc.err, err) require.Equal(t, tc.expr.String(), mapped.String()) diff --git a/pkg/querier/queryrange/metrics.go b/pkg/querier/queryrange/metrics.go index 25a9d883b94e8..81c3ed025251c 100644 --- a/pkg/querier/queryrange/metrics.go +++ b/pkg/querier/queryrange/metrics.go @@ -10,16 +10,28 @@ import ( type Metrics struct { *queryrangebase.InstrumentMiddlewareMetrics *queryrangebase.RetryMiddlewareMetrics - *logql.ShardingMetrics + *MiddlewareMapperMetrics *SplitByMetrics *LogResultCacheMetrics } +type MiddlewareMapperMetrics struct { + shardMapper *logql.MapperMetrics + rangeMapper *logql.MapperMetrics +} + +func NewMiddlewareMapperMetrics(registerer prometheus.Registerer) *MiddlewareMapperMetrics { + return &MiddlewareMapperMetrics{ + shardMapper: logql.NewShardMapperMetrics(registerer), + rangeMapper: logql.NewRangeMapperMetrics(registerer), + } +} + func NewMetrics(registerer prometheus.Registerer) *Metrics { return &Metrics{ InstrumentMiddlewareMetrics: queryrangebase.NewInstrumentMiddlewareMetrics(registerer), RetryMiddlewareMetrics: queryrangebase.NewRetryMiddlewareMetrics(registerer), - ShardingMetrics: logql.NewShardingMetrics(registerer), + MiddlewareMapperMetrics: NewMiddlewareMapperMetrics(registerer), SplitByMetrics: NewSplitByMetrics(registerer), LogResultCacheMetrics: NewLogResultCacheMetrics(registerer), } diff --git a/pkg/querier/queryrange/querysharding.go b/pkg/querier/queryrange/querysharding.go index bdd742ddda83b..abab1186caf5f 100644 --- a/pkg/querier/queryrange/querysharding.go +++ b/pkg/querier/queryrange/querysharding.go @@ -33,7 
+33,7 @@ func NewQueryShardMiddleware( logger log.Logger, confs ShardingConfigs, middlewareMetrics *queryrangebase.InstrumentMiddlewareMetrics, - shardingMetrics *logql.ShardingMetrics, + shardingMetrics *logql.MapperMetrics, limits Limits, ) queryrangebase.Middleware { noshards := !hasShards(confs) @@ -68,14 +68,14 @@ func newASTMapperware( confs ShardingConfigs, next queryrangebase.Handler, logger log.Logger, - metrics *logql.ShardingMetrics, + metrics *logql.MapperMetrics, limits logql.Limits, ) *astMapperware { return &astMapperware{ confs: confs, logger: log.With(logger, "middleware", "QueryShard.astMapperware"), next: next, - ng: logql.NewDownstreamEngine(logql.EngineOpts{}, DownstreamHandler{next}, metrics, limits, logger), + ng: logql.NewDownstreamEngine(logql.EngineOpts{}, DownstreamHandler{next}, limits, logger), metrics: metrics, } } @@ -85,7 +85,7 @@ type astMapperware struct { logger log.Logger next queryrangebase.Handler ng *logql.DownstreamEngine - metrics *logql.ShardingMetrics + metrics *logql.MapperMetrics } func (ast *astMapperware) Do(ctx context.Context, r queryrangebase.Request) (queryrangebase.Response, error) { @@ -262,7 +262,7 @@ func NewSeriesQueryShardMiddleware( logger log.Logger, confs ShardingConfigs, middlewareMetrics *queryrangebase.InstrumentMiddlewareMetrics, - shardingMetrics *logql.ShardingMetrics, + shardingMetrics *logql.MapperMetrics, limits queryrangebase.Limits, merger queryrangebase.Merger, ) queryrangebase.Middleware { @@ -294,7 +294,7 @@ type seriesShardingHandler struct { confs ShardingConfigs logger log.Logger next queryrangebase.Handler - metrics *logql.ShardingMetrics + metrics *logql.MapperMetrics limits queryrangebase.Limits merger queryrangebase.Merger } @@ -316,8 +316,8 @@ func (ss *seriesShardingHandler) Do(ctx context.Context, r queryrangebase.Reques return nil, fmt.Errorf("expected *LokiSeriesRequest, got (%T)", r) } - ss.metrics.Shards.WithLabelValues("series").Inc() - ss.metrics.ShardFactor.Observe(float64(conf.RowShards)) + ss.metrics.DownstreamQueries.WithLabelValues("series").Inc() + ss.metrics.DownstreamFactor.Observe(float64(conf.RowShards)) requests := make([]queryrangebase.Request, 0, conf.RowShards) for i := 0; i < int(conf.RowShards); i++ { diff --git a/pkg/querier/queryrange/querysharding_test.go b/pkg/querier/queryrange/querysharding_test.go index 4dc7ec5bc0d6f..96e2c107d02f7 100644 --- a/pkg/querier/queryrange/querysharding_test.go +++ b/pkg/querier/queryrange/querysharding_test.go @@ -23,7 +23,7 @@ import ( ) var ( - nilShardingMetrics = logql.NewShardingMetrics(nil) + nilShardingMetrics = logql.NewShardMapperMetrics(nil) defaultReq = func() *LokiRequest { return &LokiRequest{ Limit: 100, diff --git a/pkg/querier/queryrange/roundtrip.go b/pkg/querier/queryrange/roundtrip.go index 79bbc706011a0..825e9e7ba7f4e 100644 --- a/pkg/querier/queryrange/roundtrip.go +++ b/pkg/querier/queryrange/roundtrip.go @@ -281,7 +281,7 @@ func NewLogFilterTripperware( log, schema.Configs, metrics.InstrumentMiddlewareMetrics, // instrumentation is included in the sharding middleware - metrics.ShardingMetrics, + metrics.MiddlewareMapperMetrics.shardMapper, limits, ), ) @@ -333,7 +333,7 @@ func NewSeriesTripperware( log, schema.Configs, metrics.InstrumentMiddlewareMetrics, - metrics.ShardingMetrics, + metrics.MiddlewareMapperMetrics.shardMapper, limits, codec, ), @@ -438,7 +438,7 @@ func NewMetricTripperware( log, schema.Configs, metrics.InstrumentMiddlewareMetrics, // instrumentation is included in the sharding middleware - 
metrics.ShardingMetrics, + metrics.MiddlewareMapperMetrics.shardMapper, limits, ), ) @@ -480,12 +480,12 @@ func NewInstantMetricTripperware( if cfg.ShardedQueries { queryRangeMiddleware = append(queryRangeMiddleware, - NewSplitByRangeMiddleware(log, limits, nil), + NewSplitByRangeMiddleware(log, limits, metrics.MiddlewareMapperMetrics.rangeMapper), NewQueryShardMiddleware( log, schema.Configs, metrics.InstrumentMiddlewareMetrics, // instrumentation is included in the sharding middleware - metrics.ShardingMetrics, + metrics.MiddlewareMapperMetrics.shardMapper, limits, ), ) diff --git a/pkg/querier/queryrange/split_by_range.go b/pkg/querier/queryrange/split_by_range.go index 43890cf1e92fd..a0d33649584ce 100644 --- a/pkg/querier/queryrange/split_by_range.go +++ b/pkg/querier/queryrange/split_by_range.go @@ -20,21 +20,22 @@ import ( ) type splitByRange struct { - logger log.Logger - next queryrangebase.Handler - limits Limits - - ng *logql.DownstreamEngine + logger log.Logger + next queryrangebase.Handler + limits Limits + ng *logql.DownstreamEngine + metrics *logql.MapperMetrics } // NewSplitByRangeMiddleware creates a new Middleware that splits log requests by the range interval. -func NewSplitByRangeMiddleware(logger log.Logger, limits Limits, metrics *logql.ShardingMetrics) queryrangebase.Middleware { +func NewSplitByRangeMiddleware(logger log.Logger, limits Limits, metrics *logql.MapperMetrics) queryrangebase.Middleware { return queryrangebase.MiddlewareFunc(func(next queryrangebase.Handler) queryrangebase.Handler { return &splitByRange{ - logger: log.With(logger, "middleware", "InstantQuery.splitByRangeVector"), - next: next, - limits: limits, - ng: logql.NewDownstreamEngine(logql.EngineOpts{}, DownstreamHandler{next}, metrics, limits, logger), + logger: log.With(logger, "middleware", "InstantQuery.splitByRangeVector"), + next: next, + limits: limits, + ng: logql.NewDownstreamEngine(logql.EngineOpts{}, DownstreamHandler{next}, limits, logger), + metrics: metrics, } }) } @@ -53,7 +54,7 @@ func (s *splitByRange) Do(ctx context.Context, request queryrangebase.Request) ( return s.next.Do(ctx, request) } - mapper, err := logql.NewRangeMapper(interval) + mapper, err := logql.NewRangeMapper(interval, s.metrics) if err != nil { return nil, err } From 9cb78bdad826991bc49d4aed983e92bbc906a53d Mon Sep 17 00:00:00 2001 From: Cyril Tovena Date: Fri, 29 Apr 2022 18:48:53 +0200 Subject: [PATCH 036/223] Improve index entries deduping and concurrency (#6055) --- pkg/storage/stores/shipper/util/queries.go | 186 +++++++++--------- .../stores/shipper/util/queries_test.go | 123 ++++++++++-- 2 files changed, 207 insertions(+), 102 deletions(-) diff --git a/pkg/storage/stores/shipper/util/queries.go b/pkg/storage/stores/shipper/util/queries.go index 01696501a55d1..27d299bf643c9 100644 --- a/pkg/storage/stores/shipper/util/queries.go +++ b/pkg/storage/stores/shipper/util/queries.go @@ -4,14 +4,16 @@ import ( "context" "sync" - "github.com/go-kit/log/level" + "github.com/grafana/dskit/concurrency" "github.com/grafana/loki/pkg/storage/stores/series/index" util_math "github.com/grafana/loki/pkg/util/math" - "github.com/grafana/loki/pkg/util/spanlogger" ) -const maxQueriesPerGoroutine = 100 +const ( + maxQueriesBatch = 100 + maxConcurrency = 10 +) type TableQuerier interface { MultiQueries(ctx context.Context, queries []index.Query, callback index.QueryPagesCallback) error @@ -35,121 +37,127 @@ func DoParallelQueries(ctx context.Context, tableQuerier TableQuerier, queries [ if len(queries) == 0 { return nil } - 
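	// Batching arithmetic for the rewrite below, with hypothetical sizes (not part of the patch):
	// with len(queries) = 250 and maxQueriesBatch = 100, jobsCount evaluates to 3, since 250/100 = 2
	// and the non-zero remainder adds one more job. The last job then covers queries[200:250] because
	// util_math.Min((idx+1)*maxQueriesBatch, len(queries)) clamps the upper bound, and
	// concurrency.ForEachJob keeps at most maxConcurrency (10) MultiQueries calls in flight.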
errs := make(chan error) - - id := NewIndexDeduper(callback) - defer func() { - logger := spanlogger.FromContext(ctx) - level.Debug(logger).Log("msg", "done processing index queries", "table-name", queries[0].TableName, - "query-count", len(queries), "num-entries-sent", id.numEntriesSent) - }() - - if len(queries) <= maxQueriesPerGoroutine { - return tableQuerier.MultiQueries(ctx, queries, id.Callback) + if len(queries) <= maxQueriesBatch { + return tableQuerier.MultiQueries(ctx, queries, NewCallbackDeduper(callback, len(queries))) } - for i := 0; i < len(queries); i += maxQueriesPerGoroutine { - q := queries[i:util_math.Min(i+maxQueriesPerGoroutine, len(queries))] - go func(queries []index.Query) { - errs <- tableQuerier.MultiQueries(ctx, queries, id.Callback) - }(q) + jobsCount := len(queries) / maxQueriesBatch + if len(queries)%maxQueriesBatch != 0 { + jobsCount++ } - - var lastErr error - for i := 0; i < len(queries); i += maxQueriesPerGoroutine { - err := <-errs - if err != nil { - lastErr = err - } - } - - return lastErr + callback = NewSyncCallbackDeduper(callback, len(queries)) + return concurrency.ForEachJob(ctx, jobsCount, maxConcurrency, func(ctx context.Context, idx int) error { + return tableQuerier.MultiQueries(ctx, queries[idx*maxQueriesBatch:util_math.Min((idx+1)*maxQueriesBatch, len(queries))], callback) + }) } -// IndexDeduper should always be used on table level not the whole query level because it just looks at range values which can be repeated across tables +// NewSyncCallbackDeduper should always be used on table level not the whole query level because it just looks at range values which can be repeated across tables +// NewSyncCallbackDeduper is safe to be used by multiple goroutines // Cortex anyways dedupes entries across tables -type IndexDeduper struct { - callback index.QueryPagesCallback - seenRangeValues map[string]map[string]struct{} - numEntriesSent int - mtx sync.RWMutex +func NewSyncCallbackDeduper(callback index.QueryPagesCallback, queries int) index.QueryPagesCallback { + syncMap := &syncMap{ + seen: make(map[string]map[string]struct{}, queries), + } + return func(q index.Query, rbr index.ReadBatchResult) bool { + return callback(q, &readBatchDeduperSync{ + syncMap: syncMap, + hashValue: q.HashValue, + ReadBatchIterator: rbr.Iterator(), + }) + } } -func NewIndexDeduper(callback index.QueryPagesCallback) *IndexDeduper { - return &IndexDeduper{ - callback: callback, - seenRangeValues: map[string]map[string]struct{}{}, +// NewCallbackDeduper should always be used on table level not the whole query level because it just looks at range values which can be repeated across tables +// NewCallbackDeduper is not safe to be used by multiple goroutines +// Cortex anyways dedupes entries across tables +func NewCallbackDeduper(callback index.QueryPagesCallback, queries int) index.QueryPagesCallback { + f := &readBatchDeduper{ + seen: make(map[string]map[string]struct{}, queries), + } + return func(q index.Query, rbr index.ReadBatchResult) bool { + f.hashValue = q.HashValue + f.ReadBatchIterator = rbr.Iterator() + return callback(q, f) } } -func (i *IndexDeduper) Callback(query index.Query, batch index.ReadBatchResult) bool { - return i.callback(query, &filteringBatch{ - query: query, - ReadBatchResult: batch, - isSeen: i.isSeen, - }) +type readBatchDeduper struct { + index.ReadBatchIterator + hashValue string + seen map[string]map[string]struct{} } -func (i *IndexDeduper) isSeen(hashValue string, rangeValue []byte) bool { - i.mtx.RLock() - - // index entries are never 
modified during query processing so it should be safe to reference a byte slice as a string. - rangeValueStr := GetUnsafeString(rangeValue) - - if _, ok := i.seenRangeValues[hashValue][rangeValueStr]; ok { - i.mtx.RUnlock() - return true - } - - i.mtx.RUnlock() - - i.mtx.Lock() - defer i.mtx.Unlock() +func (f *readBatchDeduper) Iterator() index.ReadBatchIterator { + return f +} - // re-check if another concurrent call added the values already, if so do not add it again and return true - if _, ok := i.seenRangeValues[hashValue][rangeValueStr]; ok { +func (f *readBatchDeduper) Next() bool { + for f.ReadBatchIterator.Next() { + rangeValue := f.RangeValue() + hashes, ok := f.seen[f.hashValue] + if !ok { + hashes = map[string]struct{}{} + hashes[GetUnsafeString(rangeValue)] = struct{}{} + f.seen[f.hashValue] = hashes + return true + } + h := GetUnsafeString(rangeValue) + if _, loaded := hashes[h]; loaded { + continue + } + hashes[h] = struct{}{} return true } - // add the hashValue first if missing - if _, ok := i.seenRangeValues[hashValue]; !ok { - i.seenRangeValues[hashValue] = map[string]struct{}{} - } - - // add the rangeValue - i.seenRangeValues[hashValue][rangeValueStr] = struct{}{} - i.numEntriesSent++ return false } -type isSeen func(hashValue string, rangeValue []byte) bool - -type filteringBatch struct { - query index.Query - index.ReadBatchResult - isSeen isSeen +type syncMap struct { + seen map[string]map[string]struct{} + rw sync.RWMutex // nolint: structcheck } -func (f *filteringBatch) Iterator() index.ReadBatchIterator { - return &filteringBatchIter{ - query: f.query, - ReadBatchIterator: f.ReadBatchResult.Iterator(), - isSeen: f.isSeen, - } +type readBatchDeduperSync struct { + index.ReadBatchIterator + hashValue string + *syncMap } -type filteringBatchIter struct { - query index.Query - index.ReadBatchIterator - isSeen isSeen +func (f *readBatchDeduperSync) Iterator() index.ReadBatchIterator { + return f } -func (f *filteringBatchIter) Next() bool { +func (f *readBatchDeduperSync) Next() bool { for f.ReadBatchIterator.Next() { - if f.isSeen(f.query.HashValue, f.ReadBatchIterator.RangeValue()) { + rangeValue := f.RangeValue() + f.rw.RLock() + hashes, ok := f.seen[f.hashValue] + if ok { + h := GetUnsafeString(rangeValue) + if _, loaded := hashes[h]; loaded { + f.rw.RUnlock() + continue + } + f.rw.RUnlock() + f.rw.Lock() + if _, loaded := hashes[h]; loaded { + f.rw.Unlock() + continue + } + hashes[h] = struct{}{} + f.rw.Unlock() + return true + } + f.rw.RUnlock() + f.rw.Lock() + if _, ok := f.seen[f.hashValue]; ok { + f.rw.Unlock() continue } - + f.seen[f.hashValue] = map[string]struct{}{ + GetUnsafeString(rangeValue): {}, + } + f.rw.Unlock() return true } diff --git a/pkg/storage/stores/shipper/util/queries_test.go b/pkg/storage/stores/shipper/util/queries_test.go index eb8359ffd3db0..91ca8cc4933ec 100644 --- a/pkg/storage/stores/shipper/util/queries_test.go +++ b/pkg/storage/stores/shipper/util/queries_test.go @@ -46,15 +46,15 @@ func TestDoParallelQueries(t *testing.T) { }{ { name: "queries < maxQueriesPerGoroutine", - queryCount: maxQueriesPerGoroutine / 2, + queryCount: maxQueriesBatch / 2, }, { name: "queries = maxQueriesPerGoroutine", - queryCount: maxQueriesPerGoroutine, + queryCount: maxQueriesBatch, }, { name: "queries > maxQueriesPerGoroutine", - queryCount: maxQueriesPerGoroutine * 2, + queryCount: maxQueriesBatch * 2, }, } { t.Run(tc.name, func(t *testing.T) { @@ -157,20 +157,39 @@ func TestIndexDeduper(t *testing.T) { }, } { t.Run(tc.name, func(t *testing.T) { - 
actualValues := map[string][][]byte{} - deduper := NewIndexDeduper(func(query index.Query, readBatch index.ReadBatchResult) bool { - itr := readBatch.Iterator() - for itr.Next() { - actualValues[query.HashValue] = append(actualValues[query.HashValue], itr.RangeValue()) + t.Run("sync", func(t *testing.T) { + actualValues := map[string][][]byte{} + deduper := NewSyncCallbackDeduper(func(query index.Query, readBatch index.ReadBatchResult) bool { + itr := readBatch.Iterator() + for itr.Next() { + actualValues[query.HashValue] = append(actualValues[query.HashValue], itr.RangeValue()) + } + return true + }, 0) + + for _, batch := range tc.batches { + deduper(index.Query{HashValue: batch.hashValue}, batch) } - return true + + require.Equal(t, tc.expectedValues, actualValues) }) - for _, batch := range tc.batches { - deduper.Callback(index.Query{HashValue: batch.hashValue}, batch) - } + t.Run("nosync", func(t *testing.T) { + actualValues := map[string][][]byte{} + deduper := NewCallbackDeduper(func(query index.Query, readBatch index.ReadBatchResult) bool { + itr := readBatch.Iterator() + for itr.Next() { + actualValues[query.HashValue] = append(actualValues[query.HashValue], itr.RangeValue()) + } + return true + }, 0) - require.Equal(t, tc.expectedValues, actualValues) + for _, batch := range tc.batches { + deduper(index.Query{HashValue: batch.hashValue}, batch) + } + + require.Equal(t, tc.expectedValues, actualValues) + }) }) } } @@ -207,3 +226,81 @@ func (b batchIterator) RangeValue() []byte { func (b batchIterator) Value() []byte { panic("implement me") } + +func Benchmark_DedupeCallback(b *testing.B) { + deduper := NewCallbackDeduper(func(_ index.Query, readBatch index.ReadBatchResult) bool { + itr := readBatch.Iterator() + for itr.Next() { + _ = itr.RangeValue() + } + return true + }, 1) + q := index.Query{HashValue: "1"} + batch1 := batch{ + hashValue: "1", + rangeValues: [][]byte{[]byte("a"), []byte("b"), []byte("c")}, + } + + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + deduper(q, batch1) + } +} + +type TableQuerierFunc func(ctx context.Context, queries []index.Query, callback index.QueryPagesCallback) error + +func (f TableQuerierFunc) MultiQueries(ctx context.Context, queries []index.Query, callback index.QueryPagesCallback) error { + return f(ctx, queries, callback) +} + +func Benchmark_MultiQueries(b *testing.B) { + benchmarkMultiQueries(b, 50) + benchmarkMultiQueries(b, 100) + benchmarkMultiQueries(b, 1000) + benchmarkMultiQueries(b, 10000) + benchmarkMultiQueries(b, 50000) +} + +func benchmarkMultiQueries(b *testing.B, n int) { + b.Run(strconv.Itoa(n), func(b *testing.B) { + callback := index.QueryPagesCallback(func(_ index.Query, readBatch index.ReadBatchResult) bool { + itr := readBatch.Iterator() + for itr.Next() { + _ = itr.RangeValue() + } + return true + }) + queries := make([]index.Query, n) + for i := range queries { + queries[i] = index.Query{HashValue: strconv.Itoa(i)} + } + ranges := [][]byte{[]byte("a"), []byte("b"), []byte("c")} + ctx := context.Background() + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + _ = DoParallelQueries(ctx, TableQuerierFunc(func(_ context.Context, queries []index.Query, callback index.QueryPagesCallback) error { + for _, query := range queries { + callback(query, batch{ + hashValue: query.HashValue, + rangeValues: ranges, + }) + callback(query, batch{ + hashValue: query.HashValue, + rangeValues: ranges, + }) + callback(query, batch{ + hashValue: query.HashValue, + rangeValues: ranges, + }) + callback(query, 
batch{ + hashValue: query.HashValue, + rangeValues: ranges, + }) + } + return nil + }), queries, callback) + } + }) +} From 309592a8cf98d279e969aa3f92795fd7fc29436a Mon Sep 17 00:00:00 2001 From: Owen Diehl Date: Mon, 2 May 2022 02:36:51 -0400 Subject: [PATCH 037/223] ignores reporting cancellations as errors in the usage-reporter module (#6058) --- pkg/usagestats/reporter.go | 11 +++++++++-- pkg/usagestats/reporter_test.go | 4 ++-- 2 files changed, 11 insertions(+), 4 deletions(-) diff --git a/pkg/usagestats/reporter.go b/pkg/usagestats/reporter.go index 06a1f95e057fc..3387e57a8de9e 100644 --- a/pkg/usagestats/reporter.go +++ b/pkg/usagestats/reporter.go @@ -3,6 +3,7 @@ package usagestats import ( "bytes" "context" + "errors" "flag" "io" "math" @@ -254,7 +255,10 @@ func (rep *Reporter) running(ctx context.Context) error { if rep.cluster == nil { <-ctx.Done() - return ctx.Err() + if err := ctx.Err(); !errors.Is(err, context.Canceled) { + return err + } + return nil } // check every minute if we should report. ticker := time.NewTicker(reportCheckInterval) @@ -281,7 +285,10 @@ func (rep *Reporter) running(ctx context.Context) error { rep.lastReport = next next = next.Add(reportInterval) case <-ctx.Done(): - return ctx.Err() + if err := ctx.Err(); !errors.Is(err, context.Canceled) { + return err + } + return nil } } } diff --git a/pkg/usagestats/reporter_test.go b/pkg/usagestats/reporter_test.go index b5fb7e8ca691b..e6a0ed9712a3a 100644 --- a/pkg/usagestats/reporter_test.go +++ b/pkg/usagestats/reporter_test.go @@ -97,7 +97,7 @@ func Test_ReportLoop(t *testing.T) { <-time.After(6*time.Second + (stabilityCheckInterval * time.Duration(stabilityMinimunRequired+1))) cancel() }() - require.Equal(t, context.Canceled, r.running(ctx)) + require.Equal(t, nil, r.running(ctx)) require.GreaterOrEqual(t, totalReport, 5) first := clusterIDs[0] for _, uid := range clusterIDs { @@ -156,5 +156,5 @@ func TestWrongKV(t *testing.T) { <-time.After(1 * time.Second) cancel() }() - require.Equal(t, context.Canceled, r.running(ctx)) + require.Equal(t, nil, r.running(ctx)) } From eb242f305dc3637375c97dd5e35e844fba33eb1e Mon Sep 17 00:00:00 2001 From: Travis Patterson Date: Mon, 2 May 2022 00:44:54 -0600 Subject: [PATCH 038/223] Add tailing metrics (#6059) * Add tailing metrics * fix test * fix test --- pkg/loki/modules.go | 4 ++-- pkg/querier/metrics.go | 29 +++++++++++++++++++++++++ pkg/querier/querier.go | 7 ++++++- pkg/querier/querier_test.go | 2 +- pkg/querier/tail.go | 42 +++++++++++++++++++++++++++++++++++-- pkg/querier/tail_test.go | 2 +- 6 files changed, 79 insertions(+), 7 deletions(-) create mode 100644 pkg/querier/metrics.go diff --git a/pkg/loki/modules.go b/pkg/loki/modules.go index 9d36d1e96a5b6..6e648da86b4a0 100644 --- a/pkg/loki/modules.go +++ b/pkg/loki/modules.go @@ -238,7 +238,7 @@ func (t *Loki) initQuerier() (services.Service, error) { return nil, err } - q, err := querier.New(t.Cfg.Querier, t.Store, t.ingesterQuerier, t.overrides, deleteStore) + q, err := querier.New(t.Cfg.Querier, t.Store, t.ingesterQuerier, t.overrides, deleteStore, prometheus.DefaultRegisterer) if err != nil { return nil, err } @@ -699,7 +699,7 @@ func (t *Loki) initRuler() (_ services.Service, err error) { return nil, err } - q, err := querier.New(t.Cfg.Querier, t.Store, t.ingesterQuerier, t.overrides, deleteStore) + q, err := querier.New(t.Cfg.Querier, t.Store, t.ingesterQuerier, t.overrides, deleteStore, nil) if err != nil { return nil, err } diff --git a/pkg/querier/metrics.go b/pkg/querier/metrics.go new file mode 100644 
index 0000000000000..3da0690da46da --- /dev/null +++ b/pkg/querier/metrics.go @@ -0,0 +1,29 @@ +package querier + +import ( + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" +) + +type Metrics struct { + tailsActive prometheus.Gauge + tailedStreamsActive prometheus.Gauge + tailedBytesTotal prometheus.Counter +} + +func NewMetrics(r prometheus.Registerer) *Metrics { + return &Metrics{ + tailsActive: promauto.With(r).NewGauge(prometheus.GaugeOpts{ + Name: "loki_querier_tail_active", + Help: "Number of active tailers", + }), + tailedStreamsActive: promauto.With(r).NewGauge(prometheus.GaugeOpts{ + Name: "loki_querier_tail_active_streams", + Help: "Number of active streams being tailed", + }), + tailedBytesTotal: promauto.With(r).NewCounter(prometheus.CounterOpts{ + Name: "loki_querier_tail_bytes_total", + Help: "total bytes tailed", + }), + } +} diff --git a/pkg/querier/querier.go b/pkg/querier/querier.go index 9207381a30ca8..7bf9b1ca6f653 100644 --- a/pkg/querier/querier.go +++ b/pkg/querier/querier.go @@ -6,6 +6,8 @@ import ( "net/http" "time" + "github.com/prometheus/client_golang/prometheus" + "github.com/grafana/loki/pkg/storage/stores/shipper/compactor/deletion" "github.com/go-kit/log/level" @@ -90,6 +92,7 @@ type SingleTenantQuerier struct { limits *validation.Overrides ingesterQuerier *IngesterQuerier deleteGetter deleteGetter + metrics *Metrics } type deleteGetter interface { @@ -97,13 +100,14 @@ type deleteGetter interface { } // New makes a new Querier. -func New(cfg Config, store storage.Store, ingesterQuerier *IngesterQuerier, limits *validation.Overrides, d deleteGetter) (*SingleTenantQuerier, error) { +func New(cfg Config, store storage.Store, ingesterQuerier *IngesterQuerier, limits *validation.Overrides, d deleteGetter, r prometheus.Registerer) (*SingleTenantQuerier, error) { return &SingleTenantQuerier{ cfg: cfg, store: store, ingesterQuerier: ingesterQuerier, limits: limits, deleteGetter: d, + metrics: NewMetrics(r), }, nil } @@ -454,6 +458,7 @@ func (q *SingleTenantQuerier) Tail(ctx context.Context, req *logproto.TailReques }, q.cfg.TailMaxDuration, tailerWaitEntryThrottle, + q.metrics, ), nil } diff --git a/pkg/querier/querier_test.go b/pkg/querier/querier_test.go index 3411c5f9b7975..6ad3c6a6e976e 100644 --- a/pkg/querier/querier_test.go +++ b/pkg/querier/querier_test.go @@ -1175,7 +1175,7 @@ func newQuerier(cfg Config, clientCfg client.Config, clientFactory ring_client.P return nil, err } - return New(cfg, store, iq, limits, dg) + return New(cfg, store, iq, limits, dg, nil) } type mockDeleteGettter struct { diff --git a/pkg/querier/tail.go b/pkg/querier/tail.go index 313f4e475ba2d..e46732fb803d5 100644 --- a/pkg/querier/tail.go +++ b/pkg/querier/tail.go @@ -41,6 +41,10 @@ type Tailer struct { currEntry logproto.Entry currLabels string + // keep track of the streams for metrics about active streams + seenStreams map[uint64]struct{} + seenStreamsMtx sync.Mutex + tailDisconnectedIngesters func([]string) (map[string]logproto.Querier_TailClient, error) querierTailClients map[string]logproto.Querier_TailClient // addr -> grpc clients for tailing logs from ingesters @@ -55,6 +59,7 @@ type Tailer struct { // if we are not seeing any response from ingester, // how long do we want to wait by going into sleep waitEntryThrottle time.Duration + metrics *Metrics } func (t *Tailer) readTailClients() { @@ -95,8 +100,11 @@ func (t *Tailer) loop() { // Read as much entries as we can (up to the max allowed) and populate the // 
tail response we'll send over the response channel - tailResponse := new(loghttp.TailResponse) - entriesCount := 0 + var ( + tailResponse = new(loghttp.TailResponse) + entriesCount = 0 + entriesSize = 0 + ) for ; entriesCount < maxEntriesPerTailResponse && t.next(); entriesCount++ { // If the response channel channel is blocked, we drop the current entry directly @@ -106,6 +114,7 @@ func (t *Tailer) loop() { continue } + entriesSize += len(t.currEntry.Line) tailResponse.Streams = append(tailResponse.Streams, logproto.Stream{ Labels: t.currLabels, Entries: []logproto.Entry{t.currEntry}, @@ -151,6 +160,7 @@ func (t *Tailer) loop() { select { case t.responseChan <- tailResponse: + t.metrics.tailedBytesTotal.Add(float64(entriesSize)) if len(droppedEntries) > 0 { droppedEntries = make([]loghttp.DroppedEntry, 0) } @@ -239,6 +249,8 @@ func (t *Tailer) next() bool { t.currEntry = t.openStreamIterator.Entry() t.currLabels = t.openStreamIterator.Labels() + t.recordStream(t.openStreamIterator.StreamHash()) + return true } @@ -246,6 +258,9 @@ func (t *Tailer) close() error { t.streamMtx.Lock() defer t.streamMtx.Unlock() + t.metrics.tailsActive.Dec() + t.metrics.tailedStreamsActive.Sub(t.activeStreamCount()) + t.stopped = true return t.openStreamIterator.Close() } @@ -264,6 +279,25 @@ func (t *Tailer) getCloseErrorChan() <-chan error { return t.closeErrChan } +func (t *Tailer) recordStream(id uint64) { + t.seenStreamsMtx.Lock() + defer t.seenStreamsMtx.Unlock() + + if _, ok := t.seenStreams[id]; ok { + return + } + + t.seenStreams[id] = struct{}{} + t.metrics.tailedStreamsActive.Inc() +} + +func (t *Tailer) activeStreamCount() float64 { + t.seenStreamsMtx.Lock() + defer t.seenStreamsMtx.Unlock() + + return float64(len(t.seenStreams)) +} + func newTailer( delayFor time.Duration, querierTailClients map[string]logproto.Querier_TailClient, @@ -271,6 +305,7 @@ func newTailer( tailDisconnectedIngesters func([]string) (map[string]logproto.Querier_TailClient, error), tailMaxDuration time.Duration, waitEntryThrottle time.Duration, + m *Metrics, ) *Tailer { t := Tailer{ openStreamIterator: iter.NewMergeEntryIterator(context.Background(), []iter.EntryIterator{historicEntries}, logproto.FORWARD), @@ -278,11 +313,14 @@ func newTailer( delayFor: delayFor, responseChan: make(chan *loghttp.TailResponse, maxBufferedTailResponses), closeErrChan: make(chan error), + seenStreams: make(map[uint64]struct{}), tailDisconnectedIngesters: tailDisconnectedIngesters, tailMaxDuration: tailMaxDuration, waitEntryThrottle: waitEntryThrottle, + metrics: m, } + t.metrics.tailsActive.Inc() t.readTailClients() go t.loop() return &t diff --git a/pkg/querier/tail_test.go b/pkg/querier/tail_test.go index f62062137954b..dbd9b9b1c9811 100644 --- a/pkg/querier/tail_test.go +++ b/pkg/querier/tail_test.go @@ -161,7 +161,7 @@ func TestTailer(t *testing.T) { tailClients["test"] = test.tailClient } - tailer := newTailer(0, tailClients, test.historicEntries, tailDisconnectedIngesters, timeout, throttle) + tailer := newTailer(0, tailClients, test.historicEntries, tailDisconnectedIngesters, timeout, throttle, NewMetrics(nil)) defer tailer.close() test.tester(t, tailer, test.tailClient) From a5b9a9a91c0564cbd9c4305183cdc4e913e38ac0 Mon Sep 17 00:00:00 2001 From: Karen Miller <84039272+KMiller-Grafana@users.noreply.github.com> Date: Sun, 1 May 2022 23:45:42 -0700 Subject: [PATCH 039/223] Docs: Improve wording of getting started guide (#6060) - Add the -d option to all docker-compose up commands - Make the wording consistent about the CWD --- 
docs/sources/getting-started/_index.md | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git a/docs/sources/getting-started/_index.md b/docs/sources/getting-started/_index.md index 1f7dad3c12e97..bbd027a24b25b 100644 --- a/docs/sources/getting-started/_index.md +++ b/docs/sources/getting-started/_index.md @@ -55,8 +55,6 @@ The test environment uses Docker compose to instantiate these parts, each in its ## Deploy the test environment -All shell commands are issued from the `evaluate-loki` directory. - 1. With `evaluate-loki` as the current working directory, deploy the test environment using `docker-compose`: ```bash docker-compose up -d @@ -109,7 +107,7 @@ To break down the test environment: - Close the Grafana browser window -- Stop and remove all the Docker containers. With `evaluate-loki` as the current working directory: +- With `evaluate-loki` as the current working directory, stop and remove all the Docker containers: ```bash docker-compose down ``` @@ -122,20 +120,20 @@ Choose one of these two ways to apply a new configuration: - To remove already-generated logs, restart the test environment with a new configuration. - 1. Stop and clean up an existing test environment: + 1. With `evaluate-loki` as the current working directory, stop and clean up an existing test environment: ``` docker-compose down ``` 1. Edit the `docker-compose.yaml` file. Within the YAML file, change the `flog.command` field's value to specify your flog output. - 1. Instantiate the new test environment: + 1. With `evaluate-loki` as the current working directory, instantiate the new test environment: ``` - docker-compose up + docker-compose up -d ``` - To keep already-generated logs in the running test environment, restart flog with a new configuration. 1. Edit the `docker-compose.yaml` file. Within the YAML file, change the `flog.command` field's value to specify your flog output. - 1. Restart only the flog app within the currently-running test environment: + 1. 
With `evaluate-loki` as the current working directory, restart only the flog app within the currently-running test environment: ``` docker-compose up -d --force-recreate flog ``` From 2758dc659acbf5682a9655422b43396a1fed5a83 Mon Sep 17 00:00:00 2001 From: Sandeep Sukhani Date: Mon, 2 May 2022 13:39:22 +0530 Subject: [PATCH 040/223] boltdb shipper index list cache improvements (#6054) * bypass index list cache when doing query time downloading of index * detect and refresh stale index list cache during sync --- .../indexshipper/downloads/index_set.go | 2 +- .../indexshipper/downloads/table_manager.go | 2 +- .../downloads/table_manager_test.go | 2 +- .../indexshipper/downloads/table_test.go | 21 ++++++++-- .../stores/shipper/compactor/index_set.go | 2 +- pkg/storage/stores/shipper/compactor/table.go | 4 +- .../stores/shipper/downloads/index_set.go | 26 ++++++++---- .../shipper/downloads/index_set_test.go | 2 +- pkg/storage/stores/shipper/downloads/table.go | 15 +++++-- .../stores/shipper/downloads/table_manager.go | 2 +- .../shipper/downloads/table_manager_test.go | 2 +- .../stores/shipper/downloads/table_test.go | 42 +++++++++++++++++-- .../stores/shipper/storage/cached_client.go | 21 +++++++--- .../shipper/storage/cached_client_test.go | 22 +++++----- pkg/storage/stores/shipper/storage/client.go | 27 ++++++------ .../stores/shipper/storage/client_test.go | 3 +- .../stores/shipper/storage/index_set.go | 13 ++++-- pkg/storage/stores/shipper/table_client.go | 3 +- 18 files changed, 149 insertions(+), 62 deletions(-) diff --git a/pkg/storage/stores/indexshipper/downloads/index_set.go b/pkg/storage/stores/indexshipper/downloads/index_set.go index 65363447b067f..d9fa6dcda2fe1 100644 --- a/pkg/storage/stores/indexshipper/downloads/index_set.go +++ b/pkg/storage/stores/indexshipper/downloads/index_set.go @@ -284,7 +284,7 @@ func (t *indexSet) checkStorageForUpdates(ctx context.Context, lock bool) (toDow // listing tables from store var files []storage.IndexFile - files, err = t.baseIndexSet.ListFiles(ctx, t.tableName, t.userID) + files, err = t.baseIndexSet.ListFiles(ctx, t.tableName, t.userID, false) if err != nil { return } diff --git a/pkg/storage/stores/indexshipper/downloads/table_manager.go b/pkg/storage/stores/indexshipper/downloads/table_manager.go index 423a71baab1ec..caf1a7b358f7a 100644 --- a/pkg/storage/stores/indexshipper/downloads/table_manager.go +++ b/pkg/storage/stores/indexshipper/downloads/table_manager.go @@ -263,7 +263,7 @@ func (tm *tableManager) ensureQueryReadiness(ctx context.Context) error { } // list the users that have dedicated index files for this table - _, usersWithIndex, err := tm.indexStorageClient.ListFiles(ctx, tableName) + _, usersWithIndex, err := tm.indexStorageClient.ListFiles(ctx, tableName, false) if err != nil { return err } diff --git a/pkg/storage/stores/indexshipper/downloads/table_manager_test.go b/pkg/storage/stores/indexshipper/downloads/table_manager_test.go index 9aa20c91f4a37..b62a95ac87a79 100644 --- a/pkg/storage/stores/indexshipper/downloads/table_manager_test.go +++ b/pkg/storage/stores/indexshipper/downloads/table_manager_test.go @@ -322,6 +322,6 @@ func (m *mockIndexStorageClient) ListTables(ctx context.Context) ([]string, erro return m.tablesInStorage, nil } -func (m *mockIndexStorageClient) ListFiles(ctx context.Context, tableName string) ([]storage.IndexFile, []string, error) { +func (m *mockIndexStorageClient) ListFiles(ctx context.Context, tableName string, bypassCache bool) ([]storage.IndexFile, []string, error) { return 
[]storage.IndexFile{}, m.userIndexesInTables[tableName], nil } diff --git a/pkg/storage/stores/indexshipper/downloads/table_test.go b/pkg/storage/stores/indexshipper/downloads/table_test.go index 46189de49c135..14fb9874c6397 100644 --- a/pkg/storage/stores/indexshipper/downloads/table_test.go +++ b/pkg/storage/stores/indexshipper/downloads/table_test.go @@ -32,8 +32,8 @@ func newStorageClientWithFakeObjectsInList(storageClient storage.Client) storage return storageClientWithFakeObjectsInList{storageClient} } -func (o storageClientWithFakeObjectsInList) ListFiles(ctx context.Context, tableName string) ([]storage.IndexFile, []string, error) { - files, userIDs, err := o.Client.ListFiles(ctx, tableName) +func (o storageClientWithFakeObjectsInList) ListFiles(ctx context.Context, tableName string, bypassCache bool) ([]storage.IndexFile, []string, error) { + files, userIDs, err := o.Client.ListFiles(ctx, tableName, true) if err != nil { return nil, nil, err } @@ -46,6 +46,20 @@ func (o storageClientWithFakeObjectsInList) ListFiles(ctx context.Context, table return files, userIDs, nil } +func (o storageClientWithFakeObjectsInList) ListUserFiles(ctx context.Context, tableName, userID string, _ bool) ([]storage.IndexFile, error) { + files, err := o.Client.ListUserFiles(ctx, tableName, userID, true) + if err != nil { + return nil, err + } + + files = append(files, storage.IndexFile{ + Name: "fake-object", + ModifiedAt: time.Now(), + }) + + return files, nil +} + func buildTestTable(t *testing.T, path string) (*table, stopFunc) { storageClient := buildTestStorageClient(t, path) cachePath := filepath.Join(path, cacheDirName) @@ -53,7 +67,7 @@ func buildTestTable(t *testing.T, path string) (*table, stopFunc) { table := NewTable(tableName, cachePath, storageClient, func(path string) (index.Index, error) { return openMockIndexFile(t, path), nil }).(*table) - _, usersWithIndex, err := table.storageClient.ListFiles(context.Background(), tableName) + _, usersWithIndex, err := table.storageClient.ListFiles(context.Background(), tableName, false) require.NoError(t, err) require.NoError(t, table.EnsureQueryReadiness(context.Background(), usersWithIndex)) @@ -301,6 +315,7 @@ func TestTable_Sync(t *testing.T) { require.NoError(t, ioutil.WriteFile(filepath.Join(tablePathInStorage, newDB), []byte(newDB), 0755)) // sync the table + table.storageClient.RefreshIndexListCache(context.Background()) require.NoError(t, table.Sync(context.Background())) // check that table got the new index and dropped the deleted index diff --git a/pkg/storage/stores/shipper/compactor/index_set.go b/pkg/storage/stores/shipper/compactor/index_set.go index af1e27de1d5f4..212ce6818c52b 100644 --- a/pkg/storage/stores/shipper/compactor/index_set.go +++ b/pkg/storage/stores/shipper/compactor/index_set.go @@ -94,7 +94,7 @@ func (is *indexSet) initUserIndexSet(workingDir string) { ctx, cancelFunc := context.WithTimeout(is.ctx, userIndexReadinessTimeout) defer cancelFunc() - is.sourceObjects, is.err = is.baseIndexSet.ListFiles(is.ctx, is.tableName, is.userID) + is.sourceObjects, is.err = is.baseIndexSet.ListFiles(is.ctx, is.tableName, is.userID, false) if is.err != nil { return } diff --git a/pkg/storage/stores/shipper/compactor/table.go b/pkg/storage/stores/shipper/compactor/table.go index cac8e89f4aefa..c73b36eb2196e 100644 --- a/pkg/storage/stores/shipper/compactor/table.go +++ b/pkg/storage/stores/shipper/compactor/table.go @@ -123,7 +123,7 @@ func newTable(ctx context.Context, workingDirectory string, indexStorageClient s } func (t *table) 
compact(applyRetention bool) error {
-	indexFiles, usersWithPerUserIndex, err := t.indexStorageClient.ListFiles(t.ctx, t.name)
+	indexFiles, usersWithPerUserIndex, err := t.indexStorageClient.ListFiles(t.ctx, t.name, false)
 	if err != nil {
 		return err
 	}
@@ -209,7 +209,7 @@ func (t *table) done() error {
 			continue
 		}
 
-		indexFiles, err := t.baseUserIndexSet.ListFiles(t.ctx, t.name, userID)
+		indexFiles, err := t.baseUserIndexSet.ListFiles(t.ctx, t.name, userID, false)
 		if err != nil {
 			return err
 		}
diff --git a/pkg/storage/stores/shipper/downloads/index_set.go b/pkg/storage/stores/shipper/downloads/index_set.go
index f0f8692d19c80..a6c9347e89ecf 100644
--- a/pkg/storage/stores/shipper/downloads/index_set.go
+++ b/pkg/storage/stores/shipper/downloads/index_set.go
@@ -25,8 +25,10 @@ import (
 	"github.com/grafana/loki/pkg/util/spanlogger"
 )
 
+var errIndexListCacheTooStale = fmt.Errorf("index list cache too stale")
+
 type IndexSet interface {
-	Init() error
+	Init(forQuerying bool) error
 	Close()
 	MultiQueries(ctx context.Context, queries []index.Query, callback index.QueryPagesCallback) error
 	DropAllDBs() error
@@ -87,7 +89,7 @@ func NewIndexSet(tableName, userID, cacheLocation string, baseIndexSet storage.I
 }
 
 // Init downloads all the db files for the table from object storage.
-func (t *indexSet) Init() (err error) {
+func (t *indexSet) Init(forQuerying bool) (err error) {
 	// Using background context to avoid cancellation of download when request times out.
 	// We would anyways need the files for serving next requests.
 	ctx, cancelFunc := context.WithTimeout(context.Background(), downloadTimeout)
@@ -142,7 +144,7 @@
 	level.Debug(logger).Log("msg", fmt.Sprintf("opened %d local files, now starting sync operation", len(t.dbs)))
 
 	// sync the table to get new files and remove the deleted ones from storage.
-	err = t.sync(ctx, false)
+	err = t.sync(ctx, false, forQuerying)
 	if err != nil {
 		return
 	}
@@ -275,11 +277,11 @@ func (t *indexSet) cleanupDB(fileName string) error {
 }
 
 func (t *indexSet) Sync(ctx context.Context) (err error) {
-	return t.sync(ctx, true)
+	return t.sync(ctx, true, false)
 }
 
 // sync downloads updated and new files from the storage relevant for the table and removes the deleted ones
-func (t *indexSet) sync(ctx context.Context, lock bool) (err error) {
+func (t *indexSet) sync(ctx context.Context, lock, bypassListCache bool) (err error) {
 	level.Debug(t.logger).Log("msg", "syncing index files")
 
 	defer func() {
@@ -290,7 +292,7 @@
 		t.metrics.tablesSyncOperationTotal.WithLabelValues(status).Inc()
 	}()
 
-	toDownload, toDelete, err := t.checkStorageForUpdates(ctx, lock)
+	toDownload, toDelete, err := t.checkStorageForUpdates(ctx, lock, bypassListCache)
 	if err != nil {
 		return err
 	}
@@ -302,6 +304,14 @@
 		return err
 	}
 
+	// if we did not bypass the list cache and skipped downloading all the new files due to them being removed by compaction,
+	// it means the cache is not valid anymore since compaction would have happened after the last index list cache refresh.
+	// Let us return an error to ask the caller to re-run the sync after the list cache refresh.
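As a reading aid, the condition the comment above describes can be restated as a tiny predicate. This is a hedged sketch with hypothetical names, not the patch itself; the actual check follows in the hunk below:

```Go
package main

import "fmt"

// listCacheLooksStale mirrors the reasoning above: we trusted the list
// cache, it advertised new files to download, yet none could actually be
// fetched. That suggests compaction removed them after the cache was last
// built, so the cache must be refreshed and the sync re-run.
func listCacheLooksStale(bypassedListCache bool, downloaded, toDownload int) bool {
	return !bypassedListCache && downloaded == 0 && toDownload > 0
}

func main() {
	fmt.Println(listCacheLooksStale(false, 0, 3)) // true: refresh cache, re-run sync
	fmt.Println(listCacheLooksStale(true, 0, 3))  // false: listing bypassed the cache
}
```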
+ if !bypassListCache && len(downloadedFiles) == 0 && len(toDownload) > 0 { + level.Error(t.logger).Log("msg", "we skipped downloading all the new files, possibly removed by compaction", "files", toDownload) + return errIndexListCacheTooStale + } + if lock { err = t.dbsMtx.lock(ctx) if err != nil { @@ -331,11 +341,11 @@ func (t *indexSet) sync(ctx context.Context, lock bool) (err error) { } // checkStorageForUpdates compares files from cache with storage and builds the list of files to be downloaded from storage and to be deleted from cache -func (t *indexSet) checkStorageForUpdates(ctx context.Context, lock bool) (toDownload []storage.IndexFile, toDelete []string, err error) { +func (t *indexSet) checkStorageForUpdates(ctx context.Context, lock, bypassListCache bool) (toDownload []storage.IndexFile, toDelete []string, err error) { // listing tables from store var files []storage.IndexFile - files, err = t.baseIndexSet.ListFiles(ctx, t.tableName, t.userID) + files, err = t.baseIndexSet.ListFiles(ctx, t.tableName, t.userID, bypassListCache) if err != nil { return } diff --git a/pkg/storage/stores/shipper/downloads/index_set_test.go b/pkg/storage/stores/shipper/downloads/index_set_test.go index b583b731ab789..0a7ddeb6ed2ad 100644 --- a/pkg/storage/stores/shipper/downloads/index_set_test.go +++ b/pkg/storage/stores/shipper/downloads/index_set_test.go @@ -26,7 +26,7 @@ func buildTestIndexSet(t *testing.T, userID, path string) (*indexSet, stopFunc) boltDBIndexClient, util_log.Logger, newMetrics(nil)) require.NoError(t, err) - require.NoError(t, idxSet.Init()) + require.NoError(t, idxSet.Init(false)) return idxSet.(*indexSet), func() { idxSet.Close() diff --git a/pkg/storage/stores/shipper/downloads/table.go b/pkg/storage/stores/shipper/downloads/table.go index ee82877eb1a6c..8153423475a72 100644 --- a/pkg/storage/stores/shipper/downloads/table.go +++ b/pkg/storage/stores/shipper/downloads/table.go @@ -114,7 +114,7 @@ func LoadTable(name, cacheLocation string, storageClient storage.Client, boltDBI return nil, err } - err = userIndexSet.Init() + err = userIndexSet.Init(false) if err != nil { return nil, err } @@ -128,7 +128,7 @@ func LoadTable(name, cacheLocation string, storageClient storage.Client, boltDBI return nil, err } - err = commonIndexSet.Init() + err = commonIndexSet.Init(false) if err != nil { return nil, err } @@ -254,6 +254,15 @@ func (t *table) Sync(ctx context.Context) error { for userID, indexSet := range t.indexSets { if err := indexSet.Sync(ctx); err != nil { + if errors.Is(err, errIndexListCacheTooStale) { + level.Info(t.logger).Log("msg", "we have hit stale list cache, refreshing it and running sync again") + t.storageClient.RefreshIndexListCache(ctx) + + err = indexSet.Sync(ctx) + if err == nil { + continue + } + } return errors.Wrap(err, fmt.Sprintf("failed to sync index set %s for table %s", userID, t.name)) } } @@ -309,7 +318,7 @@ func (t *table) getOrCreateIndexSet(ctx context.Context, id string, forQuerying }() } - err := indexSet.Init() + err := indexSet.Init(forQuerying) if err != nil { level.Error(t.logger).Log("msg", fmt.Sprintf("failed to init user index set %s", id), "err", err) } diff --git a/pkg/storage/stores/shipper/downloads/table_manager.go b/pkg/storage/stores/shipper/downloads/table_manager.go index 04d61d2784c28..85574b6fca6fb 100644 --- a/pkg/storage/stores/shipper/downloads/table_manager.go +++ b/pkg/storage/stores/shipper/downloads/table_manager.go @@ -297,7 +297,7 @@ func (tm *TableManager) ensureQueryReadiness(ctx context.Context) error { } // list 
the users that have dedicated index files for this table - _, usersWithIndex, err := tm.indexStorageClient.ListFiles(ctx, tableName) + _, usersWithIndex, err := tm.indexStorageClient.ListFiles(ctx, tableName, false) if err != nil { return err } diff --git a/pkg/storage/stores/shipper/downloads/table_manager_test.go b/pkg/storage/stores/shipper/downloads/table_manager_test.go index bac73d5b8c4cf..8f7003890eb1f 100644 --- a/pkg/storage/stores/shipper/downloads/table_manager_test.go +++ b/pkg/storage/stores/shipper/downloads/table_manager_test.go @@ -328,6 +328,6 @@ func (m *mockIndexStorageClient) ListTables(ctx context.Context) ([]string, erro return m.tablesInStorage, nil } -func (m *mockIndexStorageClient) ListFiles(ctx context.Context, tableName string) ([]storage.IndexFile, []string, error) { +func (m *mockIndexStorageClient) ListFiles(ctx context.Context, tableName string, bypassCache bool) ([]storage.IndexFile, []string, error) { return []storage.IndexFile{}, m.userIndexesInTables[tableName], nil } diff --git a/pkg/storage/stores/shipper/downloads/table_test.go b/pkg/storage/stores/shipper/downloads/table_test.go index b91ef62d53aab..cc3430f8e5450 100644 --- a/pkg/storage/stores/shipper/downloads/table_test.go +++ b/pkg/storage/stores/shipper/downloads/table_test.go @@ -38,8 +38,8 @@ func newStorageClientWithFakeObjectsInList(storageClient storage.Client) storage return storageClientWithFakeObjectsInList{storageClient} } -func (o storageClientWithFakeObjectsInList) ListFiles(ctx context.Context, tableName string) ([]storage.IndexFile, []string, error) { - files, userIDs, err := o.Client.ListFiles(ctx, tableName) +func (o storageClientWithFakeObjectsInList) ListFiles(ctx context.Context, tableName string, _ bool) ([]storage.IndexFile, []string, error) { + files, userIDs, err := o.Client.ListFiles(ctx, tableName, true) if err != nil { return nil, nil, err } @@ -52,6 +52,20 @@ func (o storageClientWithFakeObjectsInList) ListFiles(ctx context.Context, table return files, userIDs, nil } +func (o storageClientWithFakeObjectsInList) ListUserFiles(ctx context.Context, tableName, userID string, _ bool) ([]storage.IndexFile, error) { + files, err := o.Client.ListUserFiles(ctx, tableName, userID, true) + if err != nil { + return nil, err + } + + files = append(files, storage.IndexFile{ + Name: "fake-object", + ModifiedAt: time.Now(), + }) + + return files, nil +} + type stopFunc func() func buildTestClients(t *testing.T, path string) (*local.BoltIndexClient, storage.Client) { @@ -72,7 +86,7 @@ func buildTestTable(t *testing.T, path string) (*table, *local.BoltIndexClient, cachePath := filepath.Join(path, cacheDirName) table := NewTable(tableName, cachePath, storageClient, boltDBIndexClient, newMetrics(nil)).(*table) - _, usersWithIndex, err := table.storageClient.ListFiles(context.Background(), tableName) + _, usersWithIndex, err := table.storageClient.ListFiles(context.Background(), tableName, false) require.NoError(t, err) require.NoError(t, table.EnsureQueryReadiness(context.Background(), usersWithIndex)) @@ -397,6 +411,7 @@ func TestTable_Sync(t *testing.T) { testutil.AddRecordsToDB(t, filepath.Join(tablePathInStorage, newDB), boltdbClient, 20, 10, nil) // sync the table + table.storageClient.RefreshIndexListCache(context.Background()) require.NoError(t, table.Sync(context.Background())) // query and verify table has expected records from new db and the records from deleted db are gone @@ -416,6 +431,27 @@ func TestTable_Sync(t *testing.T) { _, ok := expectedFilesInDir[fileInfo.Name()] 
require.True(t, ok) } + + // let us simulate a compaction to test stale index list cache handling + + // first, let us add a new file and refresh the index list cache + oneMoreDB := "one-more-db" + testutil.AddRecordsToDB(t, filepath.Join(tablePathInStorage, oneMoreDB), boltdbClient, 30, 10, nil) + table.storageClient.RefreshIndexListCache(context.Background()) + + // now, without syncing the table, let us compact the index in storage + compactedDBName := "compacted-db" + testutil.AddRecordsToDB(t, filepath.Join(tablePathInStorage, compactedDBName), boltdbClient, 10, 30, nil) + require.NoError(t, os.Remove(filepath.Join(tablePathInStorage, noUpdatesDB))) + require.NoError(t, os.Remove(filepath.Join(tablePathInStorage, newDB))) + require.NoError(t, os.Remove(filepath.Join(tablePathInStorage, oneMoreDB))) + + // let us run a sync which should detect the stale index list cache and sync the table after refreshing the cache + require.NoError(t, table.Sync(context.Background())) + // query and verify table has expected records + testutil.TestSingleTableQuery(t, userID, []index.Query{{}}, table, 10, 30) + + require.Len(t, table.indexSets[""].(*indexSet).dbs, 1) } func TestTable_QueryResponse(t *testing.T) { diff --git a/pkg/storage/stores/shipper/storage/cached_client.go b/pkg/storage/stores/shipper/storage/cached_client.go index d497d167fc381..af725a00cb598 100644 --- a/pkg/storage/stores/shipper/storage/cached_client.go +++ b/pkg/storage/stores/shipper/storage/cached_client.go @@ -50,7 +50,7 @@ func newCachedObjectClient(downstreamClient client.ObjectClient) *cachedObjectCl // We have a buffered channel here with a capacity of 1 to make sure only one concurrent call makes it through. // We also have a sync.WaitGroup to make sure all the concurrent calls to buildCacheOnce wait until the cache gets rebuilt since // we are doing read-through cache, and we do not want to serve stale results. 
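The doc comment above compresses a fair amount of concurrency into two sentences. Here is a runnable, simplified sketch of the same gate-plus-WaitGroup idea — hypothetical names, not the actual cachedObjectClient, whose real buildCacheOnce follows in the diff:

```Go
package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

type rebuilder struct {
	gate   chan struct{} // capacity 1: admits a single concurrent rebuild
	wg     sync.WaitGroup
	builds int64
}

// buildOnce lets exactly one overlapping caller perform the rebuild;
// the others fall through and later wait on wg before reading the cache.
func (r *rebuilder) buildOnce() {
	r.wg.Add(1)
	defer r.wg.Done()

	select {
	case r.gate <- struct{}{}:
		atomic.AddInt64(&r.builds, 1) // stands in for the expensive cache rebuild
		<-r.gate
	default:
		// someone else is rebuilding; nothing to do here
	}
}

func main() {
	r := &rebuilder{gate: make(chan struct{}, 1)}

	var callers sync.WaitGroup
	for i := 0; i < 8; i++ {
		callers.Add(1)
		go func() {
			defer callers.Done()
			r.buildOnce()
			r.wg.Wait() // read-through: never serve a half-built cache
		}()
	}
	callers.Wait()
	fmt.Println("rebuilds:", atomic.LoadInt64(&r.builds)) // 1 while the calls overlap
}
```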
-func (c *cachedObjectClient) buildCacheOnce(ctx context.Context) { +func (c *cachedObjectClient) buildCacheOnce(ctx context.Context, forceRefresh bool) { c.buildCacheWg.Add(1) defer c.buildCacheWg.Done() @@ -59,7 +59,7 @@ func (c *cachedObjectClient) buildCacheOnce(ctx context.Context) { select { case c.buildCacheChan <- struct{}{}: c.err = nil - c.err = c.buildCache(ctx) + c.err = c.buildCache(ctx, forceRefresh) <-c.buildCacheChan if c.err != nil { level.Error(util_log.Logger).Log("msg", "failed to build cache", "err", c.err) @@ -68,7 +68,16 @@ func (c *cachedObjectClient) buildCacheOnce(ctx context.Context) { } } -func (c *cachedObjectClient) List(ctx context.Context, prefix, _ string) ([]client.StorageObject, []client.StorageCommonPrefix, error) { +func (c *cachedObjectClient) RefreshIndexListCache(ctx context.Context) { + c.buildCacheOnce(ctx, true) + c.buildCacheWg.Wait() +} + +func (c *cachedObjectClient) List(ctx context.Context, prefix, objectDelimiter string, bypassCache bool) ([]client.StorageObject, []client.StorageCommonPrefix, error) { + if bypassCache { + return c.ObjectClient.List(ctx, prefix, objectDelimiter) + } + prefix = strings.TrimSuffix(prefix, delimiter) ss := strings.Split(prefix, delimiter) if len(ss) > 2 { @@ -76,7 +85,7 @@ func (c *cachedObjectClient) List(ctx context.Context, prefix, _ string) ([]clie } if time.Since(c.cacheBuiltAt) >= cacheTimeout { - c.buildCacheOnce(ctx) + c.buildCacheOnce(ctx, false) } // wait for cache build operation to finish, if running @@ -120,8 +129,8 @@ func (c *cachedObjectClient) List(ctx context.Context, prefix, _ string) ([]clie } // buildCache builds the cache if expired -func (c *cachedObjectClient) buildCache(ctx context.Context) error { - if time.Since(c.cacheBuiltAt) < cacheTimeout { +func (c *cachedObjectClient) buildCache(ctx context.Context, forceRefresh bool) error { + if !forceRefresh && time.Since(c.cacheBuiltAt) < cacheTimeout { return nil } diff --git a/pkg/storage/stores/shipper/storage/cached_client_test.go b/pkg/storage/stores/shipper/storage/cached_client_test.go index 6a89667160df2..f7835dd0cddf6 100644 --- a/pkg/storage/stores/shipper/storage/cached_client_test.go +++ b/pkg/storage/stores/shipper/storage/cached_client_test.go @@ -65,14 +65,14 @@ func TestCachedObjectClient(t *testing.T) { cachedObjectClient := newCachedObjectClient(objectClient) // list tables - objects, commonPrefixes, err := cachedObjectClient.List(context.Background(), "", "") + objects, commonPrefixes, err := cachedObjectClient.List(context.Background(), "", "", false) require.NoError(t, err) require.Equal(t, 1, objectClient.listCallsCount) require.Equal(t, []client.StorageObject{}, objects) require.Equal(t, []client.StorageCommonPrefix{"table1", "table2", "table3"}, commonPrefixes) // list objects in all 3 tables - objects, commonPrefixes, err = cachedObjectClient.List(context.Background(), "table1/", "") + objects, commonPrefixes, err = cachedObjectClient.List(context.Background(), "table1/", "", false) require.NoError(t, err) require.Equal(t, 1, objectClient.listCallsCount) require.Equal(t, []client.StorageObject{ @@ -81,7 +81,7 @@ func TestCachedObjectClient(t *testing.T) { }, objects) require.Equal(t, []client.StorageCommonPrefix{}, commonPrefixes) - objects, commonPrefixes, err = cachedObjectClient.List(context.Background(), "table2/", "") + objects, commonPrefixes, err = cachedObjectClient.List(context.Background(), "table2/", "", false) require.NoError(t, err) require.Equal(t, 1, objectClient.listCallsCount) require.Equal(t, 
[]client.StorageObject{ @@ -89,14 +89,14 @@ func TestCachedObjectClient(t *testing.T) { }, objects) require.Equal(t, []client.StorageCommonPrefix{"table2/user1"}, commonPrefixes) - objects, commonPrefixes, err = cachedObjectClient.List(context.Background(), "table3/", "") + objects, commonPrefixes, err = cachedObjectClient.List(context.Background(), "table3/", "", false) require.NoError(t, err) require.Equal(t, 1, objectClient.listCallsCount) require.Equal(t, []client.StorageObject{}, objects) require.Equal(t, []client.StorageCommonPrefix{"table3/user1"}, commonPrefixes) // list user objects from table2 and table3 - objects, commonPrefixes, err = cachedObjectClient.List(context.Background(), "table2/user1/", "") + objects, commonPrefixes, err = cachedObjectClient.List(context.Background(), "table2/user1/", "", false) require.NoError(t, err) require.Equal(t, 1, objectClient.listCallsCount) require.Equal(t, []client.StorageObject{ @@ -106,7 +106,7 @@ func TestCachedObjectClient(t *testing.T) { }, objects) require.Equal(t, []client.StorageCommonPrefix{}, commonPrefixes) - objects, commonPrefixes, err = cachedObjectClient.List(context.Background(), "table3/user1/", "") + objects, commonPrefixes, err = cachedObjectClient.List(context.Background(), "table3/user1/", "", false) require.NoError(t, err) require.Equal(t, 1, objectClient.listCallsCount) require.Equal(t, []client.StorageObject{ @@ -116,14 +116,14 @@ func TestCachedObjectClient(t *testing.T) { require.Equal(t, []client.StorageCommonPrefix{}, commonPrefixes) // list non-existent table - objects, commonPrefixes, err = cachedObjectClient.List(context.Background(), "table4/", "") + objects, commonPrefixes, err = cachedObjectClient.List(context.Background(), "table4/", "", false) require.NoError(t, err) require.Equal(t, 1, objectClient.listCallsCount) require.Equal(t, []client.StorageObject{}, objects) require.Equal(t, []client.StorageCommonPrefix{}, commonPrefixes) // list non-existent user - objects, commonPrefixes, err = cachedObjectClient.List(context.Background(), "table3/user2/", "") + objects, commonPrefixes, err = cachedObjectClient.List(context.Background(), "table3/user2/", "", false) require.NoError(t, err) require.Equal(t, 1, objectClient.listCallsCount) require.Equal(t, []client.StorageObject{}, objects) @@ -141,7 +141,7 @@ func TestCachedObjectClient_errors(t *testing.T) { cachedObjectClient := newCachedObjectClient(objectClient) // do the initial listing - objects, commonPrefixes, err := cachedObjectClient.List(context.Background(), "", "") + objects, commonPrefixes, err := cachedObjectClient.List(context.Background(), "", "", false) require.NoError(t, err) require.Equal(t, 1, objectClient.listCallsCount) require.Equal(t, []client.StorageObject{}, objects) @@ -157,7 +157,7 @@ func TestCachedObjectClient_errors(t *testing.T) { wg.Add(1) go func() { defer wg.Done() - _, _, err := cachedObjectClient.List(context.Background(), "", "") + _, _, err := cachedObjectClient.List(context.Background(), "", "", false) require.Error(t, err) require.Equal(t, 2, objectClient.listCallsCount) }() @@ -172,7 +172,7 @@ func TestCachedObjectClient_errors(t *testing.T) { wg.Add(1) go func() { defer wg.Done() - objects, commonPrefixes, err = cachedObjectClient.List(context.Background(), "", "") + objects, commonPrefixes, err = cachedObjectClient.List(context.Background(), "", "", false) require.NoError(t, err) require.Equal(t, 3, objectClient.listCallsCount) require.Equal(t, []client.StorageObject{}, objects) diff --git 
a/pkg/storage/stores/shipper/storage/client.go b/pkg/storage/stores/shipper/storage/client.go index 2e699550bf842..1663891da43b0 100644 --- a/pkg/storage/stores/shipper/storage/client.go +++ b/pkg/storage/stores/shipper/storage/client.go @@ -8,14 +8,13 @@ import ( "time" "github.com/grafana/loki/pkg/storage/chunk/client" - "github.com/grafana/loki/pkg/storage/chunk/client/local" ) const delimiter = "/" // UserIndexClient allows doing operations on the object store for user specific index. type UserIndexClient interface { - ListUserFiles(ctx context.Context, tableName, userID string) ([]IndexFile, error) + ListUserFiles(ctx context.Context, tableName, userID string, bypassCache bool) ([]IndexFile, error) GetUserFile(ctx context.Context, tableName, userID, fileName string) (io.ReadCloser, error) PutUserFile(ctx context.Context, tableName, userID, fileName string, file io.ReadSeeker) error DeleteUserFile(ctx context.Context, tableName, userID, fileName string) error @@ -23,7 +22,7 @@ type UserIndexClient interface { // CommonIndexClient allows doing operations on the object store for common index. type CommonIndexClient interface { - ListFiles(ctx context.Context, tableName string) ([]IndexFile, []string, error) + ListFiles(ctx context.Context, tableName string, bypassCache bool) ([]IndexFile, []string, error) GetFile(ctx context.Context, tableName, fileName string) (io.ReadCloser, error) PutFile(ctx context.Context, tableName, fileName string, file io.ReadSeeker) error DeleteFile(ctx context.Context, tableName, fileName string) error @@ -34,13 +33,14 @@ type Client interface { CommonIndexClient UserIndexClient + RefreshIndexListCache(ctx context.Context) ListTables(ctx context.Context) ([]string, error) IsFileNotFoundErr(err error) bool Stop() } type indexStorageClient struct { - objectClient client.ObjectClient + objectClient *cachedObjectClient } type IndexFile struct { @@ -49,15 +49,16 @@ type IndexFile struct { } func NewIndexStorageClient(origObjectClient client.ObjectClient, storagePrefix string) Client { - objectClient := newPrefixedObjectClient(origObjectClient, storagePrefix) - if _, ok := origObjectClient.(*local.FSObjectClient); !ok { - objectClient = newCachedObjectClient(objectClient) - } + objectClient := newCachedObjectClient(newPrefixedObjectClient(origObjectClient, storagePrefix)) return &indexStorageClient{objectClient: objectClient} } +func (s *indexStorageClient) RefreshIndexListCache(ctx context.Context) { + s.objectClient.RefreshIndexListCache(ctx) +} + func (s *indexStorageClient) ListTables(ctx context.Context) ([]string, error) { - _, tables, err := s.objectClient.List(ctx, "", delimiter) + _, tables, err := s.objectClient.List(ctx, "", delimiter, false) if err != nil { return nil, err } @@ -70,11 +71,11 @@ func (s *indexStorageClient) ListTables(ctx context.Context) ([]string, error) { return tableNames, nil } -func (s *indexStorageClient) ListFiles(ctx context.Context, tableName string) ([]IndexFile, []string, error) { +func (s *indexStorageClient) ListFiles(ctx context.Context, tableName string, bypassCache bool) ([]IndexFile, []string, error) { // The forward slash here needs to stay because we are trying to list contents of a directory without which // we will get the name of the same directory back with hosted object stores. // This is due to the object stores not having a concept of directories. 
- objects, users, err := s.objectClient.List(ctx, tableName+delimiter, delimiter) + objects, users, err := s.objectClient.List(ctx, tableName+delimiter, delimiter, bypassCache) if err != nil { return nil, nil, err } @@ -99,11 +100,11 @@ func (s *indexStorageClient) ListFiles(ctx context.Context, tableName string) ([ return files, userIDs, nil } -func (s *indexStorageClient) ListUserFiles(ctx context.Context, tableName, userID string) ([]IndexFile, error) { +func (s *indexStorageClient) ListUserFiles(ctx context.Context, tableName, userID string, bypassCache bool) ([]IndexFile, error) { // The forward slash here needs to stay because we are trying to list contents of a directory without which // we will get the name of the same directory back with hosted object stores. // This is due to the object stores not having a concept of directories. - objects, _, err := s.objectClient.List(ctx, path.Join(tableName, userID)+delimiter, delimiter) + objects, _, err := s.objectClient.List(ctx, path.Join(tableName, userID)+delimiter, delimiter, bypassCache) if err != nil { return nil, err } diff --git a/pkg/storage/stores/shipper/storage/client_test.go b/pkg/storage/stores/shipper/storage/client_test.go index 13c869b4e6f95..991aebce5c290 100644 --- a/pkg/storage/stores/shipper/storage/client_test.go +++ b/pkg/storage/stores/shipper/storage/client_test.go @@ -36,6 +36,7 @@ func TestIndexStorageClient(t *testing.T) { indexStorageClient := NewIndexStorageClient(objectClient, storageKeyPrefix) verifyFiles := func() { + indexStorageClient.RefreshIndexListCache(context.Background()) tables, err := indexStorageClient.ListTables(context.Background()) require.NoError(t, err) require.Len(t, tables, len(tablesToSetup)) @@ -43,7 +44,7 @@ func TestIndexStorageClient(t *testing.T) { expectedFiles, ok := tablesToSetup[table] require.True(t, ok) - filesInStorage, _, err := indexStorageClient.ListFiles(context.Background(), table) + filesInStorage, _, err := indexStorageClient.ListFiles(context.Background(), table, false) require.NoError(t, err) require.Len(t, filesInStorage, len(expectedFiles)) diff --git a/pkg/storage/stores/shipper/storage/index_set.go b/pkg/storage/stores/shipper/storage/index_set.go index 306fbad9c12a4..2f05f88c1dd2f 100644 --- a/pkg/storage/stores/shipper/storage/index_set.go +++ b/pkg/storage/stores/shipper/storage/index_set.go @@ -13,7 +13,8 @@ var ( // IndexSet provides storage operations for user or common index tables. 
type IndexSet interface { - ListFiles(ctx context.Context, tableName, userID string) ([]IndexFile, error) + RefreshIndexListCache(ctx context.Context) + ListFiles(ctx context.Context, tableName, userID string, bypassCache bool) ([]IndexFile, error) GetFile(ctx context.Context, tableName, userID, fileName string) (io.ReadCloser, error) PutFile(ctx context.Context, tableName, userID, fileName string, file io.ReadSeeker) error DeleteFile(ctx context.Context, tableName, userID, fileName string) error @@ -44,17 +45,21 @@ func (i indexSet) validateUserID(userID string) error { return nil } -func (i indexSet) ListFiles(ctx context.Context, tableName, userID string) ([]IndexFile, error) { +func (i indexSet) RefreshIndexListCache(ctx context.Context) { + i.client.RefreshIndexListCache(ctx) +} + +func (i indexSet) ListFiles(ctx context.Context, tableName, userID string, bypassCache bool) ([]IndexFile, error) { err := i.validateUserID(userID) if err != nil { return nil, err } if i.userBasedIndex { - return i.client.ListUserFiles(ctx, tableName, userID) + return i.client.ListUserFiles(ctx, tableName, userID, bypassCache) } - files, _, err := i.client.ListFiles(ctx, tableName) + files, _, err := i.client.ListFiles(ctx, tableName, bypassCache) return files, err } diff --git a/pkg/storage/stores/shipper/table_client.go b/pkg/storage/stores/shipper/table_client.go index cd9d767b7a9b5..5759e9242a581 100644 --- a/pkg/storage/stores/shipper/table_client.go +++ b/pkg/storage/stores/shipper/table_client.go @@ -18,6 +18,7 @@ func NewBoltDBShipperTableClient(objectClient client.ObjectClient, storageKeyPre } func (b *boltDBShipperTableClient) ListTables(ctx context.Context) ([]string, error) { + b.indexStorageClient.RefreshIndexListCache(ctx) return b.indexStorageClient.ListTables(ctx) } @@ -30,7 +31,7 @@ func (b *boltDBShipperTableClient) Stop() { } func (b *boltDBShipperTableClient) DeleteTable(ctx context.Context, tableName string) error { - files, _, err := b.indexStorageClient.ListFiles(ctx, tableName) + files, _, err := b.indexStorageClient.ListFiles(ctx, tableName, true) if err != nil { return err } From 47ce070b730ea6f5bdc8c27c8e303c6ffd595728 Mon Sep 17 00:00:00 2001 From: Dylan Guedes Date: Mon, 2 May 2022 05:11:23 -0300 Subject: [PATCH 041/223] Add missing changelog entry for change introduced by PR 6034. (#6040) * Add missing changelog entry regarding PR 6034. * Move changelog entry to promtail subsection --- CHANGELOG.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index b0bb8f1282e01..92c8a018a2b02 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,4 +1,5 @@ ## Main + * [5984](https://github.com/grafana/loki/pull/5984) **dannykopping** and **salvacorts**: Querier: prevent unnecessary calls to ingesters. * [5879](https://github.com/grafana/loki/pull/5879) **MichelHollands**: Remove lines matching delete request expression when using "filter-and-delete" deletion mode. * [5899](https://github.com/grafana/loki/pull/5899) **simonswine**: Update go image to 1.17.9. @@ -78,7 +79,7 @@ ##### Enhancements * [5715](https://github.com/grafana/loki/pull/5715) **chaudum**: Allow promtail to push RFC5424 formatted syslog messages ##### Fixes - +* [6034](https://github.com/grafana/loki/pull/6034) **DylanGuedes**: Promtail: Fix symlink tailing behavior. 
##### Changes * [5686](https://github.com/grafana/loki/pull/5686) **ssncferreira**: Move promtail StreamLagLabels config to upper level config.Config * [5839](https://github.com/grafana/loki/pull/5839) **marctc**: Add ActiveTargets method to promtail From 162e82ebfbb2d7c9c7f9f81f2e0f5c02ab5cb75d Mon Sep 17 00:00:00 2001 From: Dylan Guedes Date: Mon, 2 May 2022 05:12:09 -0300 Subject: [PATCH 042/223] Use quiet flag in logcli tail.go. (#6033) --- pkg/logcli/query/tail.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pkg/logcli/query/tail.go b/pkg/logcli/query/tail.go index 058ca5a8ac7c1..c9ad092eb0b30 100644 --- a/pkg/logcli/query/tail.go +++ b/pkg/logcli/query/tail.go @@ -36,11 +36,11 @@ func (q *Query) TailQuery(delayFor time.Duration, c client.Client, out output.Lo tailResponse := new(loghttp.TailResponse) - if len(q.IgnoreLabelsKey) > 0 { + if len(q.IgnoreLabelsKey) > 0 && !q.Quiet { log.Println("Ignoring labels key:", color.RedString(strings.Join(q.IgnoreLabelsKey, ","))) } - if len(q.ShowLabelsKey) > 0 { + if len(q.ShowLabelsKey) > 0 && !q.Quiet { log.Println("Print only labels key:", color.RedString(strings.Join(q.ShowLabelsKey, ","))) } From 8a5877ccf13fed95643a5b63f63e0e40967aeb0b Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 2 May 2022 10:12:47 +0200 Subject: [PATCH 043/223] Bump github.com/minio/minio-go/v7 from 7.0.10 to 7.0.24 (#6007) Bumps [github.com/minio/minio-go/v7](https://github.com/minio/minio-go) from 7.0.10 to 7.0.24. - [Release notes](https://github.com/minio/minio-go/releases) - [Commits](https://github.com/minio/minio-go/compare/v7.0.10...v7.0.24) --- updated-dependencies: - dependency-name: github.com/minio/minio-go/v7 dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 5 +- .../klauspost/compress/s2/.gitignore | 15 + .../github.com/klauspost/compress/s2/LICENSE | 28 + .../klauspost/compress/s2/README.md | 937 + .../klauspost/compress/s2/decode.go | 762 + .../klauspost/compress/s2/decode_amd64.s | 568 + .../klauspost/compress/s2/decode_arm64.s | 574 + .../klauspost/compress/s2/decode_asm.go | 17 + .../klauspost/compress/s2/decode_other.go | 267 + .../klauspost/compress/s2/encode.go | 1347 ++ .../klauspost/compress/s2/encode_all.go | 456 + .../klauspost/compress/s2/encode_amd64.go | 142 + .../klauspost/compress/s2/encode_best.go | 604 + .../klauspost/compress/s2/encode_better.go | 431 + .../klauspost/compress/s2/encode_go.go | 298 + .../compress/s2/encodeblock_amd64.go | 189 + .../klauspost/compress/s2/encodeblock_amd64.s | 15678 ++++++++++++++++ .../github.com/klauspost/compress/s2/index.go | 525 + vendor/github.com/klauspost/compress/s2/s2.go | 143 + .../minio/minio-go/v7/.golangci.yml | 13 +- .../minio/minio-go/v7/CONTRIBUTING.md | 1 - vendor/github.com/minio/minio-go/v7/Makefile | 7 +- vendor/github.com/minio/minio-go/v7/README.md | 21 +- .../minio-go/v7/api-bucket-encryption.go | 6 +- .../minio/minio-go/v7/api-bucket-lifecycle.go | 10 +- .../minio-go/v7/api-bucket-notification.go | 15 +- .../minio/minio-go/v7/api-bucket-policy.go | 17 +- .../minio-go/v7/api-bucket-replication.go | 151 +- .../minio/minio-go/v7/api-bucket-tagging.go | 6 +- .../minio-go/v7/api-bucket-versioning.go | 8 +- .../minio/minio-go/v7/api-compose-object.go | 45 +- .../minio/minio-go/v7/api-copy-object.go | 2 +- .../minio/minio-go/v7/api-datatypes.go | 24 +- .../minio/minio-go/v7/api-error-response.go | 28 +- .../minio/minio-go/v7/api-get-object-acl.go | 44 +- .../minio/minio-go/v7/api-get-object-file.go | 6 +- .../minio/minio-go/v7/api-get-object.go | 12 +- .../minio/minio-go/v7/api-get-options.go | 3 +- .../github.com/minio/minio-go/v7/api-list.go | 108 +- .../minio-go/v7/api-object-legal-hold.go | 4 +- .../minio/minio-go/v7/api-object-lock.go | 8 +- .../minio/minio-go/v7/api-object-retention.go | 4 +- .../minio/minio-go/v7/api-object-tagging.go | 6 +- .../minio/minio-go/v7/api-presigned.go | 48 +- .../minio/minio-go/v7/api-put-bucket.go | 8 +- .../minio-go/v7/api-put-object-common.go | 10 +- .../v7/api-put-object-file-context.go | 2 +- .../minio-go/v7/api-put-object-multipart.go | 35 +- .../minio-go/v7/api-put-object-streaming.go | 38 +- .../minio/minio-go/v7/api-put-object.go | 53 +- .../minio-go/v7/api-putobject-snowball.go | 215 + .../minio/minio-go/v7/api-remove.go | 197 +- .../minio/minio-go/v7/api-restore.go | 182 + .../minio/minio-go/v7/api-s3-datatypes.go | 6 +- .../minio/minio-go/v7/api-select.go | 22 +- .../github.com/minio/minio-go/v7/api-stat.go | 20 +- vendor/github.com/minio/minio-go/v7/api.go | 205 +- .../minio/minio-go/v7/bucket-cache.go | 11 +- .../github.com/minio/minio-go/v7/constants.go | 16 +- vendor/github.com/minio/minio-go/v7/core.go | 20 +- .../minio/minio-go/v7/functional_tests.go | 12225 ++++++++++++ .../v7/pkg/credentials/assume_role.go | 20 +- .../v7/pkg/credentials/credentials.go | 20 +- .../v7/pkg/credentials/error_response.go | 96 + .../v7/pkg/credentials/file_minio_client.go | 2 +- .../minio-go/v7/pkg/credentials/iam_aws.go | 67 +- .../{signature-type.go => signature_type.go} | 0 .../v7/pkg/credentials/sts_client_grants.go | 23 +- .../v7/pkg/credentials/sts_ldap_identity.go | 114 +- 
.../v7/pkg/credentials/sts_tls_identity.go | 209 + .../v7/pkg/credentials/sts_web_identity.go | 37 +- .../minio-go/v7/pkg/encrypt/server-side.go | 2 +- .../minio-go/v7/pkg/lifecycle/lifecycle.go | 188 +- .../v7/pkg/notification/notification.go | 6 +- .../v7/pkg/replication/replication.go | 280 +- .../minio/minio-go/v7/pkg/s3utils/utils.go | 19 +- .../pkg/signer/request-signature-streaming.go | 8 +- .../v7/pkg/signer/request-signature-v2.go | 21 +- .../v7/pkg/signer/request-signature-v4.go | 55 +- .../minio/minio-go/v7/pkg/signer/utils.go | 4 + .../minio/minio-go/v7/post-policy.go | 4 +- .../minio/minio-go/v7/retry-continous.go | 4 +- vendor/github.com/minio/minio-go/v7/retry.go | 5 +- .../github.com/minio/minio-go/v7/transport.go | 1 + vendor/github.com/minio/minio-go/v7/utils.go | 253 +- vendor/modules.txt | 5 +- 87 files changed, 37657 insertions(+), 636 deletions(-) create mode 100644 vendor/github.com/klauspost/compress/s2/.gitignore create mode 100644 vendor/github.com/klauspost/compress/s2/LICENSE create mode 100644 vendor/github.com/klauspost/compress/s2/README.md create mode 100644 vendor/github.com/klauspost/compress/s2/decode.go create mode 100644 vendor/github.com/klauspost/compress/s2/decode_amd64.s create mode 100644 vendor/github.com/klauspost/compress/s2/decode_arm64.s create mode 100644 vendor/github.com/klauspost/compress/s2/decode_asm.go create mode 100644 vendor/github.com/klauspost/compress/s2/decode_other.go create mode 100644 vendor/github.com/klauspost/compress/s2/encode.go create mode 100644 vendor/github.com/klauspost/compress/s2/encode_all.go create mode 100644 vendor/github.com/klauspost/compress/s2/encode_amd64.go create mode 100644 vendor/github.com/klauspost/compress/s2/encode_best.go create mode 100644 vendor/github.com/klauspost/compress/s2/encode_better.go create mode 100644 vendor/github.com/klauspost/compress/s2/encode_go.go create mode 100644 vendor/github.com/klauspost/compress/s2/encodeblock_amd64.go create mode 100644 vendor/github.com/klauspost/compress/s2/encodeblock_amd64.s create mode 100644 vendor/github.com/klauspost/compress/s2/index.go create mode 100644 vendor/github.com/klauspost/compress/s2/s2.go create mode 100644 vendor/github.com/minio/minio-go/v7/api-putobject-snowball.go create mode 100644 vendor/github.com/minio/minio-go/v7/api-restore.go create mode 100644 vendor/github.com/minio/minio-go/v7/functional_tests.go create mode 100644 vendor/github.com/minio/minio-go/v7/pkg/credentials/error_response.go rename vendor/github.com/minio/minio-go/v7/pkg/credentials/{signature-type.go => signature_type.go} (100%) create mode 100644 vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_tls_identity.go diff --git a/go.mod b/go.mod index 7c55aa74f1bc7..b12ce2bc31375 100644 --- a/go.mod +++ b/go.mod @@ -64,7 +64,7 @@ require ( github.com/klauspost/compress v1.14.1 github.com/klauspost/pgzip v1.2.5 github.com/mattn/go-ieproxy v0.0.1 - github.com/minio/minio-go/v7 v7.0.10 + github.com/minio/minio-go/v7 v7.0.24 github.com/mitchellh/mapstructure v1.4.3 github.com/modern-go/reflect2 v1.0.2 github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f diff --git a/go.sum b/go.sum index 74527af9d9af2..5752cdbf13979 100644 --- a/go.sum +++ b/go.sum @@ -1287,6 +1287,7 @@ github.com/klauspost/compress v1.11.0/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYs github.com/klauspost/compress v1.11.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/klauspost/compress v1.11.13/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= 
github.com/klauspost/compress v1.13.4/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg= +github.com/klauspost/compress v1.13.5/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= github.com/klauspost/compress v1.14.1 h1:hLQYb23E8/fO+1u53d02A97a8UnsddcvYzq4ERRU4ds= github.com/klauspost/compress v1.14.1/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= @@ -1410,8 +1411,9 @@ github.com/minio/highwayhash v1.0.1/go.mod h1:BQskDq+xkJ12lmlUUi7U0M5Swg3EWR+dLT github.com/minio/highwayhash v1.0.2/go.mod h1:BQskDq+xkJ12lmlUUi7U0M5Swg3EWR+dLTk+kldvVxY= github.com/minio/md5-simd v1.1.0 h1:QPfiOqlZH+Cj9teu0t9b1nTBfPbyTl16Of5MeuShdK4= github.com/minio/md5-simd v1.1.0/go.mod h1:XpBqgZULrMYD3R+M28PcmP0CkI7PEMzB3U77ZrKZ0Gw= -github.com/minio/minio-go/v7 v7.0.10 h1:1oUKe4EOPUEhw2qnPQaPsJ0lmVTYLFu03SiItauXs94= github.com/minio/minio-go/v7 v7.0.10/go.mod h1:td4gW1ldOsj1PbSNS+WYK43j+P1XVhX/8W8awaYlBFo= +github.com/minio/minio-go/v7 v7.0.24 h1:HPlHiET6L5gIgrHRaw1xFo1OaN4bEP/082asWh3WJtI= +github.com/minio/minio-go/v7 v7.0.24/go.mod h1:x81+AX5gHSfCSqw7jxRKHvxUXMlE5uKX0Vb75Xk5yYg= github.com/minio/sha256-simd v0.1.1 h1:5QHSlgo3nt5yKOJrC7W8w7X+NFl8cMPZm96iu8kKUJU= github.com/minio/sha256-simd v0.1.1/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM= github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible/go.mod h1:8AuVvqP/mXw1px98n46wfvcGfQ4ci2FwoAjKYxuo3Z4= @@ -2062,6 +2064,7 @@ golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20201112155050-0c6587e931a9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20201216223049-8b5274cf687f/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= golang.org/x/crypto v0.0.0-20210314154223-e6e6c4f2bb5b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= diff --git a/vendor/github.com/klauspost/compress/s2/.gitignore b/vendor/github.com/klauspost/compress/s2/.gitignore new file mode 100644 index 0000000000000..3a89c6e3e2603 --- /dev/null +++ b/vendor/github.com/klauspost/compress/s2/.gitignore @@ -0,0 +1,15 @@ +testdata/bench + +# These explicitly listed benchmark data files are for an obsolete version of +# snappy_test.go. +testdata/alice29.txt +testdata/asyoulik.txt +testdata/fireworks.jpeg +testdata/geo.protodata +testdata/html +testdata/html_x_4 +testdata/kppkn.gtb +testdata/lcet10.txt +testdata/paper-100k.pdf +testdata/plrabn12.txt +testdata/urls.10K diff --git a/vendor/github.com/klauspost/compress/s2/LICENSE b/vendor/github.com/klauspost/compress/s2/LICENSE new file mode 100644 index 0000000000000..1d2d645bd939f --- /dev/null +++ b/vendor/github.com/klauspost/compress/s2/LICENSE @@ -0,0 +1,28 @@ +Copyright (c) 2011 The Snappy-Go Authors. All rights reserved. +Copyright (c) 2019 Klaus Post. All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/klauspost/compress/s2/README.md b/vendor/github.com/klauspost/compress/s2/README.md new file mode 100644 index 0000000000000..e6716aeaee998 --- /dev/null +++ b/vendor/github.com/klauspost/compress/s2/README.md @@ -0,0 +1,937 @@ +# S2 Compression + +S2 is an extension of [Snappy](https://github.com/google/snappy). + +S2 is aimed for high throughput, which is why it features concurrent compression for bigger payloads. + +Decoding is compatible with Snappy compressed content, but content compressed with S2 cannot be decompressed by Snappy. +This means that S2 can seamlessly replace Snappy without converting compressed content. + +S2 can produce Snappy compatible output, faster and better than Snappy. +If you want full benefit of the changes you should use s2 without Snappy compatibility. + +S2 is designed to have high throughput on content that cannot be compressed. +This is important, so you don't have to worry about spending CPU cycles on already compressed data. 
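One way to make the compatibility claim above concrete: a block compressed with upstream Snappy decodes with S2's `Decode`, while S2 output is not guaranteed to decode with Snappy. A minimal sketch, assuming the `github.com/golang/snappy` package is available alongside this one:

```Go
package main

import (
	"bytes"
	"fmt"

	"github.com/golang/snappy"
	"github.com/klauspost/compress/s2"
)

func main() {
	src := bytes.Repeat([]byte("compressible payload "), 50)

	// Compress with upstream Snappy, decompress with S2.
	compressed := snappy.Encode(nil, src)
	decoded, err := s2.Decode(nil, compressed)
	if err != nil {
		panic(err)
	}
	fmt.Println("snappy -> s2 round trip ok:", bytes.Equal(decoded, src))
}
```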
+ +## Benefits over Snappy + +* Better compression +* Adjustable compression (3 levels) +* Concurrent stream compression +* Faster decompression, even for Snappy compatible content +* Ability to quickly skip forward in compressed stream +* Random seeking with indexes +* Compatible with reading Snappy compressed content +* Smaller block size overhead on incompressible blocks +* Block concatenation +* Uncompressed stream mode +* Automatic stream size padding +* Snappy compatible block compression + +## Drawbacks over Snappy + +* Not optimized for 32 bit systems +* Streams use slightly more memory due to larger blocks and concurrency (configurable) + +# Usage + +Installation: `go get -u github.com/klauspost/compress/s2` + +Full package documentation: + +[![godoc][1]][2] + +[1]: https://godoc.org/github.com/klauspost/compress?status.svg +[2]: https://godoc.org/github.com/klauspost/compress/s2 + +## Compression + +```Go +func EncodeStream(src io.Reader, dst io.Writer) error { + enc := s2.NewWriter(dst) + _, err := io.Copy(enc, src) + if err != nil { + enc.Close() + return err + } + // Blocks until compression is done. + return enc.Close() +} +``` + +You should always call `enc.Close()`, otherwise you will leak resources and your encode will be incomplete. + +For the best throughput, you should attempt to reuse the `Writer` using the `Reset()` method. + +The Writer in S2 is always buffered, therefore `NewBufferedWriter` in Snappy can be replaced with `NewWriter` in S2. +It is possible to flush any buffered data using the `Flush()` method. +This will block until all data sent to the encoder has been written to the output. + +S2 also supports the `io.ReaderFrom` interface, which will consume all input from a reader. + +As a final method to compress data, if you have a single block of data you would like to have encoded as a stream, +a slightly more efficient method is to use the `EncodeBuffer` method. +This will take ownership of the buffer until the stream is closed. + +```Go +func EncodeStream(src []byte, dst io.Writer) error { + enc := s2.NewWriter(dst) + // The encoder owns the buffer until Flush or Close is called. + err := enc.EncodeBuffer(buf) + if err != nil { + enc.Close() + return err + } + // Blocks until compression is done. + return enc.Close() +} +``` + +Each call to `EncodeBuffer` will result in discrete blocks being created without buffering, +so it should only be used a single time per stream. +If you need to write several blocks, you should use the regular io.Writer interface. + + +## Decompression + +```Go +func DecodeStream(src io.Reader, dst io.Writer) error { + dec := s2.NewReader(src) + _, err := io.Copy(dst, dec) + return err +} +``` + +Similar to the Writer, a Reader can be reused using the `Reset` method. + +For the best possible throughput, there is a `EncodeBuffer(buf []byte)` function available. +However, it requires that the provided buffer isn't used after it is handed over to S2 and until the stream is flushed or closed. + +For smaller data blocks, there is also a non-streaming interface: `Encode()`, `EncodeBetter()` and `Decode()`. +Do however note that these functions (similar to Snappy) does not provide validation of data, +so data corruption may be undetected. Stream encoding provides CRC checks of data. + +It is possible to efficiently skip forward in a compressed stream using the `Skip()` method. +For big skips the decompressor is able to skip blocks without decompressing them. + +## Single Blocks + +Similar to Snappy S2 offers single block compression. 
+Blocks do not offer the same flexibility and safety as streams, +but may be preferable for very small payloads, less than 100K. + +Using a simple `dst := s2.Encode(nil, src)` will compress `src` and return the compressed result. +It is possible to provide a destination buffer. +If the buffer has a capacity of `s2.MaxEncodedLen(len(src))` it will be used. +If not a new will be allocated. + +Alternatively `EncodeBetter`/`EncodeBest` can also be used for better, but slightly slower compression. + +Similarly to decompress a block you can use `dst, err := s2.Decode(nil, src)`. +Again an optional destination buffer can be supplied. +The `s2.DecodedLen(src)` can be used to get the minimum capacity needed. +If that is not satisfied a new buffer will be allocated. + +Block function always operate on a single goroutine since it should only be used for small payloads. + +# Commandline tools + +Some very simply commandline tools are provided; `s2c` for compression and `s2d` for decompression. + +Binaries can be downloaded on the [Releases Page](https://github.com/klauspost/compress/releases). + +Installing then requires Go to be installed. To install them, use: + +`go install github.com/klauspost/compress/s2/cmd/s2c@latest && go install github.com/klauspost/compress/s2/cmd/s2d@latest` + +To build binaries to the current folder use: + +`go build github.com/klauspost/compress/s2/cmd/s2c && go build github.com/klauspost/compress/s2/cmd/s2d` + + +## s2c + +``` +Usage: s2c [options] file1 file2 + +Compresses all files supplied as input separately. +Output files are written as 'filename.ext.s2' or 'filename.ext.snappy'. +By default output files will be overwritten. +Use - as the only file name to read from stdin and write to stdout. + +Wildcards are accepted: testdir/*.txt will compress all files in testdir ending with .txt +Directories can be wildcards as well. testdir/*/*.txt will match testdir/subdir/b.txt + +File names beginning with 'http://' and 'https://' will be downloaded and compressed. +Only http response code 200 is accepted. + +Options: + -bench int + Run benchmark n times. No output will be written + -blocksize string + Max block size. Examples: 64K, 256K, 1M, 4M. Must be power of two and <= 4MB (default "4M") + -c Write all output to stdout. Multiple input files will be concatenated + -cpu int + Compress using this amount of threads (default 32) + -faster + Compress faster, but with a minor compression loss + -help + Display help + -index + Add seek index (default true) + -o string + Write output to another file. Single input file only + -pad string + Pad size to a multiple of this value, Examples: 500, 64K, 256K, 1M, 4M, etc (default "1") + -q Don't write any output to terminal, except errors + -rm + Delete source file(s) after successful compression + -safe + Do not overwrite output files + -slower + Compress more, but a lot slower + -snappy + Generate Snappy compatible output stream + -verify + Verify written files + +``` + +## s2d + +``` +Usage: s2d [options] file1 file2 + +Decompresses all files supplied as input. Input files must end with '.s2' or '.snappy'. +Output file names have the extension removed. By default output files will be overwritten. +Use - as the only file name to read from stdin and write to stdout. + +Wildcards are accepted: testdir/*.txt will compress all files in testdir ending with .txt +Directories can be wildcards as well. testdir/*/*.txt will match testdir/subdir/b.txt + +File names beginning with 'http://' and 'https://' will be downloaded and decompressed. 
+Extensions on downloaded files are ignored. Only http response code 200 is accepted.
+
+Options:
+  -bench int
+    	Run benchmark n times. No output will be written
+  -c	Write all output to stdout. Multiple input files will be concatenated
+  -help
+    	Display help
+  -o string
+    	Write output to another file. Single input file only
+  -offset string
+    	Start at offset. Examples: 92, 64K, 256K, 1M, 4M. Requires Index
+  -q	Don't write any output to terminal, except errors
+  -rm
+    	Delete source file(s) after successful decompression
+  -safe
+    	Do not overwrite output files
+  -tail string
+    	Return last of compressed file. Examples: 92, 64K, 256K, 1M, 4M. Requires Index
+  -verify
+    	Verify files, but do not write output
+```
+
+## s2sx: self-extracting archives
+
+s2sx allows creating self-extracting archives with no dependencies.
+
+By default, executables are created for the same platforms as the host OS,
+but this can be overridden with the `-os` and `-arch` parameters.
+
+Extracted files have 0666 permissions, except when the untar option is used.
+
+```
+Usage: s2sx [options] file1 file2
+
+Compresses all files supplied as input separately.
+If files have '.s2' extension they are assumed to be compressed already.
+Output files are written as 'filename.s2sx' and with '.exe' for windows targets.
+If output is big, an additional file with ".more" is written. This must be included as well.
+By default output files will be overwritten.
+
+Wildcards are accepted: testdir/*.txt will compress all files in testdir ending with .txt
+Directories can be wildcards as well. testdir/*/*.txt will match testdir/subdir/b.txt
+
+Options:
+  -arch string
+    	Destination architecture (default "amd64")
+  -c	Write all output to stdout. Multiple input files will be concatenated
+  -cpu int
+    	Compress using this amount of threads (default 32)
+  -help
+    	Display help
+  -max string
+    	Maximum executable size. Rest will be written to another file. (default "1G")
+  -os string
+    	Destination operating system (default "windows")
+  -q	Don't write any output to terminal, except errors
+  -rm
+    	Delete source file(s) after successful compression
+  -safe
+    	Do not overwrite output files
+  -untar
+    	Untar on destination
+```
+
+Available platforms are:
+
+ * darwin-amd64
+ * darwin-arm64
+ * linux-amd64
+ * linux-arm
+ * linux-arm64
+ * linux-mips64
+ * linux-ppc64le
+ * windows-386
+ * windows-amd64
+
+By default, there is a size limit of 1GB for the output executable.
+
+When this is exceeded, the remaining file content is written to a file called
+output+`.more`. This file must be included and placed alongside the executable
+for a successful extraction.
+
+This file *must* have the same name as the executable, so if the executable is renamed,
+the `.more` file must be renamed as well.
+
+This functionality is disabled with stdin/stdout.
+
+### Self-extracting TAR files
+
+If you wrap a TAR file, you can specify `-untar` to make it untar on the destination host.
+
+Files are extracted to the current folder with the path specified in the tar file.
+
+Note that tar files are not validated before they are wrapped.
+
+For security reasons, files that would be extracted outside of the destination folder are not allowed.
+
+# Performance
+
+This section will focus on comparisons to Snappy.
+This package is solely aimed at replacing Snappy as a high speed compression package.
+If you are mainly looking for better compression, [zstandard](https://github.com/klauspost/compress/tree/master/zstd#zstd)
+gives better compression, but typically at speeds slightly below the "better" mode in this package.
+
+Compared to the Snappy Go implementation, compression is increased, mostly around 5-20%,
+and single-threaded throughput is typically increased by 25-40%.
+
+Streams are concurrently compressed. The stream will be distributed among all available CPU cores for the best possible throughput.
+
+A "better" compression mode is also available. This allows trading a bit of speed for a minor compression gain.
+The content compressed in this mode is fully compatible with the standard decoder.
+A usage sketch for this mode follows the table legend below.
+
+Snappy vs S2 **compression** speed on a 16 core (32 thread) computer, using all threads and a single thread (1 CPU):
+
+| File                                                                                                  | S2 speed | S2 Throughput | S2 % smaller | S2 "better" | "better" throughput | "better" % smaller |
+|-------------------------------------------------------------------------------------------------------|----------|---------------|--------------|-------------|---------------------|--------------------|
+| [rawstudio-mint14.tar](https://files.klauspost.com/compress/rawstudio-mint14.7z)                      | 12.70x   | 10556 MB/s    | 7.35%        | 4.15x       | 3455 MB/s           | 12.79%             |
+| (1 CPU)                                                                                               | 1.14x    | 948 MB/s      | -            | 0.42x       | 349 MB/s            | -                  |
+| [github-june-2days-2019.json](https://files.klauspost.com/compress/github-june-2days-2019.json.zst)   | 17.13x   | 14484 MB/s    | 31.60%       | 10.09x      | 8533 MB/s           | 37.71%             |
+| (1 CPU)                                                                                               | 1.33x    | 1127 MB/s     | -            | 0.70x       | 589 MB/s            | -                  |
+| [github-ranks-backup.bin](https://files.klauspost.com/compress/github-ranks-backup.bin.zst)           | 15.14x   | 12000 MB/s    | -5.79%       | 6.59x       | 5223 MB/s           | 5.80%              |
+| (1 CPU)                                                                                               | 1.11x    | 877 MB/s      | -            | 0.47x       | 370 MB/s            | -                  |
+| [consensus.db.10gb](https://files.klauspost.com/compress/consensus.db.10gb.zst)                       | 14.62x   | 12116 MB/s    | 15.90%       | 5.35x       | 4430 MB/s           | 16.08%             |
+| (1 CPU)                                                                                               | 1.38x    | 1146 MB/s     | -            | 0.38x       | 312 MB/s            | -                  |
+| [adresser.json](https://files.klauspost.com/compress/adresser.json.zst)                               | 8.83x    | 17579 MB/s    | 43.86%       | 6.54x       | 13011 MB/s          | 47.23%             |
+| (1 CPU)                                                                                               | 1.14x    | 2259 MB/s     | -            | 0.74x       | 1475 MB/s           | -                  |
+| [gob-stream](https://files.klauspost.com/compress/gob-stream.7z)                                      | 16.72x   | 14019 MB/s    | 24.02%       | 10.11x      | 8477 MB/s           | 30.48%             |
+| (1 CPU)                                                                                               | 1.24x    | 1043 MB/s     | -            | 0.70x       | 586 MB/s            | -                  |
+| [10gb.tar](http://mattmahoney.net/dc/10gb.html)                                                       | 13.33x   | 9254 MB/s     | 1.84%        | 6.75x       | 4686 MB/s           | 6.72%              |
+| (1 CPU)                                                                                               | 0.97x    | 672 MB/s      | -            | 0.53x       | 366 MB/s            | -                  |
+| sharnd.out.2gb                                                                                        | 2.11x    | 12639 MB/s    | 0.01%        | 1.98x       | 11833 MB/s          | 0.01%              |
+| (1 CPU)                                                                                               | 0.93x    | 5594 MB/s     | -            | 1.34x       | 8030 MB/s           | -                  |
+| [enwik9](http://mattmahoney.net/dc/textdata.html)                                                     | 19.34x   | 8220 MB/s     | 3.98%        | 7.87x       | 3345 MB/s           | 15.82%             |
+| (1 CPU)                                                                                               | 1.06x    | 452 MB/s      | -            | 0.50x       | 213 MB/s            | -                  |
+| [silesia.tar](http://sun.aei.polsl.pl/~sdeor/corpus/silesia.zip)                                      | 10.48x   | 6124 MB/s     | 5.67%        | 3.76x       | 2197 MB/s           | 12.60%             |
+| (1 CPU)                                                                                               | 0.97x    | 568 MB/s      | -            | 0.46x       | 271 MB/s            | -                  |
+| [enwik10](https://encode.su/threads/3315-enwik10-benchmark-results)                                   | 21.07x   | 9020 MB/s     | 6.36%        | 6.91x       | 2959 MB/s           | 16.95%             |
+| (1 CPU)                                                                                               | 1.07x    | 460 MB/s      | -            | 0.51x       | 220 MB/s            | -                  |
+
+### Legend
+
+* `S2 speed`: Speed of S2 compared to Snappy, using 16 cores and 1 core.
+* `S2 Throughput`: Throughput of S2 in MB/s.
+* `S2 % smaller`: How much smaller the S2 output is compared to Snappy, in percent.
+* `S2 "better"`: Speed of S2 "better" mode compared to Snappy, using 16 cores and 1 core.
+* `"better" throughput`: Throughput of S2 "better" mode in MB/s.
+* `"better" % smaller`: How much smaller the S2 "better" output is compared to Snappy, in percent.
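+
+As referenced above, here is a minimal hedged sketch of enabling the "better" mode on a stream
+via the `WriterBetterCompression` option (the surrounding function is illustrative):
+
+```Go
+// encodeBetterStream compresses src with the "better" mode enabled.
+// The output remains decodable by the standard S2 reader.
+func encodeBetterStream(src io.Reader, dst io.Writer) error {
+	enc := s2.NewWriter(dst, s2.WriterBetterCompression())
+	if _, err := io.Copy(enc, src); err != nil {
+		enc.Close()
+		return err
+	}
+	return enc.Close()
+}
+```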
+There is a good speedup across the board when using a single thread and a significant speedup when using multiple threads.
+
+Machine-generated data gets by far the biggest compression boost, with size being reduced by up to 45% compared to Snappy.
+
+The "better" compression mode sees a good improvement in all cases, but usually at a performance cost.
+
+Incompressible content (`sharnd.out.2gb`, 2GB random data) sees the smallest speedup.
+This is likely dominated by synchronization overhead, which is confirmed by the fact that single-threaded performance is higher (see above).
+
+## Decompression
+
+S2 attempts to create content that is also fast to decompress, except in "better" mode where the smallest representation is used.
+
+S2 vs Snappy **decompression** speed. Both operating on single core:
+
+| File                                                                                                 | S2 Throughput | vs. Snappy | Better Throughput | vs. Snappy |
+|------------------------------------------------------------------------------------------------------|---------------|------------|-------------------|------------|
+| [rawstudio-mint14.tar](https://files.klauspost.com/compress/rawstudio-mint14.7z)                     | 2117 MB/s     | 1.14x      | 1738 MB/s         | 0.94x      |
+| [github-june-2days-2019.json](https://files.klauspost.com/compress/github-june-2days-2019.json.zst)  | 2401 MB/s     | 1.25x      | 2307 MB/s         | 1.20x      |
+| [github-ranks-backup.bin](https://files.klauspost.com/compress/github-ranks-backup.bin.zst)          | 2075 MB/s     | 0.98x      | 1764 MB/s         | 0.83x      |
+| [consensus.db.10gb](https://files.klauspost.com/compress/consensus.db.10gb.zst)                      | 2967 MB/s     | 1.05x      | 2885 MB/s         | 1.02x      |
+| [adresser.json](https://files.klauspost.com/compress/adresser.json.zst)                              | 4141 MB/s     | 1.07x      | 4184 MB/s         | 1.08x      |
+| [gob-stream](https://files.klauspost.com/compress/gob-stream.7z)                                     | 2264 MB/s     | 1.12x      | 2185 MB/s         | 1.08x      |
+| [10gb.tar](http://mattmahoney.net/dc/10gb.html)                                                      | 1525 MB/s     | 1.03x      | 1347 MB/s         | 0.91x      |
+| sharnd.out.2gb                                                                                       | 3813 MB/s     | 0.79x      | 3900 MB/s         | 0.81x      |
+| [enwik9](http://mattmahoney.net/dc/textdata.html)                                                    | 1246 MB/s     | 1.29x      | 967 MB/s          | 1.00x      |
+| [silesia.tar](http://sun.aei.polsl.pl/~sdeor/corpus/silesia.zip)                                     | 1433 MB/s     | 1.12x      | 1203 MB/s         | 0.94x      |
+| [enwik10](https://encode.su/threads/3315-enwik10-benchmark-results)                                  | 1284 MB/s     | 1.32x      | 1010 MB/s         | 1.04x      |
+
+### Legend
+
+* `S2 Throughput`: Decompression speed of S2 encoded content.
+* `Better Throughput`: Decompression speed of S2 "better" encoded content.
+* `vs. Snappy`: Decompression speed relative to Snappy decompressing the same content.
+
+
+While the decompression code hasn't changed, there is a significant speedup in decompression speed.
+S2 prefers longer matches and will typically only find matches that are 6 bytes or longer.
+While this reduces compression a bit, it improves decompression speed.
+
+The "better" compression mode will actively look for shorter matches, which is why it has a decompression speed quite similar to Snappy.
+
+Decompression is also very fast without assembly. The table below shows single goroutine decompression speed with assembly disabled:
+
+| File                           | vs. Snappy | S2 Throughput |
+|--------------------------------|------------|---------------|
+| consensus.db.10gb.s2           | 1.84x      | 2289.8 MB/s   |
+| 10gb.tar.s2                    | 1.30x      | 867.07 MB/s   |
+| rawstudio-mint14.tar.s2        | 1.66x      | 1329.65 MB/s  |
+| github-june-2days-2019.json.s2 | 2.36x      | 1831.59 MB/s  |
+| github-ranks-backup.bin.s2     | 1.73x      | 1390.7 MB/s   |
+| enwik9.s2                      | 1.67x      | 681.53 MB/s   |
+| adresser.json.s2               | 3.41x      | 4230.53 MB/s  |
+| silesia.tar.s2                 | 1.52x      | 811.58 MB/s   |
+
+Even though S2 typically compresses better than Snappy, decompression speed is always better.
+
+## Block compression
+
+When compressing blocks, no concurrent compression is performed, just as with Snappy.
+This is because blocks are for smaller payloads and generally will not benefit from concurrent compression.
+
+An important change is that incompressible blocks will be at most 10 bytes bigger than the input.
+In rare, worst-case scenarios, Snappy blocks could be significantly bigger than the input.
+
+### Mixed content blocks
+
+The most reliable benchmark is a wide dataset.
+For this we use [`webdevdata.org-2015-01-07-subset`](https://files.klauspost.com/compress/webdevdata.org-2015-01-07-4GB-subset.7z),
+53927 files, total input size: 4,014,735,833 bytes. Single goroutine used.
+
+| *                 | Input      | Output     | Reduction  | MB/s       |
+|-------------------|------------|------------|------------|------------|
+| S2                | 4014735833 | 1059723369 | 73.60%     | **934.34** |
+| S2 Better         | 4014735833 | 969670507  | 75.85%     | 532.70     |
+| S2 Best           | 4014735833 | 906625668  | **77.85%** | 46.84      |
+| Snappy            | 4014735833 | 1128706759 | 71.89%     | 762.59     |
+| S2, Snappy Output | 4014735833 | 1093821420 | 72.75%     | 908.60     |
+| LZ4               | 4014735833 | 1079259294 | 73.12%     | 526.94     |
+
+S2 delivers both the best single-threaded throughput with regular mode and the best compression rate with "best".
+"Better" mode provides the same compression speed as LZ4 with a better compression ratio.
+
+When outputting Snappy compatible output, it still delivers better throughput (150MB/s more) and better compression.
+
+As can be seen from the other benchmarks, decompression should also be easier on the S2 generated output.
+
+Though they cannot be directly compared due to different decompression speeds, here are the speed/size comparisons for
+other Go compressors:
+
+| *                 | Input      | Output     | Reduction | MB/s   |
+|-------------------|------------|------------|-----------|--------|
+| Zstd Fastest (Go) | 4014735833 | 794608518  | 80.21%    | 236.04 |
+| Zstd Best (Go)    | 4014735833 | 704603356  | 82.45%    | 35.63  |
+| Deflate (Go) l1   | 4014735833 | 871294239  | 78.30%    | 214.04 |
+| Deflate (Go) l9   | 4014735833 | 730389060  | 81.81%    | 41.17  |
+
+### Standard block compression
+
+Benchmarking single block performance is subject to a lot more variation since it only tests a limited number of file patterns.
+So individual benchmarks should only be seen as a guideline and the overall picture is more important.
+
+These micro-benchmarks are with data in cache and trained branch predictors. For a more realistic benchmark see the mixed content above.
+
+Block compression. Parallel benchmark running on 16 cores, 16 goroutines.
+
+AMD64 assembly is used for both S2 and Snappy.
+
+| Absolute Perf         | Snappy size | S2 Size | Snappy Speed | S2 Speed    | Snappy dec  | S2 dec      |
+|-----------------------|-------------|---------|--------------|-------------|-------------|-------------|
+| html                  | 22843       | 21111   | 16246 MB/s   | 17438 MB/s  | 40972 MB/s  | 49263 MB/s  |
+| urls.10K              | 335492      | 287326  | 7943 MB/s    | 9693 MB/s   | 22523 MB/s  | 26484 MB/s  |
+| fireworks.jpeg        | 123034      | 123100  | 349544 MB/s  | 273889 MB/s | 718321 MB/s | 827552 MB/s |
+| fireworks.jpeg (200B) | 146         | 155     | 8869 MB/s    | 17773 MB/s  | 33691 MB/s  | 52421 MB/s  |
+| paper-100k.pdf        | 85304       | 84459   | 167546 MB/s  | 101263 MB/s | 326905 MB/s | 291944 MB/s |
+| html_x_4              | 92234       | 21113   | 15194 MB/s   | 50670 MB/s  | 30843 MB/s  | 32217 MB/s  |
+| alice29.txt           | 88034       | 85975   | 5936 MB/s    | 6139 MB/s   | 12882 MB/s  | 20044 MB/s  |
+| asyoulik.txt          | 77503       | 79650   | 5517 MB/s    | 6366 MB/s   | 12735 MB/s  | 22806 MB/s  |
+| lcet10.txt            | 234661      | 220670  | 6235 MB/s    | 6067 MB/s   | 14519 MB/s  | 18697 MB/s  |
+| plrabn12.txt          | 319267      | 317985  | 5159 MB/s    | 5726 MB/s   | 11923 MB/s  | 19901 MB/s  |
+| geo.protodata         | 23335       | 18690   | 21220 MB/s   | 26529 MB/s  | 56271 MB/s  | 62540 MB/s  |
+| kppkn.gtb             | 69526       | 65312   | 9732 MB/s    | 8559 MB/s   | 18491 MB/s  | 18969 MB/s  |
+| alice29.txt (128B)    | 80          | 82      | 6691 MB/s    | 15489 MB/s  | 31883 MB/s  | 38874 MB/s  |
+| alice29.txt (1000B)   | 774         | 774     | 12204 MB/s   | 13000 MB/s  | 48056 MB/s  | 52341 MB/s  |
+| alice29.txt (10000B)  | 6648        | 6933    | 10044 MB/s   | 12806 MB/s  | 32378 MB/s  | 46322 MB/s  |
+| alice29.txt (20000B)  | 12686       | 13574   | 7733 MB/s    | 11210 MB/s  | 30566 MB/s  | 58969 MB/s  |
+
+
+| Relative Perf         | Snappy size | S2 size improved | S2 Speed | S2 Dec Speed |
+|-----------------------|-------------|------------------|----------|--------------|
+| html                  | 22.31%      | 7.58%            | 1.07x    | 1.20x        |
+| urls.10K              | 47.78%      | 14.36%           | 1.22x    | 1.18x        |
+| fireworks.jpeg        | 99.95%      | -0.05%           | 0.78x    | 1.15x        |
+| fireworks.jpeg (200B) | 73.00%      | -6.16%           | 2.00x    | 1.56x        |
+| paper-100k.pdf        | 83.30%      | 0.99%            | 0.60x    | 0.89x        |
+| html_x_4              | 22.52%      | 77.11%           | 3.33x    | 1.04x        |
+| alice29.txt           | 57.88%      | 2.34%            | 1.03x    | 1.56x        |
+| asyoulik.txt          | 61.91%      | -2.77%           | 1.15x    | 1.79x        |
+| lcet10.txt            | 54.99%      | 5.96%            | 0.97x    | 1.29x        |
+| plrabn12.txt          | 66.26%      | 0.40%            | 1.11x    | 1.67x        |
+| geo.protodata         | 19.68%      | 19.91%           | 1.25x    | 1.11x        |
+| kppkn.gtb             | 37.72%      | 6.06%            | 0.88x    | 1.03x        |
+| alice29.txt (128B)    | 62.50%      | -2.50%           | 2.31x    | 1.22x        |
+| alice29.txt (1000B)   | 77.40%      | 0.00%            | 1.07x    | 1.09x        |
+| alice29.txt (10000B)  | 66.48%      | -4.29%           | 1.27x    | 1.43x        |
+| alice29.txt (20000B)  | 63.43%      | -7.00%           | 1.45x    | 1.93x        |
+
+Speed is generally at or above Snappy. Small blocks get a significant speedup, although at the expense of size.
+
+Decompression speed is better than Snappy, except in one case.
+
+Since the payloads are very small, the variance in size is rather big, so these should only be seen as a general guideline.
+
+Size is on average around Snappy, but varies with content type.
+In cases where compression is worse, it is usually compensated for by a speed boost.
+
+
+### Better compression
+
+Benchmarking single block performance is subject to a lot more variation since it only tests a limited number of file patterns.
+So individual benchmarks should only be seen as a guideline and the overall picture is more important.
+
+| Absolute Perf         | Snappy size | Better Size | Snappy Speed | Better Speed | Snappy dec  | Better dec  |
+|-----------------------|-------------|-------------|--------------|--------------|-------------|-------------|
+| html                  | 22843       | 19833       | 16246 MB/s   | 7731 MB/s    | 40972 MB/s  | 40292 MB/s  |
+| urls.10K              | 335492      | 253529      | 7943 MB/s    | 3980 MB/s    | 22523 MB/s  | 20981 MB/s  |
+| fireworks.jpeg        | 123034      | 123100      | 349544 MB/s  | 9760 MB/s    | 718321 MB/s | 823698 MB/s |
+| fireworks.jpeg (200B) | 146         | 142         | 8869 MB/s    | 594 MB/s     | 33691 MB/s  | 30101 MB/s  |
+| paper-100k.pdf        | 85304       | 82915       | 167546 MB/s  | 7470 MB/s    | 326905 MB/s | 198869 MB/s |
+| html_x_4              | 92234       | 19841       | 15194 MB/s   | 23403 MB/s   | 30843 MB/s  | 30937 MB/s  |
+| alice29.txt           | 88034       | 73218       | 5936 MB/s    | 2945 MB/s    | 12882 MB/s  | 16611 MB/s  |
+| asyoulik.txt          | 77503       | 66844       | 5517 MB/s    | 2739 MB/s    | 12735 MB/s  | 14975 MB/s  |
+| lcet10.txt            | 234661      | 190589      | 6235 MB/s    | 3099 MB/s    | 14519 MB/s  | 16634 MB/s  |
+| plrabn12.txt          | 319267      | 270828      | 5159 MB/s    | 2600 MB/s    | 11923 MB/s  | 13382 MB/s  |
+| geo.protodata         | 23335       | 18278       | 21220 MB/s   | 11208 MB/s   | 56271 MB/s  | 57961 MB/s  |
+| kppkn.gtb             | 69526       | 61851       | 9732 MB/s    | 4556 MB/s    | 18491 MB/s  | 16524 MB/s  |
+| alice29.txt (128B)    | 80          | 81          | 6691 MB/s    | 529 MB/s     | 31883 MB/s  | 34225 MB/s  |
+| alice29.txt (1000B)   | 774         | 748         | 12204 MB/s   | 1943 MB/s    | 48056 MB/s  | 42068 MB/s  |
+| alice29.txt (10000B)  | 6648        | 6234        | 10044 MB/s   | 2949 MB/s    | 32378 MB/s  | 28813 MB/s  |
+| alice29.txt (20000B)  | 12686       | 11584       | 7733 MB/s    | 2822 MB/s    | 30566 MB/s  | 27315 MB/s  |
+
+
+| Relative Perf         | Snappy size | Better size | Better Speed | Better dec |
+|-----------------------|-------------|-------------|--------------|------------|
+| html                  | 22.31%      | 13.18%      | 0.48x        | 0.98x      |
+| urls.10K              | 47.78%      | 24.43%      | 0.50x        | 0.93x      |
+| fireworks.jpeg        | 99.95%      | -0.05%      | 0.03x        | 1.15x      |
+| fireworks.jpeg (200B) | 73.00%      | 2.74%       | 0.07x        | 0.89x      |
+| paper-100k.pdf        | 83.30%      | 2.80%       | 0.04x        | 0.61x      |
+| html_x_4              | 22.52%      | 78.49%      | 1.54x        | 1.00x      |
+| alice29.txt           | 57.88%      | 16.83%      | 0.50x        | 1.29x      |
+| asyoulik.txt          | 61.91%      | 13.75%      | 0.50x        | 1.18x      |
+| lcet10.txt            | 54.99%      | 18.78%      | 0.50x        | 1.15x      |
+| plrabn12.txt          | 66.26%      | 15.17%      | 0.50x        | 1.12x      |
+| geo.protodata         | 19.68%      | 21.67%      | 0.53x        | 1.03x      |
+| kppkn.gtb             | 37.72%      | 11.04%      | 0.47x        | 0.89x      |
+| alice29.txt (128B)    | 62.50%      | -1.25%      | 0.08x        | 1.07x      |
+| alice29.txt (1000B)   | 77.40%      | 3.36%       | 0.16x        | 0.88x      |
+| alice29.txt (10000B)  | 66.48%      | 6.23%       | 0.29x        | 0.89x      |
+| alice29.txt (20000B)  | 63.43%      | 8.69%       | 0.36x        | 0.89x      |
+
+Except for the mostly incompressible JPEG image, compression is better, and usually in the
+double digits in terms of percentage reduction over Snappy.
+
+The PDF sample shows a significant slowdown compared to Snappy, as this mode tries harder
+to compress the data. Very small blocks are also not favorable for better compression, so throughput is way down.
+
+This mode aims to provide better compression at the expense of speed, and achieves that
+without a huge penalty, except on very small blocks.
+
+Decompression speed suffers a little compared to the regular S2 mode,
+but still manages to be close to Snappy in spite of increased compression.
+
+# Best compression mode
+
+S2 offers a "best" compression mode.
+
+This will compress as much as possible with little regard to CPU usage.
+
+It is mainly intended for offline compression, where decompression speed should still
+be high and the output should remain compatible with other S2 compressed data.
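+
+To make the mode concrete, here is a minimal sketch using both the block and stream forms of
+"best" compression, `EncodeBest` and the `WriterBestCompression` option (the surrounding
+function is illustrative):
+
+```Go
+// bestCompress shows both forms. EncodeBest compresses a single block,
+// while WriterBestCompression enables the same mode for streams.
+func bestCompress(src []byte, dst io.Writer) error {
+	// Block form: slowest encode, smallest output.
+	block := s2.EncodeBest(nil, src)
+	_ = block // Store or transmit the block as needed.
+
+	// Stream form using the equivalent writer option.
+	enc := s2.NewWriter(dst, s2.WriterBestCompression())
+	if _, err := enc.Write(src); err != nil {
+		enc.Close()
+		return err
+	}
+	return enc.Close()
+}
+```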
+
+Some examples compared on 16 core CPU, amd64 assembly used:
+
+```
+* enwik10
+Default... 10000000000 -> 4761467548 [47.61%]; 1.098s, 8685.6MB/s
+Better... 10000000000 -> 4219438251 [42.19%]; 1.925s, 4954.2MB/s
+Best... 10000000000 -> 3627364337 [36.27%]; 43.051s, 221.5MB/s
+
+* github-june-2days-2019.json
+Default... 6273951764 -> 1043196283 [16.63%]; 431ms, 13882.3MB/s
+Better... 6273951764 -> 949146808 [15.13%]; 547ms, 10938.4MB/s
+Best... 6273951764 -> 832855506 [13.27%]; 9.455s, 632.8MB/s
+
+* nyc-taxi-data-10M.csv
+Default... 3325605752 -> 1095998837 [32.96%]; 324ms, 9788.7MB/s
+Better... 3325605752 -> 954776589 [28.71%]; 491ms, 6459.4MB/s
+Best... 3325605752 -> 779098746 [23.43%]; 8.29s, 382.6MB/s
+
+* 10gb.tar
+Default... 10065157632 -> 5916578242 [58.78%]; 1.028s, 9337.4MB/s
+Better... 10065157632 -> 5649207485 [56.13%]; 1.597s, 6010.6MB/s
+Best... 10065157632 -> 5208719802 [51.75%]; 32.78s, 292.8MB/s
+
+* consensus.db.10gb
+Default... 10737418240 -> 4562648848 [42.49%]; 882ms, 11610.0MB/s
+Better... 10737418240 -> 4542428129 [42.30%]; 1.533s, 6679.7MB/s
+Best... 10737418240 -> 4244773384 [39.53%]; 42.96s, 238.4MB/s
+```
+
+Decompression speed should be around the same as using the 'better' compression mode.
+
+# Snappy Compatibility
+
+S2 now offers full compatibility with Snappy.
+
+This means that the efficient encoders of S2 can be used to generate fully Snappy compatible output.
+
+There is a [snappy](https://github.com/klauspost/compress/tree/master/snappy) package that can be used by
+simply changing imports from `github.com/golang/snappy` to `github.com/klauspost/compress/snappy`.
+This uses "better" mode for all operations.
+If you would like more control, you can use the s2 package as described below:
+
+## Blocks
+
+Snappy compatible blocks can be generated with the S2 encoder.
+Compression and speed are typically a bit better, and `MaxEncodedLen` is also smaller, reducing memory usage. Replace:
+
+| Snappy                    | S2 replacement          |
+|---------------------------|-------------------------|
+| snappy.Encode(...)        | s2.EncodeSnappy(...)    |
+| snappy.MaxEncodedLen(...) | s2.MaxEncodedLen(...)   |
+
+`s2.EncodeSnappy` can be replaced with `s2.EncodeSnappyBetter` or `s2.EncodeSnappyBest` to get more efficiently compressed snappy compatible output.
+
+`s2.ConcatBlocks` is compatible with snappy blocks.
+
+Comparison of [`webdevdata.org-2015-01-07-subset`](https://files.klauspost.com/compress/webdevdata.org-2015-01-07-4GB-subset.7z),
+53927 files, total input size: 4,014,735,833 bytes. amd64, single goroutine used:
+
+| Encoder               | Size       | MB/s       | Reduction  |
+|-----------------------|------------|------------|------------|
+| snappy.Encode         | 1128706759 | 725.59     | 71.89%     |
+| s2.EncodeSnappy       | 1093823291 | **899.16** | 72.75%     |
+| s2.EncodeSnappyBetter | 1001158548 | 578.49     | 75.06%     |
+| s2.EncodeSnappyBest   | 944507998  | 66.00      | **76.47%** |
+
+## Streams
+
+For streams, replace `enc = snappy.NewBufferedWriter(w)` with `enc = s2.NewWriter(w, s2.WriterSnappyCompat())`.
+All other options are available, but note that the block size limit is different for Snappy.
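+
+As a short sketch of the replacement described above (the function name is illustrative):
+
+```Go
+// encodeSnappyStream writes a Snappy-compatible stream using the S2 encoder.
+// The output can be read by any standard Snappy stream decoder.
+func encodeSnappyStream(src io.Reader, dst io.Writer) error {
+	enc := s2.NewWriter(dst, s2.WriterSnappyCompat())
+	if _, err := io.Copy(enc, src); err != nil {
+		enc.Close()
+		return err
+	}
+	return enc.Close()
+}
+```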
+
+Comparison of different streams, AMD Ryzen 3950x, 16 cores. Size and throughput:
+
+| File                        | snappy.NewWriter         | S2 Snappy                 | S2 Snappy, Better        | S2 Snappy, Best         |
+|-----------------------------|--------------------------|---------------------------|--------------------------|-------------------------|
+| nyc-taxi-data-10M.csv       | 1316042016 - 539.47MB/s  | 1307003093 - 10132.73MB/s | 1174534014 - 5002.44MB/s | 1115904679 - 177.97MB/s |
+| enwik10 (xml)               | 5088294643 - 451.13MB/s  | 5175840939 - 9440.69MB/s  | 4560784526 - 4487.21MB/s | 4340299103 - 158.92MB/s |
+| 10gb.tar (mixed)            | 6056946612 - 729.73MB/s  | 6208571995 - 9978.05MB/s  | 5741646126 - 4919.98MB/s | 5548973895 - 180.44MB/s |
+| github-june-2days-2019.json | 1525176492 - 933.00MB/s  | 1476519054 - 13150.12MB/s | 1400547532 - 5803.40MB/s | 1321887137 - 204.29MB/s |
+| consensus.db.10gb (db)      | 5412897703 - 1102.14MB/s | 5354073487 - 13562.91MB/s | 5335069899 - 5294.73MB/s | 5201000954 - 175.72MB/s |
+
+## Decompression
+
+All decompression functions map directly to equivalent s2 functions.
+
+| Snappy                 | S2 replacement     |
+|------------------------|--------------------|
+| snappy.Decode(...)     | s2.Decode(...)     |
+| snappy.DecodedLen(...) | s2.DecodedLen(...) |
+| snappy.NewReader(...)  | s2.NewReader(...)  |
+
+Features like [quick forward skipping without decompression](https://pkg.go.dev/github.com/klauspost/compress/s2#Reader.Skip)
+are also available for Snappy streams.
+
+If you know you are only decompressing snappy streams, setting [`ReaderMaxBlockSize(64<<10)`](https://pkg.go.dev/github.com/klauspost/compress/s2#ReaderMaxBlockSize)
+on your Reader will reduce memory consumption.
+
+# Concatenating blocks and streams.
+
+Concatenating streams will concatenate the output of both without recompressing them.
+While this is inefficient in terms of compression, it might be usable in certain scenarios.
+The 10 byte 'stream identifier' of the second stream can optionally be stripped, but it is not a requirement.
+
+Blocks can be concatenated using the `ConcatBlocks` function.
+
+Snappy blocks/streams can safely be concatenated with S2 blocks and streams.
+Streams with indexes (see below) will currently not work on concatenated streams.
+
+# Stream Seek Index
+
+S2 and Snappy streams can have indexes. These indexes will allow random seeking within the compressed data.
+
+The index can either be appended to the stream as a skippable block or returned for separate storage.
+
+When the index is appended to a stream, it will be skipped by regular decoders,
+so the output remains compatible with other decoders.
+
+## Creating an Index
+
+To automatically add an index to a stream, add the `WriterAddIndex()` option to your writer.
+The index will then be added to the stream when `Close()` is called.
+
+```
+	// Add Index to stream...
+	enc := s2.NewWriter(w, s2.WriterAddIndex())
+	io.Copy(enc, r)
+	enc.Close()
+```
+
+If you want to store the index separately, you can use `CloseIndex()` instead of the regular `Close()`.
+This will return the index. Note that `CloseIndex()` should only be called once, and you shouldn't call `Close()`.
+
+```
+	// Get index for separate storage...
+	enc := s2.NewWriter(w)
+	io.Copy(enc, r)
+	index, err := enc.CloseIndex()
+```
+
+The `index` can then be used without needing to read it from the stream.
+This means the index can be used without needing to seek to the end of the stream
+or for manually forwarding streams. See below.
+
+Finally, an existing S2/Snappy stream can be indexed using the `s2.IndexStream(r io.Reader)` function.
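+
+As a hedged sketch, assuming `IndexStream` returns the serialized index as a byte slice along
+with an error, indexing an existing compressed file could look like this (assumes `os` is imported):
+
+```Go
+// indexFile builds an index for an already compressed S2/Snappy file.
+// Assumes IndexStream returns the serialized index bytes and an error.
+func indexFile(name string) ([]byte, error) {
+	f, err := os.Open(name)
+	if err != nil {
+		return nil, err
+	}
+	defer f.Close()
+	// IndexStream reads the whole stream to locate the blocks.
+	return s2.IndexStream(f)
+}
+```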
+
+## Using Indexes
+
+To use indexes, there is a `ReadSeeker(random bool, index []byte) (*ReadSeeker, error)` function available.
+
+Calling ReadSeeker will return an [io.ReadSeeker](https://pkg.go.dev/io#ReadSeeker) compatible version of the reader.
+
+If 'random' is specified, the returned io.Seeker can be used for random seeking, otherwise only forward seeking is supported.
+Enabling random seeking requires the original input to support the [io.Seeker](https://pkg.go.dev/io#Seeker) interface.
+
+```
+	dec := s2.NewReader(r)
+	rs, err := dec.ReadSeeker(false, nil)
+	rs.Seek(wantOffset, io.SeekStart)
+```
+
+This gets a seeker that can seek forward. Since no index is provided, the index is read from the stream.
+This requires that an index was added and that `r` supports the [io.Seeker](https://pkg.go.dev/io#Seeker) interface.
+
+A custom index can be specified, which will be used if supplied.
+When using a custom index, it will not be read from the input stream.
+
+```
+	dec := s2.NewReader(r)
+	rs, err := dec.ReadSeeker(false, index)
+	rs.Seek(wantOffset, io.SeekStart)
+```
+
+This will read the index from `index`. Since we specify non-random (forward-only) seeking, `r` does not have to be an io.Seeker.
+
+```
+	dec := s2.NewReader(r)
+	rs, err := dec.ReadSeeker(true, index)
+	rs.Seek(wantOffset, io.SeekStart)
+```
+
+Finally, since we specify that we want to do random seeking, `r` must be an io.Seeker.
+
+The returned [ReadSeeker](https://pkg.go.dev/github.com/klauspost/compress/s2#ReadSeeker) contains a shallow reference to the existing Reader,
+meaning changes performed to one are reflected in the other.
+
+To check if a stream contains an index at the end, the `(*Index).LoadStream(rs io.ReadSeeker) error` can be used.
+
+## Manually Forwarding Streams
+
+Indexes can also be read outside the decoder using the [Index](https://pkg.go.dev/github.com/klauspost/compress/s2#Index) type.
+This can be used for parsing indexes, either separate or in streams.
+
+In some cases it may not be possible to serve a seekable stream.
+This can for instance be an HTTP stream, where the Range request
+is sent at the start of the stream.
+
+With a little bit of extra code, it is still possible to use indexes
+to forward to a specific offset with a single forward skip.
+
+It is possible to load the index manually like this:
+```
+	var index s2.Index
+	_, err = index.Load(idxBytes)
+```
+
+This can be used to figure out how much to offset the compressed stream:
+
+```
+	compressedOffset, uncompressedOffset, err := index.Find(wantOffset)
+```
+
+The `compressedOffset` is the number of bytes that should be skipped
+from the beginning of the compressed file.
+
+The `uncompressedOffset` will then be the offset of the uncompressed bytes returned
+when decoding from that position. This will always be <= wantOffset.
+
+When creating a decoder, it must be specified that it should *not* expect a stream identifier
+at the beginning of the stream. Assuming the io.Reader `r` has been forwarded to `compressedOffset`,
+we create the decoder like this:
+
+```
+	dec := s2.NewReader(r, s2.ReaderIgnoreStreamIdentifier())
+```
+
+We are not completely done. We still need to forward the stream past the uncompressed bytes we didn't want.
+This is done using the regular "Skip" function:
+
+```
+	err = dec.Skip(wantOffset - uncompressedOffset)
+```
+
+This will ensure that we are at exactly the offset we want, and reading from `dec` will start at the requested offset.
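+
+Putting these steps together, here is a hedged end-to-end sketch; the `forwardReader` helper and
+its `seekInput` callback are illustrative glue, not part of the package:
+
+```Go
+// forwardReader combines the steps above: find the offsets, forward the
+// raw input, then skip decoded bytes up to the requested position.
+// seekInput is any function that forwards the raw input, for example by
+// issuing an HTTP Range request or seeking an io.Seeker.
+func forwardReader(r io.Reader, idxBytes []byte, wantOffset int64,
+	seekInput func(compressedOffset int64) error) (*s2.Reader, error) {
+	var index s2.Index
+	if _, err := index.Load(idxBytes); err != nil {
+		return nil, err
+	}
+	compressedOffset, uncompressedOffset, err := index.Find(wantOffset)
+	if err != nil {
+		return nil, err
+	}
+	// Forward the raw input to the start of the containing block.
+	if err := seekInput(compressedOffset); err != nil {
+		return nil, err
+	}
+	// No stream identifier is expected mid-stream.
+	dec := s2.NewReader(r, s2.ReaderIgnoreStreamIdentifier())
+	// Skip the decoded bytes that precede the requested offset.
+	if err := dec.Skip(wantOffset - uncompressedOffset); err != nil {
+		return nil, err
+	}
+	return dec, nil
+}
+```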
+
+## Index Format:
+
+Each block is structured as a snappy skippable block, with the chunk ID 0x99.
+
+The block can be read from the front, but contains information so it can be read from the back as well.
+
+Numbers are stored as fixed-size little-endian values or [zigzag encoded](https://developers.google.com/protocol-buffers/docs/encoding#signed_integers) [base 128 varints](https://developers.google.com/protocol-buffers/docs/encoding),
+with an un-encoded value length of 64 bits, unless other limits are specified.
+
+| Content                              | Format                                                                                                                        |
+|--------------------------------------|-------------------------------------------------------------------------------------------------------------------------------|
+| ID, `[1]byte`                        | Always 0x99.                                                                                                                  |
+| Data Length, `[3]byte`               | 3 byte little-endian length of the chunk in bytes, following this.                                                            |
+| Header `[6]byte`                     | Header, must be `[115, 50, 105, 100, 120, 0]` or in text: "s2idx\x00".                                                        |
+| UncompressedSize, Varint             | Total Uncompressed size.                                                                                                      |
+| CompressedSize, Varint               | Total Compressed size if known. Should be -1 if unknown.                                                                      |
+| EstBlockSize, Varint                 | Block Size, used for guessing uncompressed offsets. Must be >= 0.                                                             |
+| Entries, Varint                      | Number of Entries in index, must be < 65536 and >=0.                                                                          |
+| HasUncompressedOffsets `byte`        | 0 if no uncompressed offsets are present, 1 if present. Other values are invalid.                                             |
+| UncompressedOffsets, [Entries]VarInt | Uncompressed offsets. See below how to decode.                                                                                |
+| CompressedOffsets, [Entries]VarInt   | Compressed offsets. See below how to decode.                                                                                  |
+| Block Size, `[4]byte`                | Little Endian total encoded size (including header and trailer). Can be used for searching backwards to start of block.      |
+| Trailer `[6]byte`                    | Trailer, must be `[0, 120, 100, 105, 50, 115]` or in text: "\x00xdi2s". Can be used for identifying block from end of stream. |
+
+For regular streams the uncompressed offsets are fully predictable,
+so `HasUncompressedOffsets` allows specifying that compressed blocks all have
+exactly `EstBlockSize` bytes of uncompressed content.
+
+Entries *must* be in order, starting with the lowest offset,
+and there *must* be no uncompressed offset duplicates.
+Entries *may* point to the start of a skippable block,
+but it is then not allowed to also have an entry for the next block since
+that would give an uncompressed offset duplicate.
+
+There is no requirement for all blocks to be represented in the index.
+In fact, there is a maximum of 65536 block entries in an index.
+
+The writer can use any method to reduce the number of entries.
+An implicit block start at 0,0 can be assumed.
+
+### Decoding entries:
+
+```
+// Read Uncompressed entries.
+// Each assumes EstBlockSize delta from previous.
+for each entry {
+    uOff = 0
+    if HasUncompressedOffsets == 1 {
+        uOff = ReadVarInt // Read value from stream
+    }
+
+    // Except for the first entry, use previous values.
+    if entryNum == 0 {
+        entry[entryNum].UncompressedOffset = uOff
+        continue
+    }
+
+    // Uncompressed uses previous offset, EstBlockSize and the delta read above.
+    entry[entryNum].UncompressedOffset = entry[entryNum-1].UncompressedOffset + EstBlockSize + uOff
+}
+
+
+// Guess that the first block will be 50% of uncompressed size.
+// Integer truncating division must be used.
+CompressGuess := EstBlockSize / 2
+
+// Read Compressed entries.
+// Each assumes CompressGuess delta from previous.
+// CompressGuess is adjusted for each value.
+for each entry {
+    cOff = ReadVarInt // Read value from stream
+
+    // Except for the first entry, use previous values.
+    if entryNum == 0 {
+        entry[entryNum].CompressedOffset = cOff
+        continue
+    }
+
+    // Compressed uses previous offset, our estimate and the delta read above.
+    entry[entryNum].CompressedOffset = entry[entryNum-1].CompressedOffset + CompressGuess + cOff
+
+    // Adjust compressed offset for next loop, integer truncating division must be used.
+    CompressGuess += cOff/2
+}
+```
+
+# Format Extensions
+
+* Frame [Stream identifier](https://github.com/google/snappy/blob/master/framing_format.txt#L68) changed from `sNaPpY` to `S2sTwO`.
+* [Framed compressed blocks](https://github.com/google/snappy/blob/master/format_description.txt) can be up to 4MB (up from 64KB).
+* Compressed blocks can have an offset of `0`, which indicates to repeat the last seen offset.
+
+Repeat offsets must be encoded as a [2.2.1. Copy with 1-byte offset (01)](https://github.com/google/snappy/blob/master/format_description.txt#L89), where the offset is 0.
+
+The length is specified by reading the 3-bit length specified in the tag and decoded using this table:
+
+| Length | Actual Length        |
+|--------|----------------------|
+| 0      | 4                    |
+| 1      | 5                    |
+| 2      | 6                    |
+| 3      | 7                    |
+| 4      | 8                    |
+| 5      | 8 + read 1 byte      |
+| 6      | 260 + read 2 bytes   |
+| 7      | 65540 + read 3 bytes |
+
+This allows any repeat offset + length to be represented by 2 to 5 bytes.
+
+Lengths are stored as little-endian values.
+
+The first copy of a block cannot be a repeat offset and the offset is not carried across blocks in streams.
+
+Default streaming block size is 1MB.
+
+# LICENSE
+
+This code is based on the [Snappy-Go](https://github.com/golang/snappy) implementation.
+
+Use of this source code is governed by a BSD-style license that can be found in the LICENSE file.
diff --git a/vendor/github.com/klauspost/compress/s2/decode.go b/vendor/github.com/klauspost/compress/s2/decode.go
new file mode 100644
index 0000000000000..9e7fce8856e06
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/s2/decode.go
@@ -0,0 +1,762 @@
+// Copyright 2011 The Snappy-Go Authors. All rights reserved.
+// Copyright (c) 2019 Klaus Post. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package s2
+
+import (
+	"encoding/binary"
+	"errors"
+	"fmt"
+	"io"
+	"io/ioutil"
+)
+
+var (
+	// ErrCorrupt reports that the input is invalid.
+	ErrCorrupt = errors.New("s2: corrupt input")
+	// ErrCRC reports that the input failed CRC validation (streams only)
+	ErrCRC = errors.New("s2: corrupt input, crc mismatch")
+	// ErrTooLarge reports that the uncompressed length is too large.
+	ErrTooLarge = errors.New("s2: decoded block is too large")
+	// ErrUnsupported reports that the input isn't supported.
+	ErrUnsupported = errors.New("s2: unsupported input")
+)
+
+// ErrCantSeek is returned if the stream cannot be seeked.
+type ErrCantSeek struct {
+	Reason string
+}
+
+// Error returns the error as string.
+func (e ErrCantSeek) Error() string {
+	return fmt.Sprintf("s2: Can't seek because %s", e.Reason)
+}
+
+// DecodedLen returns the length of the decoded block.
+func DecodedLen(src []byte) (int, error) {
+	v, _, err := decodedLen(src)
+	return v, err
+}
+
+// decodedLen returns the length of the decoded block and the number of bytes
+// that the length header occupied.
+func decodedLen(src []byte) (blockLen, headerLen int, err error) {
+	v, n := binary.Uvarint(src)
+	if n <= 0 || v > 0xffffffff {
+		return 0, 0, ErrCorrupt
+	}
+
+	const wordSize = 32 << (^uint(0) >> 32 & 1)
+	if wordSize == 32 && v > 0x7fffffff {
+		return 0, 0, ErrTooLarge
+	}
+	return int(v), n, nil
+}
+
+const (
+	decodeErrCodeCorrupt = 1
+)
+
+// Decode returns the decoded form of src. The returned slice may be a sub-
+// slice of dst if dst was large enough to hold the entire decoded block.
+// Otherwise, a newly allocated slice will be returned.
+//
+// The dst and src must not overlap. It is valid to pass a nil dst.
+func Decode(dst, src []byte) ([]byte, error) {
+	dLen, s, err := decodedLen(src)
+	if err != nil {
+		return nil, err
+	}
+	if dLen <= cap(dst) {
+		dst = dst[:dLen]
+	} else {
+		dst = make([]byte, dLen)
+	}
+	if s2Decode(dst, src[s:]) != 0 {
+		return nil, ErrCorrupt
+	}
+	return dst, nil
+}
+
+// NewReader returns a new Reader that decompresses from r, using the framing
+// format described at
+// https://github.com/google/snappy/blob/master/framing_format.txt with S2 changes.
+func NewReader(r io.Reader, opts ...ReaderOption) *Reader {
+	nr := Reader{
+		r:        r,
+		maxBlock: maxBlockSize,
+	}
+	for _, opt := range opts {
+		if err := opt(&nr); err != nil {
+			nr.err = err
+			return &nr
+		}
+	}
+	nr.maxBufSize = MaxEncodedLen(nr.maxBlock) + checksumSize
+	if nr.lazyBuf > 0 {
+		nr.buf = make([]byte, MaxEncodedLen(nr.lazyBuf)+checksumSize)
+	} else {
+		nr.buf = make([]byte, MaxEncodedLen(defaultBlockSize)+checksumSize)
+	}
+	nr.readHeader = nr.ignoreStreamID
+	nr.paramsOK = true
+	return &nr
+}
+
+// ReaderOption is an option for creating a decoder.
+type ReaderOption func(*Reader) error
+
+// ReaderMaxBlockSize allows controlling allocations if the stream
+// has been compressed with a smaller WriterBlockSize, or with the default 1MB.
+// Blocks must be this size or smaller to decompress,
+// otherwise the decoder will return ErrUnsupported.
+//
+// For streams compressed with Snappy this can safely be set to 64KB (64 << 10).
+//
+// Default is the maximum limit of 4MB.
+func ReaderMaxBlockSize(blockSize int) ReaderOption {
+	return func(r *Reader) error {
+		if blockSize > maxBlockSize || blockSize <= 0 {
+			return errors.New("s2: block size too large. Must be <= 4MB and > 0")
+		}
+		if r.lazyBuf == 0 && blockSize < defaultBlockSize {
+			r.lazyBuf = blockSize
+		}
+		r.maxBlock = blockSize
+		return nil
+	}
+}
+
+// ReaderAllocBlock allows controlling upfront stream allocations,
+// so frames bigger than this are not allocated for initially.
+// If frames bigger than this are seen, a bigger buffer will be allocated.
+//
+// Default is 1MB, which is default output size.
+func ReaderAllocBlock(blockSize int) ReaderOption {
+	return func(r *Reader) error {
+		if blockSize > maxBlockSize || blockSize < 1024 {
+			return errors.New("s2: invalid ReaderAllocBlock. Must be <= 4MB and >= 1024")
+		}
+		r.lazyBuf = blockSize
+		return nil
+	}
+}
+
+// ReaderIgnoreStreamIdentifier will make the reader skip the expected
+// stream identifier at the beginning of the stream.
+// This can be used when serving a stream that has been forwarded to a specific point.
+func ReaderIgnoreStreamIdentifier() ReaderOption {
+	return func(r *Reader) error {
+		r.ignoreStreamID = true
+		return nil
+	}
+}
+
+// ReaderSkippableCB will register a callback for chunks with the specified ID.
+// ID must be a Reserved skippable chunks ID, 0x80-0xfd (inclusive).
+// For each chunk with the ID, the callback is called with the content.
+// Any returned non-nil error will abort decompression.
+// Only one callback per ID is supported, latest sent will be used.
+func ReaderSkippableCB(id uint8, fn func(r io.Reader) error) ReaderOption {
+	return func(r *Reader) error {
+		if id < 0x80 || id > 0xfd {
+			return fmt.Errorf("ReaderSkippableCB: Invalid id provided, must be 0x80-0xfd (inclusive)")
+		}
+		r.skippableCB[id-0x80] = fn
+		return nil
+	}
+}
+
+// Reader is an io.Reader that can read Snappy-compressed bytes.
+type Reader struct {
+	r           io.Reader
+	err         error
+	decoded     []byte
+	buf         []byte
+	skippableCB [0x80]func(r io.Reader) error
+	blockStart  int64 // Uncompressed offset at start of current.
+	index       *Index
+
+	// decoded[i:j] contains decoded bytes that have not yet been passed on.
+	i, j int
+	// maximum block size allowed.
+	maxBlock int
+	// maximum expected buffer size.
+	maxBufSize int
+	// alloc a buffer this size if > 0.
+	lazyBuf        int
+	readHeader     bool
+	paramsOK       bool
+	snappyFrame    bool
+	ignoreStreamID bool
+}
+
+// ensureBufferSize will ensure that the buffer can take at least n bytes.
+// If false is returned the buffer exceeds maximum allowed size.
+func (r *Reader) ensureBufferSize(n int) bool {
+	if len(r.buf) >= n {
+		return true
+	}
+	if n > r.maxBufSize {
+		r.err = ErrCorrupt
+		return false
+	}
+	// Realloc buffer.
+	r.buf = make([]byte, n)
+	return true
+}
+
+// Reset discards any buffered data, resets all state, and switches the Snappy
+// reader to read from r. This permits reusing a Reader rather than allocating
+// a new one.
+func (r *Reader) Reset(reader io.Reader) {
+	if !r.paramsOK {
+		return
+	}
+	r.index = nil
+	r.r = reader
+	r.err = nil
+	r.i = 0
+	r.j = 0
+	r.readHeader = r.ignoreStreamID
+}
+
+func (r *Reader) readFull(p []byte, allowEOF bool) (ok bool) {
+	if _, r.err = io.ReadFull(r.r, p); r.err != nil {
+		if r.err == io.ErrUnexpectedEOF || (r.err == io.EOF && !allowEOF) {
+			r.err = ErrCorrupt
+		}
+		return false
+	}
+	return true
+}
+
+// skippable will skip n bytes.
+// If the supplied reader supports seeking that is used.
+// tmp is used as a temporary buffer for reading.
+// The supplied slice does not need to be the size of the read.
+func (r *Reader) skippable(tmp []byte, n int, allowEOF bool, id uint8) (ok bool) {
+	if id < 0x80 {
+		r.err = fmt.Errorf("internal error: skippable id < 0x80")
+		return false
+	}
+	if fn := r.skippableCB[id-0x80]; fn != nil {
+		rd := io.LimitReader(r.r, int64(n))
+		r.err = fn(rd)
+		if r.err != nil {
+			return false
+		}
+		_, r.err = io.CopyBuffer(ioutil.Discard, rd, tmp)
+		return r.err == nil
+	}
+	if rs, ok := r.r.(io.ReadSeeker); ok {
+		_, err := rs.Seek(int64(n), io.SeekCurrent)
+		if err == nil {
+			return true
+		}
+		if err == io.ErrUnexpectedEOF || (r.err == io.EOF && !allowEOF) {
+			r.err = ErrCorrupt
+			return false
+		}
+	}
+	for n > 0 {
+		if n < len(tmp) {
+			tmp = tmp[:n]
+		}
+		if _, r.err = io.ReadFull(r.r, tmp); r.err != nil {
+			if r.err == io.ErrUnexpectedEOF || (r.err == io.EOF && !allowEOF) {
+				r.err = ErrCorrupt
+			}
+			return false
+		}
+		n -= len(tmp)
+	}
+	return true
+}
+
+// Read satisfies the io.Reader interface.
+func (r *Reader) Read(p []byte) (int, error) { + if r.err != nil { + return 0, r.err + } + for { + if r.i < r.j { + n := copy(p, r.decoded[r.i:r.j]) + r.i += n + return n, nil + } + if !r.readFull(r.buf[:4], true) { + return 0, r.err + } + chunkType := r.buf[0] + if !r.readHeader { + if chunkType != chunkTypeStreamIdentifier { + r.err = ErrCorrupt + return 0, r.err + } + r.readHeader = true + } + chunkLen := int(r.buf[1]) | int(r.buf[2])<<8 | int(r.buf[3])<<16 + + // The chunk types are specified at + // https://github.com/google/snappy/blob/master/framing_format.txt + switch chunkType { + case chunkTypeCompressedData: + r.blockStart += int64(r.j) + // Section 4.2. Compressed data (chunk type 0x00). + if chunkLen < checksumSize { + r.err = ErrCorrupt + return 0, r.err + } + if !r.ensureBufferSize(chunkLen) { + if r.err == nil { + r.err = ErrUnsupported + } + return 0, r.err + } + buf := r.buf[:chunkLen] + if !r.readFull(buf, false) { + return 0, r.err + } + checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 + buf = buf[checksumSize:] + + n, err := DecodedLen(buf) + if err != nil { + r.err = err + return 0, r.err + } + if r.snappyFrame && n > maxSnappyBlockSize { + r.err = ErrCorrupt + return 0, r.err + } + + if n > len(r.decoded) { + if n > r.maxBlock { + r.err = ErrCorrupt + return 0, r.err + } + r.decoded = make([]byte, n) + } + if _, err := Decode(r.decoded, buf); err != nil { + r.err = err + return 0, r.err + } + if crc(r.decoded[:n]) != checksum { + r.err = ErrCRC + return 0, r.err + } + r.i, r.j = 0, n + continue + + case chunkTypeUncompressedData: + r.blockStart += int64(r.j) + // Section 4.3. Uncompressed data (chunk type 0x01). + if chunkLen < checksumSize { + r.err = ErrCorrupt + return 0, r.err + } + if !r.ensureBufferSize(chunkLen) { + if r.err == nil { + r.err = ErrUnsupported + } + return 0, r.err + } + buf := r.buf[:checksumSize] + if !r.readFull(buf, false) { + return 0, r.err + } + checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 + // Read directly into r.decoded instead of via r.buf. + n := chunkLen - checksumSize + if r.snappyFrame && n > maxSnappyBlockSize { + r.err = ErrCorrupt + return 0, r.err + } + if n > len(r.decoded) { + if n > r.maxBlock { + r.err = ErrCorrupt + return 0, r.err + } + r.decoded = make([]byte, n) + } + if !r.readFull(r.decoded[:n], false) { + return 0, r.err + } + if crc(r.decoded[:n]) != checksum { + r.err = ErrCRC + return 0, r.err + } + r.i, r.j = 0, n + continue + + case chunkTypeStreamIdentifier: + // Section 4.1. Stream identifier (chunk type 0xff). + if chunkLen != len(magicBody) { + r.err = ErrCorrupt + return 0, r.err + } + if !r.readFull(r.buf[:len(magicBody)], false) { + return 0, r.err + } + if string(r.buf[:len(magicBody)]) != magicBody { + if string(r.buf[:len(magicBody)]) != magicBodySnappy { + r.err = ErrCorrupt + return 0, r.err + } else { + r.snappyFrame = true + } + } else { + r.snappyFrame = false + } + continue + } + + if chunkType <= 0x7f { + // Section 4.5. Reserved unskippable chunks (chunk types 0x02-0x7f). + // fmt.Printf("ERR chunktype: 0x%x\n", chunkType) + r.err = ErrUnsupported + return 0, r.err + } + // Section 4.4 Padding (chunk type 0xfe). + // Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd). 
+ if chunkLen > maxChunkSize { + // fmt.Printf("ERR chunkLen: 0x%x\n", chunkLen) + r.err = ErrUnsupported + return 0, r.err + } + + // fmt.Printf("skippable: ID: 0x%x, len: 0x%x\n", chunkType, chunkLen) + if !r.skippable(r.buf, chunkLen, false, chunkType) { + return 0, r.err + } + } +} + +// Skip will skip n bytes forward in the decompressed output. +// For larger skips this consumes less CPU and is faster than reading output and discarding it. +// CRC is not checked on skipped blocks. +// io.ErrUnexpectedEOF is returned if the stream ends before all bytes have been skipped. +// If a decoding error is encountered subsequent calls to Read will also fail. +func (r *Reader) Skip(n int64) error { + if n < 0 { + return errors.New("attempted negative skip") + } + if r.err != nil { + return r.err + } + + for n > 0 { + if r.i < r.j { + // Skip in buffer. + // decoded[i:j] contains decoded bytes that have not yet been passed on. + left := int64(r.j - r.i) + if left >= n { + r.i += int(n) + return nil + } + n -= int64(r.j - r.i) + r.i = r.j + } + + // Buffer empty; read blocks until we have content. + if !r.readFull(r.buf[:4], true) { + if r.err == io.EOF { + r.err = io.ErrUnexpectedEOF + } + return r.err + } + chunkType := r.buf[0] + if !r.readHeader { + if chunkType != chunkTypeStreamIdentifier { + r.err = ErrCorrupt + return r.err + } + r.readHeader = true + } + chunkLen := int(r.buf[1]) | int(r.buf[2])<<8 | int(r.buf[3])<<16 + + // The chunk types are specified at + // https://github.com/google/snappy/blob/master/framing_format.txt + switch chunkType { + case chunkTypeCompressedData: + r.blockStart += int64(r.j) + // Section 4.2. Compressed data (chunk type 0x00). + if chunkLen < checksumSize { + r.err = ErrCorrupt + return r.err + } + if !r.ensureBufferSize(chunkLen) { + if r.err == nil { + r.err = ErrUnsupported + } + return r.err + } + buf := r.buf[:chunkLen] + if !r.readFull(buf, false) { + return r.err + } + checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 + buf = buf[checksumSize:] + + dLen, err := DecodedLen(buf) + if err != nil { + r.err = err + return r.err + } + if dLen > r.maxBlock { + r.err = ErrCorrupt + return r.err + } + // Check if destination is within this block + if int64(dLen) > n { + if len(r.decoded) < dLen { + r.decoded = make([]byte, dLen) + } + if _, err := Decode(r.decoded, buf); err != nil { + r.err = err + return r.err + } + if crc(r.decoded[:dLen]) != checksum { + r.err = ErrCorrupt + return r.err + } + } else { + // Skip block completely + n -= int64(dLen) + dLen = 0 + } + r.i, r.j = 0, dLen + continue + case chunkTypeUncompressedData: + r.blockStart += int64(r.j) + // Section 4.3. Uncompressed data (chunk type 0x01). + if chunkLen < checksumSize { + r.err = ErrCorrupt + return r.err + } + if !r.ensureBufferSize(chunkLen) { + if r.err != nil { + r.err = ErrUnsupported + } + return r.err + } + buf := r.buf[:checksumSize] + if !r.readFull(buf, false) { + return r.err + } + checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 + // Read directly into r.decoded instead of via r.buf. + n2 := chunkLen - checksumSize + if n2 > len(r.decoded) { + if n2 > r.maxBlock { + r.err = ErrCorrupt + return r.err + } + r.decoded = make([]byte, n2) + } + if !r.readFull(r.decoded[:n2], false) { + return r.err + } + if int64(n2) < n { + if crc(r.decoded[:n2]) != checksum { + r.err = ErrCorrupt + return r.err + } + } + r.i, r.j = 0, n2 + continue + case chunkTypeStreamIdentifier: + // Section 4.1. 
Stream identifier (chunk type 0xff). + if chunkLen != len(magicBody) { + r.err = ErrCorrupt + return r.err + } + if !r.readFull(r.buf[:len(magicBody)], false) { + return r.err + } + if string(r.buf[:len(magicBody)]) != magicBody { + if string(r.buf[:len(magicBody)]) != magicBodySnappy { + r.err = ErrCorrupt + return r.err + } + } + + continue + } + + if chunkType <= 0x7f { + // Section 4.5. Reserved unskippable chunks (chunk types 0x02-0x7f). + r.err = ErrUnsupported + return r.err + } + if chunkLen > maxChunkSize { + r.err = ErrUnsupported + return r.err + } + // Section 4.4 Padding (chunk type 0xfe). + // Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd). + if !r.skippable(r.buf, chunkLen, false, chunkType) { + return r.err + } + } + return nil +} + +// ReadSeeker provides random or forward seeking in compressed content. +// See Reader.ReadSeeker +type ReadSeeker struct { + *Reader +} + +// ReadSeeker will return an io.ReadSeeker compatible version of the reader. +// If 'random' is specified the returned io.Seeker can be used for +// random seeking, otherwise only forward seeking is supported. +// Enabling random seeking requires the original input to support +// the io.Seeker interface. +// A custom index can be specified which will be used if supplied. +// When using a custom index, it will not be read from the input stream. +// The returned ReadSeeker contains a shallow reference to the existing Reader, +// meaning changes performed to one is reflected in the other. +func (r *Reader) ReadSeeker(random bool, index []byte) (*ReadSeeker, error) { + // Read index if provided. + if len(index) != 0 { + if r.index == nil { + r.index = &Index{} + } + if _, err := r.index.Load(index); err != nil { + return nil, ErrCantSeek{Reason: "loading index returned: " + err.Error()} + } + } + + // Check if input is seekable + rs, ok := r.r.(io.ReadSeeker) + if !ok { + if !random { + return &ReadSeeker{Reader: r}, nil + } + return nil, ErrCantSeek{Reason: "input stream isn't seekable"} + } + + if r.index != nil { + // Seekable and index, ok... + return &ReadSeeker{Reader: r}, nil + } + + // Load from stream. + r.index = &Index{} + + // Read current position. + pos, err := rs.Seek(0, io.SeekCurrent) + if err != nil { + return nil, ErrCantSeek{Reason: "seeking input returned: " + err.Error()} + } + err = r.index.LoadStream(rs) + if err != nil { + if err == ErrUnsupported { + return nil, ErrCantSeek{Reason: "input stream does not contain an index"} + } + return nil, ErrCantSeek{Reason: "reading index returned: " + err.Error()} + } + + // reset position. + _, err = rs.Seek(pos, io.SeekStart) + if err != nil { + return nil, ErrCantSeek{Reason: "seeking input returned: " + err.Error()} + } + return &ReadSeeker{Reader: r}, nil +} + +// Seek allows seeking in compressed data. +func (r *ReadSeeker) Seek(offset int64, whence int) (int64, error) { + if r.err != nil { + return 0, r.err + } + if offset == 0 && whence == io.SeekCurrent { + return r.blockStart + int64(r.i), nil + } + if !r.readHeader { + // Make sure we read the header. 
+		_, r.err = r.Read([]byte{})
+	}
+	rs, ok := r.r.(io.ReadSeeker)
+	if r.index == nil || !ok {
+		if whence == io.SeekCurrent && offset >= 0 {
+			err := r.Skip(offset)
+			return r.blockStart + int64(r.i), err
+		}
+		if whence == io.SeekStart && offset >= r.blockStart+int64(r.i) {
+			err := r.Skip(offset - r.blockStart - int64(r.i))
+			return r.blockStart + int64(r.i), err
+		}
+		return 0, ErrUnsupported
+
+	}
+
+	switch whence {
+	case io.SeekCurrent:
+		offset += r.blockStart + int64(r.i)
+	case io.SeekEnd:
+		offset = -offset
+	}
+	c, u, err := r.index.Find(offset)
+	if err != nil {
+		return r.blockStart + int64(r.i), err
+	}
+
+	// Seek to next block
+	_, err = rs.Seek(c, io.SeekStart)
+	if err != nil {
+		return 0, err
+	}
+
+	if offset < 0 {
+		offset = r.index.TotalUncompressed + offset
+	}
+
+	r.i = r.j // Remove rest of current block.
+	if u < offset {
+		// Forward inside block
+		return offset, r.Skip(offset - u)
+	}
+	return offset, nil
+}
+
+// ReadByte satisfies the io.ByteReader interface.
+func (r *Reader) ReadByte() (byte, error) {
+	if r.err != nil {
+		return 0, r.err
+	}
+	if r.i < r.j {
+		c := r.decoded[r.i]
+		r.i++
+		return c, nil
+	}
+	var tmp [1]byte
+	for i := 0; i < 10; i++ {
+		n, err := r.Read(tmp[:])
+		if err != nil {
+			return 0, err
+		}
+		if n == 1 {
+			return tmp[0], nil
+		}
+	}
+	return 0, io.ErrNoProgress
+}
+
+// SkippableCB will register a callback for chunks with the specified ID.
+// ID must be a Reserved skippable chunks ID, 0x80-0xfe (inclusive).
+// For each chunk with the ID, the callback is called with the content.
+// Any returned non-nil error will abort decompression.
+// Only one callback per ID is supported, latest sent will be used.
+// Sending a nil function will disable previous callbacks.
+func (r *Reader) SkippableCB(id uint8, fn func(r io.Reader) error) error {
+	if id < 0x80 || id > chunkTypePadding {
+		return fmt.Errorf("SkippableCB: Invalid id provided, must be 0x80-0xfe (inclusive)")
+	}
+	r.skippableCB[id-0x80] = fn
+	return nil
+}
diff --git a/vendor/github.com/klauspost/compress/s2/decode_amd64.s b/vendor/github.com/klauspost/compress/s2/decode_amd64.s
new file mode 100644
index 0000000000000..9b105e03c59ca
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/s2/decode_amd64.s
@@ -0,0 +1,568 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Copyright (c) 2019 Klaus Post. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !appengine
+// +build gc
+// +build !noasm
+
+#include "textflag.h"
+
+#define R_TMP0 AX
+#define R_TMP1 BX
+#define R_LEN CX
+#define R_OFF DX
+#define R_SRC SI
+#define R_DST DI
+#define R_DBASE R8
+#define R_DLEN R9
+#define R_DEND R10
+#define R_SBASE R11
+#define R_SLEN R12
+#define R_SEND R13
+#define R_TMP2 R14
+#define R_TMP3 R15
+
+// The asm code generally follows the pure Go code in decode_other.go, except
+// where marked with a "!!!".
+
+// func decode(dst, src []byte) int
+//
+// All local variables fit into registers. The non-zero stack size is only to
+// spill registers and push args when issuing a CALL. 
The register allocation: +// - R_TMP0 scratch +// - R_TMP1 scratch +// - R_LEN length or x (shared) +// - R_OFF offset +// - R_SRC &src[s] +// - R_DST &dst[d] +// + R_DBASE dst_base +// + R_DLEN dst_len +// + R_DEND dst_base + dst_len +// + R_SBASE src_base +// + R_SLEN src_len +// + R_SEND src_base + src_len +// - R_TMP2 used by doCopy +// - R_TMP3 used by doCopy +// +// The registers R_DBASE-R_SEND (marked with a "+") are set at the start of the +// function, and after a CALL returns, and are not otherwise modified. +// +// The d variable is implicitly R_DST - R_DBASE, and len(dst)-d is R_DEND - R_DST. +// The s variable is implicitly R_SRC - R_SBASE, and len(src)-s is R_SEND - R_SRC. +TEXT ·s2Decode(SB), NOSPLIT, $48-56 + // Initialize R_SRC, R_DST and R_DBASE-R_SEND. + MOVQ dst_base+0(FP), R_DBASE + MOVQ dst_len+8(FP), R_DLEN + MOVQ R_DBASE, R_DST + MOVQ R_DBASE, R_DEND + ADDQ R_DLEN, R_DEND + MOVQ src_base+24(FP), R_SBASE + MOVQ src_len+32(FP), R_SLEN + MOVQ R_SBASE, R_SRC + MOVQ R_SBASE, R_SEND + ADDQ R_SLEN, R_SEND + XORQ R_OFF, R_OFF + +loop: + // for s < len(src) + CMPQ R_SRC, R_SEND + JEQ end + + // R_LEN = uint32(src[s]) + // + // switch src[s] & 0x03 + MOVBLZX (R_SRC), R_LEN + MOVL R_LEN, R_TMP1 + ANDL $3, R_TMP1 + CMPL R_TMP1, $1 + JAE tagCopy + + // ---------------------------------------- + // The code below handles literal tags. + + // case tagLiteral: + // x := uint32(src[s] >> 2) + // switch + SHRL $2, R_LEN + CMPL R_LEN, $60 + JAE tagLit60Plus + + // case x < 60: + // s++ + INCQ R_SRC + +doLit: + // This is the end of the inner "switch", when we have a literal tag. + // + // We assume that R_LEN == x and x fits in a uint32, where x is the variable + // used in the pure Go decode_other.go code. + + // length = int(x) + 1 + // + // Unlike the pure Go code, we don't need to check if length <= 0 because + // R_LEN can hold 64 bits, so the increment cannot overflow. + INCQ R_LEN + + // Prepare to check if copying length bytes will run past the end of dst or + // src. + // + // R_TMP0 = len(dst) - d + // R_TMP1 = len(src) - s + MOVQ R_DEND, R_TMP0 + SUBQ R_DST, R_TMP0 + MOVQ R_SEND, R_TMP1 + SUBQ R_SRC, R_TMP1 + + // !!! Try a faster technique for short (16 or fewer bytes) copies. + // + // if length > 16 || len(dst)-d < 16 || len(src)-s < 16 { + // goto callMemmove // Fall back on calling runtime·memmove. + // } + // + // The C++ snappy code calls this TryFastAppend. It also checks len(src)-s + // against 21 instead of 16, because it cannot assume that all of its input + // is contiguous in memory and so it needs to leave enough source bytes to + // read the next tag without refilling buffers, but Go's Decode assumes + // contiguousness (the src argument is a []byte). + CMPQ R_LEN, $16 + JGT callMemmove + CMPQ R_TMP0, $16 + JLT callMemmove + CMPQ R_TMP1, $16 + JLT callMemmove + + // !!! Implement the copy from src to dst as a 16-byte load and store. + // (Decode's documentation says that dst and src must not overlap.) + // + // This always copies 16 bytes, instead of only length bytes, but that's + // OK. If the input is a valid Snappy encoding then subsequent iterations + // will fix up the overrun. Otherwise, Decode returns a nil []byte (and a + // non-nil error), so the overrun will be ignored. + // + // Note that on amd64, it is legal and cheap to issue unaligned 8-byte or + // 16-byte loads and stores. This technique probably wouldn't be as + // effective on architectures that are fussier about alignment. 
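+	//
+	// In Go terms this is roughly copy(dst[d:d+16], src[s:s+16]): always
+	// move 16 bytes and let later iterations overwrite any overrun, as
+	// described above.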
+ MOVOU 0(R_SRC), X0 + MOVOU X0, 0(R_DST) + + // d += length + // s += length + ADDQ R_LEN, R_DST + ADDQ R_LEN, R_SRC + JMP loop + +callMemmove: + // if length > len(dst)-d || length > len(src)-s { etc } + CMPQ R_LEN, R_TMP0 + JGT errCorrupt + CMPQ R_LEN, R_TMP1 + JGT errCorrupt + + // copy(dst[d:], src[s:s+length]) + // + // This means calling runtime·memmove(&dst[d], &src[s], length), so we push + // R_DST, R_SRC and R_LEN as arguments. Coincidentally, we also need to spill those + // three registers to the stack, to save local variables across the CALL. + MOVQ R_DST, 0(SP) + MOVQ R_SRC, 8(SP) + MOVQ R_LEN, 16(SP) + MOVQ R_DST, 24(SP) + MOVQ R_SRC, 32(SP) + MOVQ R_LEN, 40(SP) + MOVQ R_OFF, 48(SP) + CALL runtime·memmove(SB) + + // Restore local variables: unspill registers from the stack and + // re-calculate R_DBASE-R_SEND. + MOVQ 24(SP), R_DST + MOVQ 32(SP), R_SRC + MOVQ 40(SP), R_LEN + MOVQ 48(SP), R_OFF + MOVQ dst_base+0(FP), R_DBASE + MOVQ dst_len+8(FP), R_DLEN + MOVQ R_DBASE, R_DEND + ADDQ R_DLEN, R_DEND + MOVQ src_base+24(FP), R_SBASE + MOVQ src_len+32(FP), R_SLEN + MOVQ R_SBASE, R_SEND + ADDQ R_SLEN, R_SEND + + // d += length + // s += length + ADDQ R_LEN, R_DST + ADDQ R_LEN, R_SRC + JMP loop + +tagLit60Plus: + // !!! This fragment does the + // + // s += x - 58; if uint(s) > uint(len(src)) { etc } + // + // checks. In the asm version, we code it once instead of once per switch case. + ADDQ R_LEN, R_SRC + SUBQ $58, R_SRC + CMPQ R_SRC, R_SEND + JA errCorrupt + + // case x == 60: + CMPL R_LEN, $61 + JEQ tagLit61 + JA tagLit62Plus + + // x = uint32(src[s-1]) + MOVBLZX -1(R_SRC), R_LEN + JMP doLit + +tagLit61: + // case x == 61: + // x = uint32(src[s-2]) | uint32(src[s-1])<<8 + MOVWLZX -2(R_SRC), R_LEN + JMP doLit + +tagLit62Plus: + CMPL R_LEN, $62 + JA tagLit63 + + // case x == 62: + // x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16 + // We read one byte, safe to read one back, since we are just reading tag. + // x = binary.LittleEndian.Uint32(src[s-1:]) >> 8 + MOVL -4(R_SRC), R_LEN + SHRL $8, R_LEN + JMP doLit + +tagLit63: + // case x == 63: + // x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24 + MOVL -4(R_SRC), R_LEN + JMP doLit + +// The code above handles literal tags. +// ---------------------------------------- +// The code below handles copy tags. + +tagCopy4: + // case tagCopy4: + // s += 5 + ADDQ $5, R_SRC + + // if uint(s) > uint(len(src)) { etc } + CMPQ R_SRC, R_SEND + JA errCorrupt + + // length = 1 + int(src[s-5])>>2 + SHRQ $2, R_LEN + INCQ R_LEN + + // offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24) + MOVLQZX -4(R_SRC), R_OFF + JMP doCopy + +tagCopy2: + // case tagCopy2: + // s += 3 + ADDQ $3, R_SRC + + // if uint(s) > uint(len(src)) { etc } + CMPQ R_SRC, R_SEND + JA errCorrupt + + // length = 1 + int(src[s-3])>>2 + SHRQ $2, R_LEN + INCQ R_LEN + + // offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8) + MOVWQZX -2(R_SRC), R_OFF + JMP doCopy + +tagCopy: + // We have a copy tag. 
We assume that: + // - R_TMP1 == src[s] & 0x03 + // - R_LEN == src[s] + CMPQ R_TMP1, $2 + JEQ tagCopy2 + JA tagCopy4 + + // case tagCopy1: + // s += 2 + ADDQ $2, R_SRC + + // if uint(s) > uint(len(src)) { etc } + CMPQ R_SRC, R_SEND + JA errCorrupt + + // offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1])) + // length = 4 + int(src[s-2])>>2&0x7 + MOVBQZX -1(R_SRC), R_TMP1 + MOVQ R_LEN, R_TMP0 + SHRQ $2, R_LEN + ANDQ $0xe0, R_TMP0 + ANDQ $7, R_LEN + SHLQ $3, R_TMP0 + ADDQ $4, R_LEN + ORQ R_TMP1, R_TMP0 + + // check if repeat code, ZF set by ORQ. + JZ repeatCode + + // This is a regular copy, transfer our temporary value to R_OFF (length) + MOVQ R_TMP0, R_OFF + JMP doCopy + +// This is a repeat code. +repeatCode: + // If length < 9, reuse last offset, with the length already calculated. + CMPQ R_LEN, $9 + JL doCopyRepeat + + // Read additional bytes for length. + JE repeatLen1 + + // Rare, so the extra branch shouldn't hurt too much. + CMPQ R_LEN, $10 + JE repeatLen2 + JMP repeatLen3 + +// Read repeat lengths. +repeatLen1: + // s ++ + ADDQ $1, R_SRC + + // if uint(s) > uint(len(src)) { etc } + CMPQ R_SRC, R_SEND + JA errCorrupt + + // length = src[s-1] + 8 + MOVBQZX -1(R_SRC), R_LEN + ADDL $8, R_LEN + JMP doCopyRepeat + +repeatLen2: + // s +=2 + ADDQ $2, R_SRC + + // if uint(s) > uint(len(src)) { etc } + CMPQ R_SRC, R_SEND + JA errCorrupt + + // length = uint32(src[s-2]) | (uint32(src[s-1])<<8) + (1 << 8) + MOVWQZX -2(R_SRC), R_LEN + ADDL $260, R_LEN + JMP doCopyRepeat + +repeatLen3: + // s +=3 + ADDQ $3, R_SRC + + // if uint(s) > uint(len(src)) { etc } + CMPQ R_SRC, R_SEND + JA errCorrupt + + // length = uint32(src[s-3]) | (uint32(src[s-2])<<8) | (uint32(src[s-1])<<16) + (1 << 16) + // Read one byte further back (just part of the tag, shifted out) + MOVL -4(R_SRC), R_LEN + SHRL $8, R_LEN + ADDL $65540, R_LEN + JMP doCopyRepeat + +doCopy: + // This is the end of the outer "switch", when we have a copy tag. + // + // We assume that: + // - R_LEN == length && R_LEN > 0 + // - R_OFF == offset + + // if d < offset { etc } + MOVQ R_DST, R_TMP1 + SUBQ R_DBASE, R_TMP1 + CMPQ R_TMP1, R_OFF + JLT errCorrupt + + // Repeat values can skip the test above, since any offset > 0 will be in dst. +doCopyRepeat: + // if offset <= 0 { etc } + CMPQ R_OFF, $0 + JLE errCorrupt + + // if length > len(dst)-d { etc } + MOVQ R_DEND, R_TMP1 + SUBQ R_DST, R_TMP1 + CMPQ R_LEN, R_TMP1 + JGT errCorrupt + + // forwardCopy(dst[d:d+length], dst[d-offset:]); d += length + // + // Set: + // - R_TMP2 = len(dst)-d + // - R_TMP3 = &dst[d-offset] + MOVQ R_DEND, R_TMP2 + SUBQ R_DST, R_TMP2 + MOVQ R_DST, R_TMP3 + SUBQ R_OFF, R_TMP3 + + // !!! Try a faster technique for short (16 or fewer bytes) forward copies. + // + // First, try using two 8-byte load/stores, similar to the doLit technique + // above. Even if dst[d:d+length] and dst[d-offset:] can overlap, this is + // still OK if offset >= 8. Note that this has to be two 8-byte load/stores + // and not one 16-byte load/store, and the first store has to be before the + // second load, due to the overlap if offset is in the range [8, 16). + // + // if length > 16 || offset < 8 || len(dst)-d < 16 { + // goto slowForwardCopy + // } + // copy 16 bytes + // d += length + CMPQ R_LEN, $16 + JGT slowForwardCopy + CMPQ R_OFF, $8 + JLT slowForwardCopy + CMPQ R_TMP2, $16 + JLT slowForwardCopy + MOVQ 0(R_TMP3), R_TMP0 + MOVQ R_TMP0, 0(R_DST) + MOVQ 8(R_TMP3), R_TMP1 + MOVQ R_TMP1, 8(R_DST) + ADDQ R_LEN, R_DST + JMP loop + +slowForwardCopy: + // !!! 
If the forward copy is longer than 16 bytes, or if offset < 8, we + // can still try 8-byte load stores, provided we can overrun up to 10 extra + // bytes. As above, the overrun will be fixed up by subsequent iterations + // of the outermost loop. + // + // The C++ snappy code calls this technique IncrementalCopyFastPath. Its + // commentary says: + // + // ---- + // + // The main part of this loop is a simple copy of eight bytes at a time + // until we've copied (at least) the requested amount of bytes. However, + // if d and d-offset are less than eight bytes apart (indicating a + // repeating pattern of length < 8), we first need to expand the pattern in + // order to get the correct results. For instance, if the buffer looks like + // this, with the eight-byte and patterns marked as + // intervals: + // + // abxxxxxxxxxxxx + // [------] d-offset + // [------] d + // + // a single eight-byte copy from to will repeat the pattern + // once, after which we can move two bytes without moving : + // + // ababxxxxxxxxxx + // [------] d-offset + // [------] d + // + // and repeat the exercise until the two no longer overlap. + // + // This allows us to do very well in the special case of one single byte + // repeated many times, without taking a big hit for more general cases. + // + // The worst case of extra writing past the end of the match occurs when + // offset == 1 and length == 1; the last copy will read from byte positions + // [0..7] and write to [4..11], whereas it was only supposed to write to + // position 1. Thus, ten excess bytes. + // + // ---- + // + // That "10 byte overrun" worst case is confirmed by Go's + // TestSlowForwardCopyOverrun, which also tests the fixUpSlowForwardCopy + // and finishSlowForwardCopy algorithm. + // + // if length > len(dst)-d-10 { + // goto verySlowForwardCopy + // } + SUBQ $10, R_TMP2 + CMPQ R_LEN, R_TMP2 + JGT verySlowForwardCopy + + // We want to keep the offset, so we use R_TMP2 from here. + MOVQ R_OFF, R_TMP2 + +makeOffsetAtLeast8: + // !!! As above, expand the pattern so that offset >= 8 and we can use + // 8-byte load/stores. + // + // for offset < 8 { + // copy 8 bytes from dst[d-offset:] to dst[d:] + // length -= offset + // d += offset + // offset += offset + // // The two previous lines together means that d-offset, and therefore + // // R_TMP3, is unchanged. + // } + CMPQ R_TMP2, $8 + JGE fixUpSlowForwardCopy + MOVQ (R_TMP3), R_TMP1 + MOVQ R_TMP1, (R_DST) + SUBQ R_TMP2, R_LEN + ADDQ R_TMP2, R_DST + ADDQ R_TMP2, R_TMP2 + JMP makeOffsetAtLeast8 + +fixUpSlowForwardCopy: + // !!! Add length (which might be negative now) to d (implied by R_DST being + // &dst[d]) so that d ends up at the right place when we jump back to the + // top of the loop. Before we do that, though, we save R_DST to R_TMP0 so that, if + // length is positive, copying the remaining length bytes will write to the + // right place. + MOVQ R_DST, R_TMP0 + ADDQ R_LEN, R_DST + +finishSlowForwardCopy: + // !!! Repeat 8-byte load/stores until length <= 0. Ending with a negative + // length means that we overrun, but as above, that will be fixed up by + // subsequent iterations of the outermost loop. + CMPQ R_LEN, $0 + JLE loop + MOVQ (R_TMP3), R_TMP1 + MOVQ R_TMP1, (R_TMP0) + ADDQ $8, R_TMP3 + ADDQ $8, R_TMP0 + SUBQ $8, R_LEN + JMP finishSlowForwardCopy + +verySlowForwardCopy: + // verySlowForwardCopy is a simple implementation of forward copy. In C + // parlance, this is a do/while loop instead of a while loop, since we know + // that length > 0. 
In Go syntax: + // + // for { + // dst[d] = dst[d - offset] + // d++ + // length-- + // if length == 0 { + // break + // } + // } + MOVB (R_TMP3), R_TMP1 + MOVB R_TMP1, (R_DST) + INCQ R_TMP3 + INCQ R_DST + DECQ R_LEN + JNZ verySlowForwardCopy + JMP loop + +// The code above handles copy tags. +// ---------------------------------------- + +end: + // This is the end of the "for s < len(src)". + // + // if d != len(dst) { etc } + CMPQ R_DST, R_DEND + JNE errCorrupt + + // return 0 + MOVQ $0, ret+48(FP) + RET + +errCorrupt: + // return decodeErrCodeCorrupt + MOVQ $1, ret+48(FP) + RET diff --git a/vendor/github.com/klauspost/compress/s2/decode_arm64.s b/vendor/github.com/klauspost/compress/s2/decode_arm64.s new file mode 100644 index 0000000000000..4b63d5086a937 --- /dev/null +++ b/vendor/github.com/klauspost/compress/s2/decode_arm64.s @@ -0,0 +1,574 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !appengine +// +build gc +// +build !noasm + +#include "textflag.h" + +#define R_TMP0 R2 +#define R_TMP1 R3 +#define R_LEN R4 +#define R_OFF R5 +#define R_SRC R6 +#define R_DST R7 +#define R_DBASE R8 +#define R_DLEN R9 +#define R_DEND R10 +#define R_SBASE R11 +#define R_SLEN R12 +#define R_SEND R13 +#define R_TMP2 R14 +#define R_TMP3 R15 + +// TEST_SRC will check if R_SRC is <= SRC_END +#define TEST_SRC() \ + CMP R_SEND, R_SRC \ + BGT errCorrupt + +// MOVD R_SRC, R_TMP1 +// SUB R_SBASE, R_TMP1, R_TMP1 +// CMP R_SLEN, R_TMP1 +// BGT errCorrupt + +// The asm code generally follows the pure Go code in decode_other.go, except +// where marked with a "!!!". + +// func decode(dst, src []byte) int +// +// All local variables fit into registers. The non-zero stack size is only to +// spill registers and push args when issuing a CALL. The register allocation: +// - R_TMP0 scratch +// - R_TMP1 scratch +// - R_LEN length or x +// - R_OFF offset +// - R_SRC &src[s] +// - R_DST &dst[d] +// + R_DBASE dst_base +// + R_DLEN dst_len +// + R_DEND dst_base + dst_len +// + R_SBASE src_base +// + R_SLEN src_len +// + R_SEND src_base + src_len +// - R_TMP2 used by doCopy +// - R_TMP3 used by doCopy +// +// The registers R_DBASE-R_SEND (marked with a "+") are set at the start of the +// function, and after a CALL returns, and are not otherwise modified. +// +// The d variable is implicitly R_DST - R_DBASE, and len(dst)-d is R_DEND - R_DST. +// The s variable is implicitly R_SRC - R_SBASE, and len(src)-s is R_SEND - R_SRC. +TEXT ·s2Decode(SB), NOSPLIT, $56-64 + // Initialize R_SRC, R_DST and R_DBASE-R_SEND. + MOVD dst_base+0(FP), R_DBASE + MOVD dst_len+8(FP), R_DLEN + MOVD R_DBASE, R_DST + MOVD R_DBASE, R_DEND + ADD R_DLEN, R_DEND, R_DEND + MOVD src_base+24(FP), R_SBASE + MOVD src_len+32(FP), R_SLEN + MOVD R_SBASE, R_SRC + MOVD R_SBASE, R_SEND + ADD R_SLEN, R_SEND, R_SEND + MOVD $0, R_OFF + +loop: + // for s < len(src) + CMP R_SEND, R_SRC + BEQ end + + // R_LEN = uint32(src[s]) + // + // switch src[s] & 0x03 + MOVBU (R_SRC), R_LEN + MOVW R_LEN, R_TMP1 + ANDW $3, R_TMP1 + MOVW $1, R1 + CMPW R1, R_TMP1 + BGE tagCopy + + // ---------------------------------------- + // The code below handles literal tags. + + // case tagLiteral: + // x := uint32(src[s] >> 2) + // switch + MOVW $60, R1 + LSRW $2, R_LEN, R_LEN + CMPW R_LEN, R1 + BLS tagLit60Plus + + // case x < 60: + // s++ + ADD $1, R_SRC, R_SRC + +doLit: + // This is the end of the inner "switch", when we have a literal tag. 
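+	// (This mirrors the amd64 doLit path above; only the instruction
+	// selection differs.)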
+ // + // We assume that R_LEN == x and x fits in a uint32, where x is the variable + // used in the pure Go decode_other.go code. + + // length = int(x) + 1 + // + // Unlike the pure Go code, we don't need to check if length <= 0 because + // R_LEN can hold 64 bits, so the increment cannot overflow. + ADD $1, R_LEN, R_LEN + + // Prepare to check if copying length bytes will run past the end of dst or + // src. + // + // R_TMP0 = len(dst) - d + // R_TMP1 = len(src) - s + MOVD R_DEND, R_TMP0 + SUB R_DST, R_TMP0, R_TMP0 + MOVD R_SEND, R_TMP1 + SUB R_SRC, R_TMP1, R_TMP1 + + // !!! Try a faster technique for short (16 or fewer bytes) copies. + // + // if length > 16 || len(dst)-d < 16 || len(src)-s < 16 { + // goto callMemmove // Fall back on calling runtime·memmove. + // } + // + // The C++ snappy code calls this TryFastAppend. It also checks len(src)-s + // against 21 instead of 16, because it cannot assume that all of its input + // is contiguous in memory and so it needs to leave enough source bytes to + // read the next tag without refilling buffers, but Go's Decode assumes + // contiguousness (the src argument is a []byte). + CMP $16, R_LEN + BGT callMemmove + CMP $16, R_TMP0 + BLT callMemmove + CMP $16, R_TMP1 + BLT callMemmove + + // !!! Implement the copy from src to dst as a 16-byte load and store. + // (Decode's documentation says that dst and src must not overlap.) + // + // This always copies 16 bytes, instead of only length bytes, but that's + // OK. If the input is a valid Snappy encoding then subsequent iterations + // will fix up the overrun. Otherwise, Decode returns a nil []byte (and a + // non-nil error), so the overrun will be ignored. + // + // Note that on arm64, it is legal and cheap to issue unaligned 8-byte or + // 16-byte loads and stores. This technique probably wouldn't be as + // effective on architectures that are fussier about alignment. + LDP 0(R_SRC), (R_TMP2, R_TMP3) + STP (R_TMP2, R_TMP3), 0(R_DST) + + // d += length + // s += length + ADD R_LEN, R_DST, R_DST + ADD R_LEN, R_SRC, R_SRC + B loop + +callMemmove: + // if length > len(dst)-d || length > len(src)-s { etc } + CMP R_TMP0, R_LEN + BGT errCorrupt + CMP R_TMP1, R_LEN + BGT errCorrupt + + // copy(dst[d:], src[s:s+length]) + // + // This means calling runtime·memmove(&dst[d], &src[s], length), so we push + // R_DST, R_SRC and R_LEN as arguments. Coincidentally, we also need to spill those + // three registers to the stack, to save local variables across the CALL. + MOVD R_DST, 8(RSP) + MOVD R_SRC, 16(RSP) + MOVD R_LEN, 24(RSP) + MOVD R_DST, 32(RSP) + MOVD R_SRC, 40(RSP) + MOVD R_LEN, 48(RSP) + MOVD R_OFF, 56(RSP) + CALL runtime·memmove(SB) + + // Restore local variables: unspill registers from the stack and + // re-calculate R_DBASE-R_SEND. + MOVD 32(RSP), R_DST + MOVD 40(RSP), R_SRC + MOVD 48(RSP), R_LEN + MOVD 56(RSP), R_OFF + MOVD dst_base+0(FP), R_DBASE + MOVD dst_len+8(FP), R_DLEN + MOVD R_DBASE, R_DEND + ADD R_DLEN, R_DEND, R_DEND + MOVD src_base+24(FP), R_SBASE + MOVD src_len+32(FP), R_SLEN + MOVD R_SBASE, R_SEND + ADD R_SLEN, R_SEND, R_SEND + + // d += length + // s += length + ADD R_LEN, R_DST, R_DST + ADD R_LEN, R_SRC, R_SRC + B loop + +tagLit60Plus: + // !!! This fragment does the + // + // s += x - 58; if uint(s) > uint(len(src)) { etc } + // + // checks. In the asm version, we code it once instead of once per switch case. 
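+	// (Here x is 60..63, so x-58 advances s past the tag byte plus the
+	// 1-4 extra length bytes in a single step.)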
+ ADD R_LEN, R_SRC, R_SRC + SUB $58, R_SRC, R_SRC + TEST_SRC() + + // case x == 60: + MOVW $61, R1 + CMPW R1, R_LEN + BEQ tagLit61 + BGT tagLit62Plus + + // x = uint32(src[s-1]) + MOVBU -1(R_SRC), R_LEN + B doLit + +tagLit61: + // case x == 61: + // x = uint32(src[s-2]) | uint32(src[s-1])<<8 + MOVHU -2(R_SRC), R_LEN + B doLit + +tagLit62Plus: + CMPW $62, R_LEN + BHI tagLit63 + + // case x == 62: + // x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16 + MOVHU -3(R_SRC), R_LEN + MOVBU -1(R_SRC), R_TMP1 + ORR R_TMP1<<16, R_LEN + B doLit + +tagLit63: + // case x == 63: + // x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24 + MOVWU -4(R_SRC), R_LEN + B doLit + + // The code above handles literal tags. + // ---------------------------------------- + // The code below handles copy tags. + +tagCopy4: + // case tagCopy4: + // s += 5 + ADD $5, R_SRC, R_SRC + + // if uint(s) > uint(len(src)) { etc } + MOVD R_SRC, R_TMP1 + SUB R_SBASE, R_TMP1, R_TMP1 + CMP R_SLEN, R_TMP1 + BGT errCorrupt + + // length = 1 + int(src[s-5])>>2 + MOVD $1, R1 + ADD R_LEN>>2, R1, R_LEN + + // offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24) + MOVWU -4(R_SRC), R_OFF + B doCopy + +tagCopy2: + // case tagCopy2: + // s += 3 + ADD $3, R_SRC, R_SRC + + // if uint(s) > uint(len(src)) { etc } + TEST_SRC() + + // length = 1 + int(src[s-3])>>2 + MOVD $1, R1 + ADD R_LEN>>2, R1, R_LEN + + // offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8) + MOVHU -2(R_SRC), R_OFF + B doCopy + +tagCopy: + // We have a copy tag. We assume that: + // - R_TMP1 == src[s] & 0x03 + // - R_LEN == src[s] + CMP $2, R_TMP1 + BEQ tagCopy2 + BGT tagCopy4 + + // case tagCopy1: + // s += 2 + ADD $2, R_SRC, R_SRC + + // if uint(s) > uint(len(src)) { etc } + TEST_SRC() + + // offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1])) + // Calculate offset in R_TMP0 in case it is a repeat. + MOVD R_LEN, R_TMP0 + AND $0xe0, R_TMP0 + MOVBU -1(R_SRC), R_TMP1 + ORR R_TMP0<<3, R_TMP1, R_TMP0 + + // length = 4 + int(src[s-2])>>2&0x7 + MOVD $7, R1 + AND R_LEN>>2, R1, R_LEN + ADD $4, R_LEN, R_LEN + + // check if repeat code with offset 0. + CMP $0, R_TMP0 + BEQ repeatCode + + // This is a regular copy, transfer our temporary value to R_OFF (offset) + MOVD R_TMP0, R_OFF + B doCopy + + // This is a repeat code. +repeatCode: + // If length < 9, reuse last offset, with the length already calculated. + CMP $9, R_LEN + BLT doCopyRepeat + BEQ repeatLen1 + CMP $10, R_LEN + BEQ repeatLen2 + +repeatLen3: + // s +=3 + ADD $3, R_SRC, R_SRC + + // if uint(s) > uint(len(src)) { etc } + TEST_SRC() + + // length = uint32(src[s-3]) | (uint32(src[s-2])<<8) | (uint32(src[s-1])<<16) + 65540 + MOVBU -1(R_SRC), R_TMP0 + MOVHU -3(R_SRC), R_LEN + ORR R_TMP0<<16, R_LEN, R_LEN + ADD $65540, R_LEN, R_LEN + B doCopyRepeat + +repeatLen2: + // s +=2 + ADD $2, R_SRC, R_SRC + + // if uint(s) > uint(len(src)) { etc } + TEST_SRC() + + // length = uint32(src[s-2]) | (uint32(src[s-1])<<8) + 260 + MOVHU -2(R_SRC), R_LEN + ADD $260, R_LEN, R_LEN + B doCopyRepeat + +repeatLen1: + // s +=1 + ADD $1, R_SRC, R_SRC + + // if uint(s) > uint(len(src)) { etc } + TEST_SRC() + + // length = src[s-1] + 8 + MOVBU -1(R_SRC), R_LEN + ADD $8, R_LEN, R_LEN + B doCopyRepeat + +doCopy: + // This is the end of the outer "switch", when we have a copy tag. 
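+	// (R_OFF is left untouched between iterations, which is what lets the
+	// repeat codes above reuse the previous offset via doCopyRepeat.)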
+ // + // We assume that: + // - R_LEN == length && R_LEN > 0 + // - R_OFF == offset + + // if d < offset { etc } + MOVD R_DST, R_TMP1 + SUB R_DBASE, R_TMP1, R_TMP1 + CMP R_OFF, R_TMP1 + BLT errCorrupt + + // Repeat values can skip the test above, since any offset > 0 will be in dst. +doCopyRepeat: + + // if offset <= 0 { etc } + CMP $0, R_OFF + BLE errCorrupt + + // if length > len(dst)-d { etc } + MOVD R_DEND, R_TMP1 + SUB R_DST, R_TMP1, R_TMP1 + CMP R_TMP1, R_LEN + BGT errCorrupt + + // forwardCopy(dst[d:d+length], dst[d-offset:]); d += length + // + // Set: + // - R_TMP2 = len(dst)-d + // - R_TMP3 = &dst[d-offset] + MOVD R_DEND, R_TMP2 + SUB R_DST, R_TMP2, R_TMP2 + MOVD R_DST, R_TMP3 + SUB R_OFF, R_TMP3, R_TMP3 + + // !!! Try a faster technique for short (16 or fewer bytes) forward copies. + // + // First, try using two 8-byte load/stores, similar to the doLit technique + // above. Even if dst[d:d+length] and dst[d-offset:] can overlap, this is + // still OK if offset >= 8. Note that this has to be two 8-byte load/stores + // and not one 16-byte load/store, and the first store has to be before the + // second load, due to the overlap if offset is in the range [8, 16). + // + // if length > 16 || offset < 8 || len(dst)-d < 16 { + // goto slowForwardCopy + // } + // copy 16 bytes + // d += length + CMP $16, R_LEN + BGT slowForwardCopy + CMP $8, R_OFF + BLT slowForwardCopy + CMP $16, R_TMP2 + BLT slowForwardCopy + MOVD 0(R_TMP3), R_TMP0 + MOVD R_TMP0, 0(R_DST) + MOVD 8(R_TMP3), R_TMP1 + MOVD R_TMP1, 8(R_DST) + ADD R_LEN, R_DST, R_DST + B loop + +slowForwardCopy: + // !!! If the forward copy is longer than 16 bytes, or if offset < 8, we + // can still try 8-byte load stores, provided we can overrun up to 10 extra + // bytes. As above, the overrun will be fixed up by subsequent iterations + // of the outermost loop. + // + // The C++ snappy code calls this technique IncrementalCopyFastPath. Its + // commentary says: + // + // ---- + // + // The main part of this loop is a simple copy of eight bytes at a time + // until we've copied (at least) the requested amount of bytes. However, + // if d and d-offset are less than eight bytes apart (indicating a + // repeating pattern of length < 8), we first need to expand the pattern in + // order to get the correct results. For instance, if the buffer looks like + // this, with the eight-byte and patterns marked as + // intervals: + // + // abxxxxxxxxxxxx + // [------] d-offset + // [------] d + // + // a single eight-byte copy from to will repeat the pattern + // once, after which we can move two bytes without moving : + // + // ababxxxxxxxxxx + // [------] d-offset + // [------] d + // + // and repeat the exercise until the two no longer overlap. + // + // This allows us to do very well in the special case of one single byte + // repeated many times, without taking a big hit for more general cases. + // + // The worst case of extra writing past the end of the match occurs when + // offset == 1 and length == 1; the last copy will read from byte positions + // [0..7] and write to [4..11], whereas it was only supposed to write to + // position 1. Thus, ten excess bytes. + // + // ---- + // + // That "10 byte overrun" worst case is confirmed by Go's + // TestSlowForwardCopyOverrun, which also tests the fixUpSlowForwardCopy + // and finishSlowForwardCopy algorithm. 
+ // + // if length > len(dst)-d-10 { + // goto verySlowForwardCopy + // } + SUB $10, R_TMP2, R_TMP2 + CMP R_TMP2, R_LEN + BGT verySlowForwardCopy + + // We want to keep the offset, so we use R_TMP2 from here. + MOVD R_OFF, R_TMP2 + +makeOffsetAtLeast8: + // !!! As above, expand the pattern so that offset >= 8 and we can use + // 8-byte load/stores. + // + // for offset < 8 { + // copy 8 bytes from dst[d-offset:] to dst[d:] + // length -= offset + // d += offset + // offset += offset + // // The two previous lines together means that d-offset, and therefore + // // R_TMP3, is unchanged. + // } + CMP $8, R_TMP2 + BGE fixUpSlowForwardCopy + MOVD (R_TMP3), R_TMP1 + MOVD R_TMP1, (R_DST) + SUB R_TMP2, R_LEN, R_LEN + ADD R_TMP2, R_DST, R_DST + ADD R_TMP2, R_TMP2, R_TMP2 + B makeOffsetAtLeast8 + +fixUpSlowForwardCopy: + // !!! Add length (which might be negative now) to d (implied by R_DST being + // &dst[d]) so that d ends up at the right place when we jump back to the + // top of the loop. Before we do that, though, we save R_DST to R_TMP0 so that, if + // length is positive, copying the remaining length bytes will write to the + // right place. + MOVD R_DST, R_TMP0 + ADD R_LEN, R_DST, R_DST + +finishSlowForwardCopy: + // !!! Repeat 8-byte load/stores until length <= 0. Ending with a negative + // length means that we overrun, but as above, that will be fixed up by + // subsequent iterations of the outermost loop. + MOVD $0, R1 + CMP R1, R_LEN + BLE loop + MOVD (R_TMP3), R_TMP1 + MOVD R_TMP1, (R_TMP0) + ADD $8, R_TMP3, R_TMP3 + ADD $8, R_TMP0, R_TMP0 + SUB $8, R_LEN, R_LEN + B finishSlowForwardCopy + +verySlowForwardCopy: + // verySlowForwardCopy is a simple implementation of forward copy. In C + // parlance, this is a do/while loop instead of a while loop, since we know + // that length > 0. In Go syntax: + // + // for { + // dst[d] = dst[d - offset] + // d++ + // length-- + // if length == 0 { + // break + // } + // } + MOVB (R_TMP3), R_TMP1 + MOVB R_TMP1, (R_DST) + ADD $1, R_TMP3, R_TMP3 + ADD $1, R_DST, R_DST + SUB $1, R_LEN, R_LEN + CBNZ R_LEN, verySlowForwardCopy + B loop + + // The code above handles copy tags. + // ---------------------------------------- + +end: + // This is the end of the "for s < len(src)". + // + // if d != len(dst) { etc } + CMP R_DEND, R_DST + BNE errCorrupt + + // return 0 + MOVD $0, ret+48(FP) + RET + +errCorrupt: + // return decodeErrCodeCorrupt + MOVD $1, R_TMP0 + MOVD R_TMP0, ret+48(FP) + RET diff --git a/vendor/github.com/klauspost/compress/s2/decode_asm.go b/vendor/github.com/klauspost/compress/s2/decode_asm.go new file mode 100644 index 0000000000000..cb3576edd470b --- /dev/null +++ b/vendor/github.com/klauspost/compress/s2/decode_asm.go @@ -0,0 +1,17 @@ +// Copyright 2016 The Snappy-Go Authors. All rights reserved. +// Copyright (c) 2019 Klaus Post. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build (amd64 || arm64) && !appengine && gc && !noasm +// +build amd64 arm64 +// +build !appengine +// +build gc +// +build !noasm + +package s2 + +// decode has the same semantics as in decode_other.go. +// +//go:noescape +func s2Decode(dst, src []byte) int diff --git a/vendor/github.com/klauspost/compress/s2/decode_other.go b/vendor/github.com/klauspost/compress/s2/decode_other.go new file mode 100644 index 0000000000000..1074ebd215e1b --- /dev/null +++ b/vendor/github.com/klauspost/compress/s2/decode_other.go @@ -0,0 +1,267 @@ +// Copyright 2016 The Snappy-Go Authors. 
All rights reserved. +// Copyright (c) 2019 Klaus Post. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build (!amd64 && !arm64) || appengine || !gc || noasm +// +build !amd64,!arm64 appengine !gc noasm + +package s2 + +import ( + "fmt" + "strconv" +) + +// decode writes the decoding of src to dst. It assumes that the varint-encoded +// length of the decompressed bytes has already been read, and that len(dst) +// equals that length. +// +// It returns 0 on success or a decodeErrCodeXxx error code on failure. +func s2Decode(dst, src []byte) int { + const debug = false + if debug { + fmt.Println("Starting decode, dst len:", len(dst)) + } + var d, s, length int + offset := 0 + + // As long as we can read at least 5 bytes... + for s < len(src)-5 { + switch src[s] & 0x03 { + case tagLiteral: + x := uint32(src[s] >> 2) + switch { + case x < 60: + s++ + case x == 60: + s += 2 + x = uint32(src[s-1]) + case x == 61: + s += 3 + x = uint32(src[s-2]) | uint32(src[s-1])<<8 + case x == 62: + s += 4 + x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16 + case x == 63: + s += 5 + x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24 + } + length = int(x) + 1 + if length > len(dst)-d || length > len(src)-s || (strconv.IntSize == 32 && length <= 0) { + return decodeErrCodeCorrupt + } + if debug { + fmt.Println("literals, length:", length, "d-after:", d+length) + } + + copy(dst[d:], src[s:s+length]) + d += length + s += length + continue + + case tagCopy1: + s += 2 + length = int(src[s-2]) >> 2 & 0x7 + toffset := int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1])) + if toffset == 0 { + if debug { + fmt.Print("(repeat) ") + } + // keep last offset + switch length { + case 5: + s += 1 + length = int(uint32(src[s-1])) + 4 + case 6: + s += 2 + length = int(uint32(src[s-2])|(uint32(src[s-1])<<8)) + (1 << 8) + case 7: + s += 3 + length = int(uint32(src[s-3])|(uint32(src[s-2])<<8)|(uint32(src[s-1])<<16)) + (1 << 16) + default: // 0-> 4 + } + } else { + offset = toffset + } + length += 4 + case tagCopy2: + s += 3 + length = 1 + int(src[s-3])>>2 + offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8) + + case tagCopy4: + s += 5 + length = 1 + int(src[s-5])>>2 + offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24) + } + + if offset <= 0 || d < offset || length > len(dst)-d { + return decodeErrCodeCorrupt + } + + if debug { + fmt.Println("copy, length:", length, "offset:", offset, "d-after:", d+length) + } + + // Copy from an earlier sub-slice of dst to a later sub-slice. + // If no overlap, use the built-in copy: + if offset > length { + copy(dst[d:d+length], dst[d-offset:]) + d += length + continue + } + + // Unlike the built-in copy function, this byte-by-byte copy always runs + // forwards, even if the slices overlap. Conceptually, this is: + // + // d += forwardCopy(dst[d:d+length], dst[d-offset:]) + // + // We align the slices into a and b and show the compiler they are the same size. + // This allows the loop to run without bounds checks. + a := dst[d : d+length] + b := dst[d-offset:] + b = b[:len(a)] + for i := range a { + a[i] = b[i] + } + d += length + } + + // Remaining with extra checks... + for s < len(src) { + switch src[s] & 0x03 { + case tagLiteral: + x := uint32(src[s] >> 2) + switch { + case x < 60: + s++ + case x == 60: + s += 2 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. 
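+					// (If s wrapped negative, uint(s) becomes huge, so this
+					// comparison still rejects the input as corrupt.)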
+ return decodeErrCodeCorrupt + } + x = uint32(src[s-1]) + case x == 61: + s += 3 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + x = uint32(src[s-2]) | uint32(src[s-1])<<8 + case x == 62: + s += 4 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16 + case x == 63: + s += 5 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24 + } + length = int(x) + 1 + if length > len(dst)-d || length > len(src)-s || (strconv.IntSize == 32 && length <= 0) { + return decodeErrCodeCorrupt + } + if debug { + fmt.Println("literals, length:", length, "d-after:", d+length) + } + + copy(dst[d:], src[s:s+length]) + d += length + s += length + continue + + case tagCopy1: + s += 2 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + length = int(src[s-2]) >> 2 & 0x7 + toffset := int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1])) + if toffset == 0 { + if debug { + fmt.Print("(repeat) ") + } + // keep last offset + switch length { + case 5: + s += 1 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + length = int(uint32(src[s-1])) + 4 + case 6: + s += 2 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + length = int(uint32(src[s-2])|(uint32(src[s-1])<<8)) + (1 << 8) + case 7: + s += 3 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + length = int(uint32(src[s-3])|(uint32(src[s-2])<<8)|(uint32(src[s-1])<<16)) + (1 << 16) + default: // 0-> 4 + } + } else { + offset = toffset + } + length += 4 + case tagCopy2: + s += 3 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + length = 1 + int(src[s-3])>>2 + offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8) + + case tagCopy4: + s += 5 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + length = 1 + int(src[s-5])>>2 + offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24) + } + + if offset <= 0 || d < offset || length > len(dst)-d { + return decodeErrCodeCorrupt + } + + if debug { + fmt.Println("copy, length:", length, "offset:", offset, "d-after:", d+length) + } + + // Copy from an earlier sub-slice of dst to a later sub-slice. + // If no overlap, use the built-in copy: + if offset > length { + copy(dst[d:d+length], dst[d-offset:]) + d += length + continue + } + + // Unlike the built-in copy function, this byte-by-byte copy always runs + // forwards, even if the slices overlap. Conceptually, this is: + // + // d += forwardCopy(dst[d:d+length], dst[d-offset:]) + // + // We align the slices into a and b and show the compiler they are the same size. + // This allows the loop to run without bounds checks. 
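+			// (Reslicing b to len(a) is the standard Go bounds-check
+			// elimination hint: the compiler can prove a[i] and b[i] are in
+			// range for every i in the loop.)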
+ a := dst[d : d+length] + b := dst[d-offset:] + b = b[:len(a)] + for i := range a { + a[i] = b[i] + } + d += length + } + + if d != len(dst) { + return decodeErrCodeCorrupt + } + return 0 +} diff --git a/vendor/github.com/klauspost/compress/s2/encode.go b/vendor/github.com/klauspost/compress/s2/encode.go new file mode 100644 index 0000000000000..59f992ca6ebea --- /dev/null +++ b/vendor/github.com/klauspost/compress/s2/encode.go @@ -0,0 +1,1347 @@ +// Copyright 2011 The Snappy-Go Authors. All rights reserved. +// Copyright (c) 2019 Klaus Post. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package s2 + +import ( + "crypto/rand" + "encoding/binary" + "errors" + "fmt" + "io" + "math" + "math/bits" + "runtime" + "sync" +) + +// Encode returns the encoded form of src. The returned slice may be a sub- +// slice of dst if dst was large enough to hold the entire encoded block. +// Otherwise, a newly allocated slice will be returned. +// +// The dst and src must not overlap. It is valid to pass a nil dst. +// +// The blocks will require the same amount of memory to decode as encoding, +// and does not make for concurrent decoding. +// Also note that blocks do not contain CRC information, so corruption may be undetected. +// +// If you need to encode larger amounts of data, consider using +// the streaming interface which gives all of these features. +func Encode(dst, src []byte) []byte { + if n := MaxEncodedLen(len(src)); n < 0 { + panic(ErrTooLarge) + } else if cap(dst) < n { + dst = make([]byte, n) + } else { + dst = dst[:n] + } + + // The block starts with the varint-encoded length of the decompressed bytes. + d := binary.PutUvarint(dst, uint64(len(src))) + + if len(src) == 0 { + return dst[:d] + } + if len(src) < minNonLiteralBlockSize { + d += emitLiteral(dst[d:], src) + return dst[:d] + } + n := encodeBlock(dst[d:], src) + if n > 0 { + d += n + return dst[:d] + } + // Not compressible + d += emitLiteral(dst[d:], src) + return dst[:d] +} + +// EncodeBetter returns the encoded form of src. The returned slice may be a sub- +// slice of dst if dst was large enough to hold the entire encoded block. +// Otherwise, a newly allocated slice will be returned. +// +// EncodeBetter compresses better than Encode but typically with a +// 10-40% speed decrease on both compression and decompression. +// +// The dst and src must not overlap. It is valid to pass a nil dst. +// +// The blocks will require the same amount of memory to decode as encoding, +// and does not make for concurrent decoding. +// Also note that blocks do not contain CRC information, so corruption may be undetected. +// +// If you need to encode larger amounts of data, consider using +// the streaming interface which gives all of these features. +func EncodeBetter(dst, src []byte) []byte { + if n := MaxEncodedLen(len(src)); n < 0 { + panic(ErrTooLarge) + } else if len(dst) < n { + dst = make([]byte, n) + } + + // The block starts with the varint-encoded length of the decompressed bytes. + d := binary.PutUvarint(dst, uint64(len(src))) + + if len(src) == 0 { + return dst[:d] + } + if len(src) < minNonLiteralBlockSize { + d += emitLiteral(dst[d:], src) + return dst[:d] + } + n := encodeBlockBetter(dst[d:], src) + if n > 0 { + d += n + return dst[:d] + } + // Not compressible + d += emitLiteral(dst[d:], src) + return dst[:d] +} + +// EncodeBest returns the encoded form of src. 
The returned slice may be a sub- +// slice of dst if dst was large enough to hold the entire encoded block. +// Otherwise, a newly allocated slice will be returned. +// +// EncodeBest compresses as good as reasonably possible but with a +// big speed decrease. +// +// The dst and src must not overlap. It is valid to pass a nil dst. +// +// The blocks will require the same amount of memory to decode as encoding, +// and does not make for concurrent decoding. +// Also note that blocks do not contain CRC information, so corruption may be undetected. +// +// If you need to encode larger amounts of data, consider using +// the streaming interface which gives all of these features. +func EncodeBest(dst, src []byte) []byte { + if n := MaxEncodedLen(len(src)); n < 0 { + panic(ErrTooLarge) + } else if len(dst) < n { + dst = make([]byte, n) + } + + // The block starts with the varint-encoded length of the decompressed bytes. + d := binary.PutUvarint(dst, uint64(len(src))) + + if len(src) == 0 { + return dst[:d] + } + if len(src) < minNonLiteralBlockSize { + d += emitLiteral(dst[d:], src) + return dst[:d] + } + n := encodeBlockBest(dst[d:], src) + if n > 0 { + d += n + return dst[:d] + } + // Not compressible + d += emitLiteral(dst[d:], src) + return dst[:d] +} + +// EncodeSnappy returns the encoded form of src. The returned slice may be a sub- +// slice of dst if dst was large enough to hold the entire encoded block. +// Otherwise, a newly allocated slice will be returned. +// +// The output is Snappy compatible and will likely decompress faster. +// +// The dst and src must not overlap. It is valid to pass a nil dst. +// +// The blocks will require the same amount of memory to decode as encoding, +// and does not make for concurrent decoding. +// Also note that blocks do not contain CRC information, so corruption may be undetected. +// +// If you need to encode larger amounts of data, consider using +// the streaming interface which gives all of these features. +func EncodeSnappy(dst, src []byte) []byte { + if n := MaxEncodedLen(len(src)); n < 0 { + panic(ErrTooLarge) + } else if cap(dst) < n { + dst = make([]byte, n) + } else { + dst = dst[:n] + } + + // The block starts with the varint-encoded length of the decompressed bytes. + d := binary.PutUvarint(dst, uint64(len(src))) + + if len(src) == 0 { + return dst[:d] + } + if len(src) < minNonLiteralBlockSize { + d += emitLiteral(dst[d:], src) + return dst[:d] + } + + n := encodeBlockSnappy(dst[d:], src) + if n > 0 { + d += n + return dst[:d] + } + // Not compressible + d += emitLiteral(dst[d:], src) + return dst[:d] +} + +// EncodeSnappyBetter returns the encoded form of src. The returned slice may be a sub- +// slice of dst if dst was large enough to hold the entire encoded block. +// Otherwise, a newly allocated slice will be returned. +// +// The output is Snappy compatible and will likely decompress faster. +// +// The dst and src must not overlap. It is valid to pass a nil dst. +// +// The blocks will require the same amount of memory to decode as encoding, +// and does not make for concurrent decoding. +// Also note that blocks do not contain CRC information, so corruption may be undetected. +// +// If you need to encode larger amounts of data, consider using +// the streaming interface which gives all of these features. 
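+//
+// A minimal round trip might look like this (illustrative; the output can
+// be decoded with s2.Decode as well as by a Snappy decoder):
+//
+//	comp := s2.EncodeSnappyBetter(nil, data)
+//	dec, err := s2.Decode(nil, comp)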
+func EncodeSnappyBetter(dst, src []byte) []byte { + if n := MaxEncodedLen(len(src)); n < 0 { + panic(ErrTooLarge) + } else if cap(dst) < n { + dst = make([]byte, n) + } else { + dst = dst[:n] + } + + // The block starts with the varint-encoded length of the decompressed bytes. + d := binary.PutUvarint(dst, uint64(len(src))) + + if len(src) == 0 { + return dst[:d] + } + if len(src) < minNonLiteralBlockSize { + d += emitLiteral(dst[d:], src) + return dst[:d] + } + + n := encodeBlockBetterSnappy(dst[d:], src) + if n > 0 { + d += n + return dst[:d] + } + // Not compressible + d += emitLiteral(dst[d:], src) + return dst[:d] +} + +// EncodeSnappyBest returns the encoded form of src. The returned slice may be a sub- +// slice of dst if dst was large enough to hold the entire encoded block. +// Otherwise, a newly allocated slice will be returned. +// +// The output is Snappy compatible and will likely decompress faster. +// +// The dst and src must not overlap. It is valid to pass a nil dst. +// +// The blocks will require the same amount of memory to decode as encoding, +// and does not make for concurrent decoding. +// Also note that blocks do not contain CRC information, so corruption may be undetected. +// +// If you need to encode larger amounts of data, consider using +// the streaming interface which gives all of these features. +func EncodeSnappyBest(dst, src []byte) []byte { + if n := MaxEncodedLen(len(src)); n < 0 { + panic(ErrTooLarge) + } else if cap(dst) < n { + dst = make([]byte, n) + } else { + dst = dst[:n] + } + + // The block starts with the varint-encoded length of the decompressed bytes. + d := binary.PutUvarint(dst, uint64(len(src))) + + if len(src) == 0 { + return dst[:d] + } + if len(src) < minNonLiteralBlockSize { + d += emitLiteral(dst[d:], src) + return dst[:d] + } + + n := encodeBlockBestSnappy(dst[d:], src) + if n > 0 { + d += n + return dst[:d] + } + // Not compressible + d += emitLiteral(dst[d:], src) + return dst[:d] +} + +// ConcatBlocks will concatenate the supplied blocks and append them to the supplied destination. +// If the destination is nil or too small, a new will be allocated. +// The blocks are not validated, so garbage in = garbage out. +// dst may not overlap block data. +// Any data in dst is preserved as is, so it will not be considered a block. +func ConcatBlocks(dst []byte, blocks ...[]byte) ([]byte, error) { + totalSize := uint64(0) + compSize := 0 + for _, b := range blocks { + l, hdr, err := decodedLen(b) + if err != nil { + return nil, err + } + totalSize += uint64(l) + compSize += len(b) - hdr + } + if totalSize == 0 { + dst = append(dst, 0) + return dst, nil + } + if totalSize > math.MaxUint32 { + return nil, ErrTooLarge + } + var tmp [binary.MaxVarintLen32]byte + hdrSize := binary.PutUvarint(tmp[:], totalSize) + wantSize := hdrSize + compSize + + if cap(dst)-len(dst) < wantSize { + dst = append(make([]byte, 0, wantSize+len(dst)), dst...) + } + dst = append(dst, tmp[:hdrSize]...) + for _, b := range blocks { + _, hdr, err := decodedLen(b) + if err != nil { + return nil, err + } + dst = append(dst, b[hdr:]...) + } + return dst, nil +} + +// inputMargin is the minimum number of extra input bytes to keep, inside +// encodeBlock's inner loop. On some architectures, this margin lets us +// implement a fast path for emitLiteral, where the copy of short (<= 16 byte) +// literals can be implemented as a single load to and store from a 16-byte +// register. 
That literal's actual length can be as short as 1 byte, so this +// can copy up to 15 bytes too much, but that's OK as subsequent iterations of +// the encoding loop will fix up the copy overrun, and this inputMargin ensures +// that we don't overrun the dst and src buffers. +const inputMargin = 8 + +// minNonLiteralBlockSize is the minimum size of the input to encodeBlock that +// will be accepted by the encoder. +const minNonLiteralBlockSize = 32 + +// MaxBlockSize is the maximum value where MaxEncodedLen will return a valid block size. +// Blocks this big are highly discouraged, though. +const MaxBlockSize = math.MaxUint32 - binary.MaxVarintLen32 - 5 + +// MaxEncodedLen returns the maximum length of a snappy block, given its +// uncompressed length. +// +// It will return a negative value if srcLen is too large to encode. +// 32 bit platforms will have lower thresholds for rejecting big content. +func MaxEncodedLen(srcLen int) int { + n := uint64(srcLen) + if n > 0xffffffff { + // Also includes negative. + return -1 + } + // Size of the varint encoded block size. + n = n + uint64((bits.Len64(n)+7)/7) + + // Add maximum size of encoding block as literals. + n += uint64(literalExtraSize(int64(srcLen))) + if n > 0xffffffff { + return -1 + } + return int(n) +} + +var errClosed = errors.New("s2: Writer is closed") + +// NewWriter returns a new Writer that compresses to w, using the +// framing format described at +// https://github.com/google/snappy/blob/master/framing_format.txt +// +// Users must call Close to guarantee all data has been forwarded to +// the underlying io.Writer and that resources are released. +// They may also call Flush zero or more times before calling Close. +func NewWriter(w io.Writer, opts ...WriterOption) *Writer { + w2 := Writer{ + blockSize: defaultBlockSize, + concurrency: runtime.GOMAXPROCS(0), + randSrc: rand.Reader, + level: levelFast, + } + for _, opt := range opts { + if err := opt(&w2); err != nil { + w2.errState = err + return &w2 + } + } + w2.obufLen = obufHeaderLen + MaxEncodedLen(w2.blockSize) + w2.paramsOK = true + w2.ibuf = make([]byte, 0, w2.blockSize) + w2.buffers.New = func() interface{} { + return make([]byte, w2.obufLen) + } + w2.Reset(w) + return &w2 +} + +// Writer is an io.Writer that can write Snappy-compressed bytes. +type Writer struct { + errMu sync.Mutex + errState error + + // ibuf is a buffer for the incoming (uncompressed) bytes. + ibuf []byte + + blockSize int + obufLen int + concurrency int + written int64 + uncompWritten int64 // Bytes sent to compression + output chan chan result + buffers sync.Pool + pad int + + writer io.Writer + randSrc io.Reader + writerWg sync.WaitGroup + index Index + + // wroteStreamHeader is whether we have written the stream header. + wroteStreamHeader bool + paramsOK bool + snappy bool + flushOnWrite bool + appendIndex bool + level uint8 +} + +const ( + levelUncompressed = iota + 1 + levelFast + levelBetter + levelBest +) + +type result struct { + b []byte + // Uncompressed start offset + startOffset int64 +} + +// err returns the previously set error. +// If no error has been set it is set to err if not nil. +func (w *Writer) err(err error) error { + w.errMu.Lock() + errSet := w.errState + if errSet == nil && err != nil { + w.errState = err + errSet = err + } + w.errMu.Unlock() + return errSet +} + +// Reset discards the writer's state and switches the Snappy writer to write to w. +// This permits reusing a Writer rather than allocating a new one. 
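+//
+// A typical reuse pattern (illustrative):
+//
+//	w := s2.NewWriter(nil)
+//	for _, dst := range outputs {
+//		w.Reset(dst)
+//		// ... write data ...
+//		if err := w.Close(); err != nil {
+//			// handle error
+//		}
+//	}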
+func (w *Writer) Reset(writer io.Writer) { + if !w.paramsOK { + return + } + // Close previous writer, if any. + if w.output != nil { + close(w.output) + w.writerWg.Wait() + w.output = nil + } + w.errState = nil + w.ibuf = w.ibuf[:0] + w.wroteStreamHeader = false + w.written = 0 + w.writer = writer + w.uncompWritten = 0 + w.index.reset(w.blockSize) + + // If we didn't get a writer, stop here. + if writer == nil { + return + } + // If no concurrency requested, don't spin up writer goroutine. + if w.concurrency == 1 { + return + } + + toWrite := make(chan chan result, w.concurrency) + w.output = toWrite + w.writerWg.Add(1) + + // Start a writer goroutine that will write all output in order. + go func() { + defer w.writerWg.Done() + + // Get a queued write. + for write := range toWrite { + // Wait for the data to be available. + input := <-write + in := input.b + if len(in) > 0 { + if w.err(nil) == nil { + // Don't expose data from previous buffers. + toWrite := in[:len(in):len(in)] + // Write to output. + n, err := writer.Write(toWrite) + if err == nil && n != len(toWrite) { + err = io.ErrShortBuffer + } + _ = w.err(err) + w.err(w.index.add(w.written, input.startOffset)) + w.written += int64(n) + } + } + if cap(in) >= w.obufLen { + w.buffers.Put(in) + } + // close the incoming write request. + // This can be used for synchronizing flushes. + close(write) + } + }() +} + +// Write satisfies the io.Writer interface. +func (w *Writer) Write(p []byte) (nRet int, errRet error) { + if err := w.err(nil); err != nil { + return 0, err + } + if w.flushOnWrite { + return w.write(p) + } + // If we exceed the input buffer size, start writing + for len(p) > (cap(w.ibuf)-len(w.ibuf)) && w.err(nil) == nil { + var n int + if len(w.ibuf) == 0 { + // Large write, empty buffer. + // Write directly from p to avoid copy. + n, _ = w.write(p) + } else { + n = copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p) + w.ibuf = w.ibuf[:len(w.ibuf)+n] + w.write(w.ibuf) + w.ibuf = w.ibuf[:0] + } + nRet += n + p = p[n:] + } + if err := w.err(nil); err != nil { + return nRet, err + } + // p should always be able to fit into w.ibuf now. + n := copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p) + w.ibuf = w.ibuf[:len(w.ibuf)+n] + nRet += n + return nRet, nil +} + +// ReadFrom implements the io.ReaderFrom interface. +// Using this is typically more efficient since it avoids a memory copy. +// ReadFrom reads data from r until EOF or error. +// The return value n is the number of bytes read. +// Any error except io.EOF encountered during the read is also returned. +func (w *Writer) ReadFrom(r io.Reader) (n int64, err error) { + if err := w.err(nil); err != nil { + return 0, err + } + if len(w.ibuf) > 0 { + err := w.Flush() + if err != nil { + return 0, err + } + } + if br, ok := r.(byter); ok { + buf := br.Bytes() + if err := w.EncodeBuffer(buf); err != nil { + return 0, err + } + return int64(len(buf)), w.Flush() + } + for { + inbuf := w.buffers.Get().([]byte)[:w.blockSize+obufHeaderLen] + n2, err := io.ReadFull(r, inbuf[obufHeaderLen:]) + if err != nil { + if err == io.ErrUnexpectedEOF { + err = io.EOF + } + if err != io.EOF { + return n, w.err(err) + } + } + if n2 == 0 { + break + } + n += int64(n2) + err2 := w.writeFull(inbuf[:n2+obufHeaderLen]) + if w.err(err2) != nil { + break + } + + if err != nil { + // We got EOF and wrote everything + break + } + } + + return n, w.err(nil) +} + +// AddSkippableBlock will add a skippable block to the stream. +// The ID must be 0x80-0xfe (inclusive). +// Length of the skippable block must be <= 16777215 bytes. 
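+//
+// On the read side such blocks can be observed with Reader.SkippableCB;
+// readers without a callback registered simply skip them.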
+func (w *Writer) AddSkippableBlock(id uint8, data []byte) (err error) { + if err := w.err(nil); err != nil { + return err + } + if len(data) == 0 { + return nil + } + if id < 0x80 || id > chunkTypePadding { + return fmt.Errorf("invalid skippable block id %x", id) + } + if len(data) > maxChunkSize { + return fmt.Errorf("skippable block excessed maximum size") + } + var header [4]byte + chunkLen := 4 + len(data) + header[0] = id + header[1] = uint8(chunkLen >> 0) + header[2] = uint8(chunkLen >> 8) + header[3] = uint8(chunkLen >> 16) + if w.concurrency == 1 { + write := func(b []byte) error { + n, err := w.writer.Write(b) + if err = w.err(err); err != nil { + return err + } + if n != len(data) { + return w.err(io.ErrShortWrite) + } + w.written += int64(n) + return w.err(nil) + } + if !w.wroteStreamHeader { + w.wroteStreamHeader = true + if w.snappy { + if err := write([]byte(magicChunkSnappy)); err != nil { + return err + } + } else { + if err := write([]byte(magicChunk)); err != nil { + return err + } + } + } + if err := write(header[:]); err != nil { + return err + } + if err := write(data); err != nil { + return err + } + } + + // Create output... + if !w.wroteStreamHeader { + w.wroteStreamHeader = true + hWriter := make(chan result) + w.output <- hWriter + if w.snappy { + hWriter <- result{startOffset: w.uncompWritten, b: []byte(magicChunkSnappy)} + } else { + hWriter <- result{startOffset: w.uncompWritten, b: []byte(magicChunk)} + } + } + + // Copy input. + inbuf := w.buffers.Get().([]byte)[:4] + copy(inbuf, header[:]) + inbuf = append(inbuf, data...) + + output := make(chan result, 1) + // Queue output. + w.output <- output + output <- result{startOffset: w.uncompWritten, b: inbuf} + + return nil +} + +// EncodeBuffer will add a buffer to the stream. +// This is the fastest way to encode a stream, +// but the input buffer cannot be written to by the caller +// until Flush or Close has been called when concurrency != 1. +// +// If you cannot control that, use the regular Write function. +// +// Note that input is not buffered. +// This means that each write will result in discrete blocks being created. +// For buffered writes, use the regular Write function. +func (w *Writer) EncodeBuffer(buf []byte) (err error) { + if err := w.err(nil); err != nil { + return err + } + + if w.flushOnWrite { + _, err := w.write(buf) + return err + } + // Flush queued data first. + if len(w.ibuf) > 0 { + err := w.Flush() + if err != nil { + return err + } + } + if w.concurrency == 1 { + _, err := w.writeSync(buf) + return err + } + + // Spawn goroutine and write block to output channel. + if !w.wroteStreamHeader { + w.wroteStreamHeader = true + hWriter := make(chan result) + w.output <- hWriter + if w.snappy { + hWriter <- result{startOffset: w.uncompWritten, b: []byte(magicChunkSnappy)} + } else { + hWriter <- result{startOffset: w.uncompWritten, b: []byte(magicChunk)} + } + } + + for len(buf) > 0 { + // Cut input. + uncompressed := buf + if len(uncompressed) > w.blockSize { + uncompressed = uncompressed[:w.blockSize] + } + buf = buf[len(uncompressed):] + // Get an output buffer. + obuf := w.buffers.Get().([]byte)[:len(uncompressed)+obufHeaderLen] + output := make(chan result) + // Queue output now, so we keep order. + w.output <- output + res := result{ + startOffset: w.uncompWritten, + } + w.uncompWritten += int64(len(uncompressed)) + go func() { + checksum := crc(uncompressed) + + // Set to uncompressed. 
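+			// The chunk is provisionally marked uncompressed and only
+			// switched to compressed below if encodeBlock actually saved
+			// space.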
+ chunkType := uint8(chunkTypeUncompressedData) + chunkLen := 4 + len(uncompressed) + + // Attempt compressing. + n := binary.PutUvarint(obuf[obufHeaderLen:], uint64(len(uncompressed))) + n2 := w.encodeBlock(obuf[obufHeaderLen+n:], uncompressed) + + // Check if we should use this, or store as uncompressed instead. + if n2 > 0 { + chunkType = uint8(chunkTypeCompressedData) + chunkLen = 4 + n + n2 + obuf = obuf[:obufHeaderLen+n+n2] + } else { + // copy uncompressed + copy(obuf[obufHeaderLen:], uncompressed) + } + + // Fill in the per-chunk header that comes before the body. + obuf[0] = chunkType + obuf[1] = uint8(chunkLen >> 0) + obuf[2] = uint8(chunkLen >> 8) + obuf[3] = uint8(chunkLen >> 16) + obuf[4] = uint8(checksum >> 0) + obuf[5] = uint8(checksum >> 8) + obuf[6] = uint8(checksum >> 16) + obuf[7] = uint8(checksum >> 24) + + // Queue final output. + res.b = obuf + output <- res + }() + } + return nil +} + +func (w *Writer) encodeBlock(obuf, uncompressed []byte) int { + if w.snappy { + switch w.level { + case levelFast: + return encodeBlockSnappy(obuf, uncompressed) + case levelBetter: + return encodeBlockBetterSnappy(obuf, uncompressed) + case levelBest: + return encodeBlockBestSnappy(obuf, uncompressed) + } + return 0 + } + switch w.level { + case levelFast: + return encodeBlock(obuf, uncompressed) + case levelBetter: + return encodeBlockBetter(obuf, uncompressed) + case levelBest: + return encodeBlockBest(obuf, uncompressed) + } + return 0 +} + +func (w *Writer) write(p []byte) (nRet int, errRet error) { + if err := w.err(nil); err != nil { + return 0, err + } + if w.concurrency == 1 { + return w.writeSync(p) + } + + // Spawn goroutine and write block to output channel. + for len(p) > 0 { + if !w.wroteStreamHeader { + w.wroteStreamHeader = true + hWriter := make(chan result) + w.output <- hWriter + if w.snappy { + hWriter <- result{startOffset: w.uncompWritten, b: []byte(magicChunkSnappy)} + } else { + hWriter <- result{startOffset: w.uncompWritten, b: []byte(magicChunk)} + } + } + + var uncompressed []byte + if len(p) > w.blockSize { + uncompressed, p = p[:w.blockSize], p[w.blockSize:] + } else { + uncompressed, p = p, nil + } + + // Copy input. + // If the block is incompressible, this is used for the result. + inbuf := w.buffers.Get().([]byte)[:len(uncompressed)+obufHeaderLen] + obuf := w.buffers.Get().([]byte)[:w.obufLen] + copy(inbuf[obufHeaderLen:], uncompressed) + uncompressed = inbuf[obufHeaderLen:] + + output := make(chan result) + // Queue output now, so we keep order. + w.output <- output + res := result{ + startOffset: w.uncompWritten, + } + w.uncompWritten += int64(len(uncompressed)) + + go func() { + checksum := crc(uncompressed) + + // Set to uncompressed. + chunkType := uint8(chunkTypeUncompressedData) + chunkLen := 4 + len(uncompressed) + + // Attempt compressing. + n := binary.PutUvarint(obuf[obufHeaderLen:], uint64(len(uncompressed))) + n2 := w.encodeBlock(obuf[obufHeaderLen+n:], uncompressed) + + // Check if we should use this, or store as uncompressed instead. + if n2 > 0 { + chunkType = uint8(chunkTypeCompressedData) + chunkLen = 4 + n + n2 + obuf = obuf[:obufHeaderLen+n+n2] + } else { + // Use input as output. + obuf, inbuf = inbuf, obuf + } + + // Fill in the per-chunk header that comes before the body. 
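+			// The layout is:
+			//   byte 0:    chunk type (compressed or uncompressed)
+			//   bytes 1-3: chunk length, little endian (checksum + body)
+			//   bytes 4-7: checksum of the uncompressed data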
+ obuf[0] = chunkType + obuf[1] = uint8(chunkLen >> 0) + obuf[2] = uint8(chunkLen >> 8) + obuf[3] = uint8(chunkLen >> 16) + obuf[4] = uint8(checksum >> 0) + obuf[5] = uint8(checksum >> 8) + obuf[6] = uint8(checksum >> 16) + obuf[7] = uint8(checksum >> 24) + + // Queue final output. + res.b = obuf + output <- res + + // Put unused buffer back in pool. + w.buffers.Put(inbuf) + }() + nRet += len(uncompressed) + } + return nRet, nil +} + +// writeFull is a special version of write that will always write the full buffer. +// Data to be compressed should start at offset obufHeaderLen and fill the remainder of the buffer. +// The data will be written as a single block. +// The caller is not allowed to use inbuf after this function has been called. +func (w *Writer) writeFull(inbuf []byte) (errRet error) { + if err := w.err(nil); err != nil { + return err + } + + if w.concurrency == 1 { + _, err := w.writeSync(inbuf[obufHeaderLen:]) + return err + } + + // Spawn goroutine and write block to output channel. + if !w.wroteStreamHeader { + w.wroteStreamHeader = true + hWriter := make(chan result) + w.output <- hWriter + if w.snappy { + hWriter <- result{startOffset: w.uncompWritten, b: []byte(magicChunkSnappy)} + } else { + hWriter <- result{startOffset: w.uncompWritten, b: []byte(magicChunk)} + } + } + + // Get an output buffer. + obuf := w.buffers.Get().([]byte)[:w.obufLen] + uncompressed := inbuf[obufHeaderLen:] + + output := make(chan result) + // Queue output now, so we keep order. + w.output <- output + res := result{ + startOffset: w.uncompWritten, + } + w.uncompWritten += int64(len(uncompressed)) + + go func() { + checksum := crc(uncompressed) + + // Set to uncompressed. + chunkType := uint8(chunkTypeUncompressedData) + chunkLen := 4 + len(uncompressed) + + // Attempt compressing. + n := binary.PutUvarint(obuf[obufHeaderLen:], uint64(len(uncompressed))) + n2 := w.encodeBlock(obuf[obufHeaderLen+n:], uncompressed) + + // Check if we should use this, or store as uncompressed instead. + if n2 > 0 { + chunkType = uint8(chunkTypeCompressedData) + chunkLen = 4 + n + n2 + obuf = obuf[:obufHeaderLen+n+n2] + } else { + // Use input as output. + obuf, inbuf = inbuf, obuf + } + + // Fill in the per-chunk header that comes before the body. + obuf[0] = chunkType + obuf[1] = uint8(chunkLen >> 0) + obuf[2] = uint8(chunkLen >> 8) + obuf[3] = uint8(chunkLen >> 16) + obuf[4] = uint8(checksum >> 0) + obuf[5] = uint8(checksum >> 8) + obuf[6] = uint8(checksum >> 16) + obuf[7] = uint8(checksum >> 24) + + // Queue final output. + res.b = obuf + output <- res + + // Put unused buffer back in pool. + w.buffers.Put(inbuf) + }() + return nil +} + +func (w *Writer) writeSync(p []byte) (nRet int, errRet error) { + if err := w.err(nil); err != nil { + return 0, err + } + if !w.wroteStreamHeader { + w.wroteStreamHeader = true + var n int + var err error + if w.snappy { + n, err = w.writer.Write([]byte(magicChunkSnappy)) + } else { + n, err = w.writer.Write([]byte(magicChunk)) + } + if err != nil { + return 0, w.err(err) + } + if n != len(magicChunk) { + return 0, w.err(io.ErrShortWrite) + } + w.written += int64(n) + } + + for len(p) > 0 { + var uncompressed []byte + if len(p) > w.blockSize { + uncompressed, p = p[:w.blockSize], p[w.blockSize:] + } else { + uncompressed, p = p, nil + } + + obuf := w.buffers.Get().([]byte)[:w.obufLen] + checksum := crc(uncompressed) + + // Set to uncompressed. + chunkType := uint8(chunkTypeUncompressedData) + chunkLen := 4 + len(uncompressed) + + // Attempt compressing. 
+ n := binary.PutUvarint(obuf[obufHeaderLen:], uint64(len(uncompressed))) + n2 := w.encodeBlock(obuf[obufHeaderLen+n:], uncompressed) + + if n2 > 0 { + chunkType = uint8(chunkTypeCompressedData) + chunkLen = 4 + n + n2 + obuf = obuf[:obufHeaderLen+n+n2] + } else { + obuf = obuf[:8] + } + + // Fill in the per-chunk header that comes before the body. + obuf[0] = chunkType + obuf[1] = uint8(chunkLen >> 0) + obuf[2] = uint8(chunkLen >> 8) + obuf[3] = uint8(chunkLen >> 16) + obuf[4] = uint8(checksum >> 0) + obuf[5] = uint8(checksum >> 8) + obuf[6] = uint8(checksum >> 16) + obuf[7] = uint8(checksum >> 24) + + n, err := w.writer.Write(obuf) + if err != nil { + return 0, w.err(err) + } + if n != len(obuf) { + return 0, w.err(io.ErrShortWrite) + } + w.err(w.index.add(w.written, w.uncompWritten)) + w.written += int64(n) + w.uncompWritten += int64(len(uncompressed)) + + if chunkType == chunkTypeUncompressedData { + // Write uncompressed data. + n, err := w.writer.Write(uncompressed) + if err != nil { + return 0, w.err(err) + } + if n != len(uncompressed) { + return 0, w.err(io.ErrShortWrite) + } + w.written += int64(n) + } + w.buffers.Put(obuf) + // Queue final output. + nRet += len(uncompressed) + } + return nRet, nil +} + +// Flush flushes the Writer to its underlying io.Writer. +// This does not apply padding. +func (w *Writer) Flush() error { + if err := w.err(nil); err != nil { + return err + } + + // Queue any data still in input buffer. + if len(w.ibuf) != 0 { + if !w.wroteStreamHeader { + _, err := w.writeSync(w.ibuf) + w.ibuf = w.ibuf[:0] + return w.err(err) + } else { + _, err := w.write(w.ibuf) + w.ibuf = w.ibuf[:0] + err = w.err(err) + if err != nil { + return err + } + } + } + if w.output == nil { + return w.err(nil) + } + + // Send empty buffer + res := make(chan result) + w.output <- res + // Block until this has been picked up. + res <- result{b: nil, startOffset: w.uncompWritten} + // When it is closed, we have flushed. + <-res + return w.err(nil) +} + +// Close calls Flush and then closes the Writer. +// Calling Close multiple times is ok, +// but calling CloseIndex after this will make it not return the index. +func (w *Writer) Close() error { + _, err := w.closeIndex(w.appendIndex) + return err +} + +// CloseIndex calls Close and returns an index on first call. +// This is not required if you are only adding index to a stream. +func (w *Writer) CloseIndex() ([]byte, error) { + return w.closeIndex(true) +} + +func (w *Writer) closeIndex(idx bool) ([]byte, error) { + err := w.Flush() + if w.output != nil { + close(w.output) + w.writerWg.Wait() + w.output = nil + } + + var index []byte + if w.err(nil) == nil && w.writer != nil { + // Create index. + if idx { + compSize := int64(-1) + if w.pad <= 1 { + compSize = w.written + } + index = w.index.appendTo(w.ibuf[:0], w.uncompWritten, compSize) + // Count as written for padding. + if w.appendIndex { + w.written += int64(len(index)) + } + if true { + _, err := w.index.Load(index) + if err != nil { + panic(err) + } + } + } + + if w.pad > 1 { + tmp := w.ibuf[:0] + if len(index) > 0 { + // Allocate another buffer. 
+				tmp = w.buffers.Get().([]byte)[:0]
+				defer w.buffers.Put(tmp)
+			}
+			add := calcSkippableFrame(w.written, int64(w.pad))
+			frame, err := skippableFrame(tmp, add, w.randSrc)
+			if err = w.err(err); err != nil {
+				return nil, err
+			}
+			n, err2 := w.writer.Write(frame)
+			if err2 == nil && n != len(frame) {
+				err2 = io.ErrShortWrite
+			}
+			_ = w.err(err2)
+		}
+		if len(index) > 0 && w.appendIndex {
+			n, err2 := w.writer.Write(index)
+			if err2 == nil && n != len(index) {
+				err2 = io.ErrShortWrite
+			}
+			_ = w.err(err2)
+		}
+	}
+	err = w.err(errClosed)
+	if err == errClosed {
+		return index, nil
+	}
+	return nil, err
+}
+
+// calcSkippableFrame will return a total size to be added for written
+// to be divisible by multiple.
+// The return value will be 0 when no padding is needed,
+// and otherwise at least skippableFrameHeader.
+// The function will panic if written < 0 or wantMultiple <= 0.
+func calcSkippableFrame(written, wantMultiple int64) int {
+	if wantMultiple <= 0 {
+		panic("wantMultiple <= 0")
+	}
+	if written < 0 {
+		panic("written < 0")
+	}
+	leftOver := written % wantMultiple
+	if leftOver == 0 {
+		return 0
+	}
+	toAdd := wantMultiple - leftOver
+	for toAdd < skippableFrameHeader {
+		toAdd += wantMultiple
+	}
+	return int(toAdd)
+}
+
+// skippableFrame will add a skippable frame with a total size of total bytes.
+// total should be >= skippableFrameHeader and < maxBlockSize + skippableFrameHeader
+func skippableFrame(dst []byte, total int, r io.Reader) ([]byte, error) {
+	if total == 0 {
+		return dst, nil
+	}
+	if total < skippableFrameHeader {
+		return dst, fmt.Errorf("s2: requested skippable frame (%d) < 4", total)
+	}
+	if int64(total) >= maxBlockSize+skippableFrameHeader {
+		return dst, fmt.Errorf("s2: requested skippable frame (%d) >= max 1<<24", total)
+	}
+	// Chunk type 0xfe "Section 4.4 Padding (chunk type 0xfe)"
+	dst = append(dst, chunkTypePadding)
+	f := uint32(total - skippableFrameHeader)
+	// Add chunk length.
+	dst = append(dst, uint8(f), uint8(f>>8), uint8(f>>16))
+	// Add data
+	start := len(dst)
+	dst = append(dst, make([]byte, f)...)
+	_, err := io.ReadFull(r, dst[start:])
+	return dst, err
+}
+
+// WriterOption is an option for creating an encoder.
+type WriterOption func(*Writer) error
+
+// WriterConcurrency will set the concurrency,
+// meaning the maximum number of encoders to run concurrently.
+// The value supplied must be at least 1.
+// By default this will be set to GOMAXPROCS.
+func WriterConcurrency(n int) WriterOption {
+	return func(w *Writer) error {
+		if n <= 0 {
+			return errors.New("concurrency must be at least 1")
+		}
+		w.concurrency = n
+		return nil
+	}
+}
+
+// WriterAddIndex will append an index to the end of a stream
+// when it is closed.
+func WriterAddIndex() WriterOption {
+	return func(w *Writer) error {
+		w.appendIndex = true
+		return nil
+	}
+}
+
+// WriterBetterCompression will enable better compression.
+// EncodeBetter compresses better than Encode but typically with a
+// 10-40% speed decrease on both compression and decompression.
+func WriterBetterCompression() WriterOption {
+	return func(w *Writer) error {
+		w.level = levelBetter
+		return nil
+	}
+}
+
+// WriterBestCompression will enable best compression.
+// EncodeBest compresses better than EncodeBetter, but typically with a
+// big speed decrease on compression.
+func WriterBestCompression() WriterOption {
+	return func(w *Writer) error {
+		w.level = levelBest
+		return nil
+	}
+}
+
+// WriterUncompressed will bypass compression.
+// The stream will be written as uncompressed blocks only.
+// If concurrency is > 1, CRC and output will still be done async.
+func WriterUncompressed() WriterOption {
+	return func(w *Writer) error {
+		w.level = levelUncompressed
+		return nil
+	}
+}
+
+// WriterBlockSize allows overriding the default block size.
+// Blocks will be this size or smaller.
+// Minimum size is 4KB and maximum size is 4MB.
+//
+// Bigger blocks may give bigger throughput on systems with many cores,
+// and will increase compression slightly, but it will limit the possible
+// concurrency for smaller payloads for both encoding and decoding.
+// Default block size is 1MB.
+//
+// When writing Snappy compatible output using WriterSnappyCompat,
+// the maximum block size is 64KB.
+func WriterBlockSize(n int) WriterOption {
+	return func(w *Writer) error {
+		if w.snappy && n > maxSnappyBlockSize || n < minBlockSize {
+			return errors.New("s2: block size must be <= 64K and >= 4KB for snappy compatible output")
+		}
+		if n > maxBlockSize || n < minBlockSize {
+			return errors.New("s2: block size must be <= 4MB and >= 4KB")
+		}
+		w.blockSize = n
+		return nil
+	}
+}
+
+// WriterPadding will add padding to all output so the size will be a multiple of n.
+// This can be used to obfuscate the exact output size or make blocks of a certain size.
+// The contents will be a skippable frame, so it will be invisible to the decoder.
+// n must be > 0 and <= 4MB.
+// The padded area will be filled with data from crypto/rand.Reader.
+// The padding will be applied whenever Close is called on the writer.
+func WriterPadding(n int) WriterOption {
+	return func(w *Writer) error {
+		if n <= 0 {
+			return fmt.Errorf("s2: padding must be at least 1")
+		}
+		// No need to waste our time.
+		if n == 1 {
+			w.pad = 0
+			return nil
+		}
+		if n > maxBlockSize {
+			return fmt.Errorf("s2: padding must be less than 4MB")
+		}
+		w.pad = n
+		return nil
+	}
+}
+
+// WriterPaddingSrc will get random data for padding from the supplied source.
+// By default crypto/rand is used.
+func WriterPaddingSrc(reader io.Reader) WriterOption {
+	return func(w *Writer) error {
+		w.randSrc = reader
+		return nil
+	}
+}
+
+// WriterSnappyCompat will write snappy compatible output.
+// The output can be decompressed using either snappy or s2.
+// If the block size is larger than 64KB, it is reduced to fit within that limit.
+func WriterSnappyCompat() WriterOption {
+	return func(w *Writer) error {
+		w.snappy = true
+		if w.blockSize > 64<<10 {
+			// We choose 8 bytes less than 64K, since that will make literal emits slightly more effective.
+			// And allows us to skip some size checks.
+			w.blockSize = (64 << 10) - 8
+		}
+		return nil
+	}
+}
+
+// WriterFlushOnWrite will compress blocks on each call to the Write function.
+//
+// This is quite inefficient as block sizes will depend on the write size.
+//
+// Use WriterConcurrency(1) to also make sure that output is flushed when
+// Write calls return; otherwise blocks will be written when compression is done.
+func WriterFlushOnWrite() WriterOption {
+	return func(w *Writer) error {
+		w.flushOnWrite = true
+		return nil
+	}
+}
diff --git a/vendor/github.com/klauspost/compress/s2/encode_all.go b/vendor/github.com/klauspost/compress/s2/encode_all.go
new file mode 100644
index 0000000000000..8b16c38a68f63
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/s2/encode_all.go
@@ -0,0 +1,456 @@
+// Copyright 2016 The Snappy-Go Authors. All rights reserved.
+// Copyright (c) 2019 Klaus Post. All rights reserved.
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package s2 + +import ( + "bytes" + "encoding/binary" + "math/bits" +) + +func load32(b []byte, i int) uint32 { + return binary.LittleEndian.Uint32(b[i:]) +} + +func load64(b []byte, i int) uint64 { + return binary.LittleEndian.Uint64(b[i:]) +} + +// hash6 returns the hash of the lowest 6 bytes of u to fit in a hash table with h bits. +// Preferably h should be a constant and should always be <64. +func hash6(u uint64, h uint8) uint32 { + const prime6bytes = 227718039650203 + return uint32(((u << (64 - 48)) * prime6bytes) >> ((64 - h) & 63)) +} + +func encodeGo(dst, src []byte) []byte { + if n := MaxEncodedLen(len(src)); n < 0 { + panic(ErrTooLarge) + } else if len(dst) < n { + dst = make([]byte, n) + } + + // The block starts with the varint-encoded length of the decompressed bytes. + d := binary.PutUvarint(dst, uint64(len(src))) + + if len(src) == 0 { + return dst[:d] + } + if len(src) < minNonLiteralBlockSize { + d += emitLiteral(dst[d:], src) + return dst[:d] + } + n := encodeBlockGo(dst[d:], src) + if n > 0 { + d += n + return dst[:d] + } + // Not compressible + d += emitLiteral(dst[d:], src) + return dst[:d] +} + +// encodeBlockGo encodes a non-empty src to a guaranteed-large-enough dst. It +// assumes that the varint-encoded length of the decompressed bytes has already +// been written. +// +// It also assumes that: +// len(dst) >= MaxEncodedLen(len(src)) && +// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize +func encodeBlockGo(dst, src []byte) (d int) { + // Initialize the hash table. + const ( + tableBits = 14 + maxTableSize = 1 << tableBits + + debug = false + ) + + var table [maxTableSize]uint32 + + // sLimit is when to stop looking for offset/length copies. The inputMargin + // lets us use a fast path for emitLiteral in the main loop, while we are + // looking for copies. + sLimit := len(src) - inputMargin + + // Bail if we can't compress to at least this. + dstLimit := len(src) - len(src)>>5 - 5 + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := 0 + + // The encoded form must start with a literal, as there are no previous + // bytes to copy, so we start looking for hash matches at s == 1. + s := 1 + cv := load64(src, s) + + // We search for a repeat at -1, but don't output repeats when nextEmit == 0 + repeat := 1 + + for { + candidate := 0 + for { + // Next src position to check + nextS := s + (s-nextEmit)>>6 + 4 + if nextS > sLimit { + goto emitRemainder + } + hash0 := hash6(cv, tableBits) + hash1 := hash6(cv>>8, tableBits) + candidate = int(table[hash0]) + candidate2 := int(table[hash1]) + table[hash0] = uint32(s) + table[hash1] = uint32(s + 1) + hash2 := hash6(cv>>16, tableBits) + + // Check repeat at offset checkRep. + const checkRep = 1 + if uint32(cv>>(checkRep*8)) == load32(src, s-repeat+checkRep) { + base := s + checkRep + // Extend back + for i := base - repeat; base > nextEmit && i > 0 && src[i-1] == src[base-1]; { + i-- + base-- + } + d += emitLiteral(dst[d:], src[nextEmit:base]) + + // Extend forward + candidate := s - repeat + 4 + checkRep + s += 4 + checkRep + for s <= sLimit { + if diff := load64(src, s) ^ load64(src, candidate); diff != 0 { + s += bits.TrailingZeros64(diff) >> 3 + break + } + s += 8 + candidate += 8 + } + if debug { + // Validate match. 
+ if s <= candidate { + panic("s <= candidate") + } + a := src[base:s] + b := src[base-repeat : base-repeat+(s-base)] + if !bytes.Equal(a, b) { + panic("mismatch") + } + } + if nextEmit > 0 { + // same as `add := emitCopy(dst[d:], repeat, s-base)` but skips storing offset. + d += emitRepeat(dst[d:], repeat, s-base) + } else { + // First match, cannot be repeat. + d += emitCopy(dst[d:], repeat, s-base) + } + nextEmit = s + if s >= sLimit { + goto emitRemainder + } + + cv = load64(src, s) + continue + } + + if uint32(cv) == load32(src, candidate) { + break + } + candidate = int(table[hash2]) + if uint32(cv>>8) == load32(src, candidate2) { + table[hash2] = uint32(s + 2) + candidate = candidate2 + s++ + break + } + table[hash2] = uint32(s + 2) + if uint32(cv>>16) == load32(src, candidate) { + s += 2 + break + } + + cv = load64(src, nextS) + s = nextS + } + + // Extend backwards. + // The top bytes will be rechecked to get the full match. + for candidate > 0 && s > nextEmit && src[candidate-1] == src[s-1] { + candidate-- + s-- + } + + // Bail if we exceed the maximum size. + if d+(s-nextEmit) > dstLimit { + return 0 + } + + // A 4-byte match has been found. We'll later see if more than 4 bytes + // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit + // them as literal bytes. + + d += emitLiteral(dst[d:], src[nextEmit:s]) + + // Call emitCopy, and then see if another emitCopy could be our next + // move. Repeat until we find no match for the input immediately after + // what was consumed by the last emitCopy call. + // + // If we exit this loop normally then we need to call emitLiteral next, + // though we don't yet know how big the literal will be. We handle that + // by proceeding to the next iteration of the main loop. We also can + // exit this loop via goto if we get close to exhausting the input. + for { + // Invariant: we have a 4-byte match at s, and no need to emit any + // literal bytes prior to s. + base := s + repeat = base - candidate + + // Extend the 4-byte match as long as possible. + s += 4 + candidate += 4 + for s <= len(src)-8 { + if diff := load64(src, s) ^ load64(src, candidate); diff != 0 { + s += bits.TrailingZeros64(diff) >> 3 + break + } + s += 8 + candidate += 8 + } + + d += emitCopy(dst[d:], repeat, s-base) + if debug { + // Validate match. + if s <= candidate { + panic("s <= candidate") + } + a := src[base:s] + b := src[base-repeat : base-repeat+(s-base)] + if !bytes.Equal(a, b) { + panic("mismatch") + } + } + + nextEmit = s + if s >= sLimit { + goto emitRemainder + } + + if d > dstLimit { + // Do we have space for more, if not bail. + return 0 + } + // Check for an immediate match, otherwise start search at s+1 + x := load64(src, s-2) + m2Hash := hash6(x, tableBits) + currHash := hash6(x>>16, tableBits) + candidate = int(table[currHash]) + table[m2Hash] = uint32(s - 2) + table[currHash] = uint32(s) + if debug && s == candidate { + panic("s == candidate") + } + if uint32(x>>16) != load32(src, candidate) { + cv = load64(src, s+1) + s++ + break + } + } + } + +emitRemainder: + if nextEmit < len(src) { + // Bail if we exceed the maximum size. + if d+len(src)-nextEmit > dstLimit { + return 0 + } + d += emitLiteral(dst[d:], src[nextEmit:]) + } + return d +} + +func encodeBlockSnappyGo(dst, src []byte) (d int) { + // Initialize the hash table. + const ( + tableBits = 14 + maxTableSize = 1 << tableBits + ) + + var table [maxTableSize]uint32 + + // sLimit is when to stop looking for offset/length copies. 
The inputMargin + // lets us use a fast path for emitLiteral in the main loop, while we are + // looking for copies. + sLimit := len(src) - inputMargin + + // Bail if we can't compress to at least this. + dstLimit := len(src) - len(src)>>5 - 5 + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := 0 + + // The encoded form must start with a literal, as there are no previous + // bytes to copy, so we start looking for hash matches at s == 1. + s := 1 + cv := load64(src, s) + + // We search for a repeat at -1, but don't output repeats when nextEmit == 0 + repeat := 1 + + for { + candidate := 0 + for { + // Next src position to check + nextS := s + (s-nextEmit)>>6 + 4 + if nextS > sLimit { + goto emitRemainder + } + hash0 := hash6(cv, tableBits) + hash1 := hash6(cv>>8, tableBits) + candidate = int(table[hash0]) + candidate2 := int(table[hash1]) + table[hash0] = uint32(s) + table[hash1] = uint32(s + 1) + hash2 := hash6(cv>>16, tableBits) + + // Check repeat at offset checkRep. + const checkRep = 1 + if uint32(cv>>(checkRep*8)) == load32(src, s-repeat+checkRep) { + base := s + checkRep + // Extend back + for i := base - repeat; base > nextEmit && i > 0 && src[i-1] == src[base-1]; { + i-- + base-- + } + d += emitLiteral(dst[d:], src[nextEmit:base]) + + // Extend forward + candidate := s - repeat + 4 + checkRep + s += 4 + checkRep + for s <= sLimit { + if diff := load64(src, s) ^ load64(src, candidate); diff != 0 { + s += bits.TrailingZeros64(diff) >> 3 + break + } + s += 8 + candidate += 8 + } + + d += emitCopyNoRepeat(dst[d:], repeat, s-base) + nextEmit = s + if s >= sLimit { + goto emitRemainder + } + + cv = load64(src, s) + continue + } + + if uint32(cv) == load32(src, candidate) { + break + } + candidate = int(table[hash2]) + if uint32(cv>>8) == load32(src, candidate2) { + table[hash2] = uint32(s + 2) + candidate = candidate2 + s++ + break + } + table[hash2] = uint32(s + 2) + if uint32(cv>>16) == load32(src, candidate) { + s += 2 + break + } + + cv = load64(src, nextS) + s = nextS + } + + // Extend backwards + for candidate > 0 && s > nextEmit && src[candidate-1] == src[s-1] { + candidate-- + s-- + } + + // Bail if we exceed the maximum size. + if d+(s-nextEmit) > dstLimit { + return 0 + } + + // A 4-byte match has been found. We'll later see if more than 4 bytes + // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit + // them as literal bytes. + + d += emitLiteral(dst[d:], src[nextEmit:s]) + + // Call emitCopy, and then see if another emitCopy could be our next + // move. Repeat until we find no match for the input immediately after + // what was consumed by the last emitCopy call. + // + // If we exit this loop normally then we need to call emitLiteral next, + // though we don't yet know how big the literal will be. We handle that + // by proceeding to the next iteration of the main loop. We also can + // exit this loop via goto if we get close to exhausting the input. + for { + // Invariant: we have a 4-byte match at s, and no need to emit any + // literal bytes prior to s. + base := s + repeat = base - candidate + + // Extend the 4-byte match as long as possible. + s += 4 + candidate += 4 + for s <= len(src)-8 { + if diff := load64(src, s) ^ load64(src, candidate); diff != 0 { + s += bits.TrailingZeros64(diff) >> 3 + break + } + s += 8 + candidate += 8 + } + + d += emitCopyNoRepeat(dst[d:], repeat, s-base) + if false { + // Validate match. 
+ a := src[base:s] + b := src[base-repeat : base-repeat+(s-base)] + if !bytes.Equal(a, b) { + panic("mismatch") + } + } + + nextEmit = s + if s >= sLimit { + goto emitRemainder + } + + if d > dstLimit { + // Do we have space for more, if not bail. + return 0 + } + // Check for an immediate match, otherwise start search at s+1 + x := load64(src, s-2) + m2Hash := hash6(x, tableBits) + currHash := hash6(x>>16, tableBits) + candidate = int(table[currHash]) + table[m2Hash] = uint32(s - 2) + table[currHash] = uint32(s) + if uint32(x>>16) != load32(src, candidate) { + cv = load64(src, s+1) + s++ + break + } + } + } + +emitRemainder: + if nextEmit < len(src) { + // Bail if we exceed the maximum size. + if d+len(src)-nextEmit > dstLimit { + return 0 + } + d += emitLiteral(dst[d:], src[nextEmit:]) + } + return d +} diff --git a/vendor/github.com/klauspost/compress/s2/encode_amd64.go b/vendor/github.com/klauspost/compress/s2/encode_amd64.go new file mode 100644 index 0000000000000..e612225f4d358 --- /dev/null +++ b/vendor/github.com/klauspost/compress/s2/encode_amd64.go @@ -0,0 +1,142 @@ +//go:build !appengine && !noasm && gc +// +build !appengine,!noasm,gc + +package s2 + +// encodeBlock encodes a non-empty src to a guaranteed-large-enough dst. It +// assumes that the varint-encoded length of the decompressed bytes has already +// been written. +// +// It also assumes that: +// len(dst) >= MaxEncodedLen(len(src)) && +// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize +func encodeBlock(dst, src []byte) (d int) { + const ( + // Use 12 bit table when less than... + limit12B = 16 << 10 + // Use 10 bit table when less than... + limit10B = 4 << 10 + // Use 8 bit table when less than... + limit8B = 512 + ) + + if len(src) >= 4<<20 { + return encodeBlockAsm(dst, src) + } + if len(src) >= limit12B { + return encodeBlockAsm4MB(dst, src) + } + if len(src) >= limit10B { + return encodeBlockAsm12B(dst, src) + } + if len(src) >= limit8B { + return encodeBlockAsm10B(dst, src) + } + if len(src) < minNonLiteralBlockSize { + return 0 + } + return encodeBlockAsm8B(dst, src) +} + +// encodeBlockBetter encodes a non-empty src to a guaranteed-large-enough dst. It +// assumes that the varint-encoded length of the decompressed bytes has already +// been written. +// +// It also assumes that: +// len(dst) >= MaxEncodedLen(len(src)) && +// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize +func encodeBlockBetter(dst, src []byte) (d int) { + const ( + // Use 12 bit table when less than... + limit12B = 16 << 10 + // Use 10 bit table when less than... + limit10B = 4 << 10 + // Use 8 bit table when less than... + limit8B = 512 + ) + + if len(src) > 4<<20 { + return encodeBetterBlockAsm(dst, src) + } + if len(src) >= limit12B { + return encodeBetterBlockAsm4MB(dst, src) + } + if len(src) >= limit10B { + return encodeBetterBlockAsm12B(dst, src) + } + if len(src) >= limit8B { + return encodeBetterBlockAsm10B(dst, src) + } + if len(src) < minNonLiteralBlockSize { + return 0 + } + return encodeBetterBlockAsm8B(dst, src) +} + +// encodeBlockSnappy encodes a non-empty src to a guaranteed-large-enough dst. It +// assumes that the varint-encoded length of the decompressed bytes has already +// been written. +// +// It also assumes that: +// len(dst) >= MaxEncodedLen(len(src)) && +// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize +func encodeBlockSnappy(dst, src []byte) (d int) { + const ( + // Use 12 bit table when less than... + limit12B = 16 << 10 + // Use 10 bit table when less than... 
+		limit10B = 4 << 10
+		// Use 8 bit table when less than...
+		limit8B = 512
+	)
+	if len(src) >= 64<<10 {
+		return encodeSnappyBlockAsm(dst, src)
+	}
+	if len(src) >= limit12B {
+		return encodeSnappyBlockAsm64K(dst, src)
+	}
+	if len(src) >= limit10B {
+		return encodeSnappyBlockAsm12B(dst, src)
+	}
+	if len(src) >= limit8B {
+		return encodeSnappyBlockAsm10B(dst, src)
+	}
+	if len(src) < minNonLiteralBlockSize {
+		return 0
+	}
+	return encodeSnappyBlockAsm8B(dst, src)
+}
+
+// encodeBlockBetterSnappy encodes a non-empty src to a guaranteed-large-enough dst. It
+// assumes that the varint-encoded length of the decompressed bytes has already
+// been written.
+//
+// It also assumes that:
+//	len(dst) >= MaxEncodedLen(len(src)) &&
+//	minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize
+func encodeBlockBetterSnappy(dst, src []byte) (d int) {
+	const (
+		// Use 12 bit table when less than...
+		limit12B = 16 << 10
+		// Use 10 bit table when less than...
+		limit10B = 4 << 10
+		// Use 8 bit table when less than...
+		limit8B = 512
+	)
+	if len(src) >= 64<<10 {
+		return encodeSnappyBetterBlockAsm(dst, src)
+	}
+	if len(src) >= limit12B {
+		return encodeSnappyBetterBlockAsm64K(dst, src)
+	}
+	if len(src) >= limit10B {
+		return encodeSnappyBetterBlockAsm12B(dst, src)
+	}
+	if len(src) >= limit8B {
+		return encodeSnappyBetterBlockAsm10B(dst, src)
+	}
+	if len(src) < minNonLiteralBlockSize {
+		return 0
+	}
+	return encodeSnappyBetterBlockAsm8B(dst, src)
+}
diff --git a/vendor/github.com/klauspost/compress/s2/encode_best.go b/vendor/github.com/klauspost/compress/s2/encode_best.go
new file mode 100644
index 0000000000000..4480347769a04
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/s2/encode_best.go
@@ -0,0 +1,604 @@
+// Copyright 2016 The Snappy-Go Authors. All rights reserved.
+// Copyright (c) 2019 Klaus Post. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package s2
+
+import (
+	"fmt"
+	"math/bits"
+)
+
+// encodeBlockBest encodes a non-empty src to a guaranteed-large-enough dst. It
+// assumes that the varint-encoded length of the decompressed bytes has already
+// been written.
+//
+// It also assumes that:
+//	len(dst) >= MaxEncodedLen(len(src)) &&
+//	minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize
+func encodeBlockBest(dst, src []byte) (d int) {
+	// Initialize the hash tables.
+	const (
+		// Long hash matches.
+		lTableBits    = 19
+		maxLTableSize = 1 << lTableBits
+
+		// Short hash matches.
+		sTableBits    = 16
+		maxSTableSize = 1 << sTableBits
+
+		inputMargin = 8 + 2
+	)
+
+	// sLimit is when to stop looking for offset/length copies. The inputMargin
+	// lets us use a fast path for emitLiteral in the main loop, while we are
+	// looking for copies.
+	sLimit := len(src) - inputMargin
+	if len(src) < minNonLiteralBlockSize {
+		return 0
+	}
+
+	var lTable [maxLTableSize]uint64
+	var sTable [maxSTableSize]uint64
+
+	// Bail if we can't compress to at least this.
+	dstLimit := len(src) - 5
+
+	// nextEmit is where in src the next emitLiteral should start from.
+	nextEmit := 0
+
+	// The encoded form must start with a literal, as there are no previous
+	// bytes to copy, so we start looking for hash matches at s == 1.
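+	// Each table entry packs two candidate positions: the most recent match
+	// in the low 32 bits and the previous one in the high 32 bits, so every
+	// probe can test two history positions (see getCur/getPrev below).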
+ s := 1 + cv := load64(src, s) + + // We search for a repeat at -1, but don't output repeats when nextEmit == 0 + repeat := 1 + const lowbitMask = 0xffffffff + getCur := func(x uint64) int { + return int(x & lowbitMask) + } + getPrev := func(x uint64) int { + return int(x >> 32) + } + const maxSkip = 64 + + for { + type match struct { + offset int + s int + length int + score int + rep bool + } + var best match + for { + // Next src position to check + nextS := (s-nextEmit)>>8 + 1 + if nextS > maxSkip { + nextS = s + maxSkip + } else { + nextS += s + } + if nextS > sLimit { + goto emitRemainder + } + hashL := hash8(cv, lTableBits) + hashS := hash4(cv, sTableBits) + candidateL := lTable[hashL] + candidateS := sTable[hashS] + + score := func(m match) int { + // Matches that are longer forward are penalized since we must emit it as a literal. + score := m.length - m.s + if nextEmit == m.s { + // If we do not have to emit literals, we save 1 byte + score++ + } + offset := m.s - m.offset + if m.rep { + return score - emitRepeatSize(offset, m.length) + } + return score - emitCopySize(offset, m.length) + } + + matchAt := func(offset, s int, first uint32, rep bool) match { + if best.length != 0 && best.s-best.offset == s-offset { + // Don't retest if we have the same offset. + return match{offset: offset, s: s} + } + if load32(src, offset) != first { + return match{offset: offset, s: s} + } + m := match{offset: offset, s: s, length: 4 + offset, rep: rep} + s += 4 + for s <= sLimit { + if diff := load64(src, s) ^ load64(src, m.length); diff != 0 { + m.length += bits.TrailingZeros64(diff) >> 3 + break + } + s += 8 + m.length += 8 + } + m.length -= offset + m.score = score(m) + if m.score <= -m.s { + // Eliminate if no savings, we might find a better one. + m.length = 0 + } + return m + } + + bestOf := func(a, b match) match { + if b.length == 0 { + return a + } + if a.length == 0 { + return b + } + as := a.score + b.s + bs := b.score + a.s + if as >= bs { + return a + } + return b + } + + best = bestOf(matchAt(getCur(candidateL), s, uint32(cv), false), matchAt(getPrev(candidateL), s, uint32(cv), false)) + best = bestOf(best, matchAt(getCur(candidateS), s, uint32(cv), false)) + best = bestOf(best, matchAt(getPrev(candidateS), s, uint32(cv), false)) + + { + best = bestOf(best, matchAt(s-repeat+1, s+1, uint32(cv>>8), true)) + if best.length > 0 { + // s+1 + nextShort := sTable[hash4(cv>>8, sTableBits)] + s := s + 1 + cv := load64(src, s) + nextLong := lTable[hash8(cv, lTableBits)] + best = bestOf(best, matchAt(getCur(nextShort), s, uint32(cv), false)) + best = bestOf(best, matchAt(getPrev(nextShort), s, uint32(cv), false)) + best = bestOf(best, matchAt(getCur(nextLong), s, uint32(cv), false)) + best = bestOf(best, matchAt(getPrev(nextLong), s, uint32(cv), false)) + // Repeat at + 2 + best = bestOf(best, matchAt(s-repeat+1, s+1, uint32(cv>>8), true)) + + // s+2 + if true { + nextShort = sTable[hash4(cv>>8, sTableBits)] + s++ + cv = load64(src, s) + nextLong = lTable[hash8(cv, lTableBits)] + best = bestOf(best, matchAt(getCur(nextShort), s, uint32(cv), false)) + best = bestOf(best, matchAt(getPrev(nextShort), s, uint32(cv), false)) + best = bestOf(best, matchAt(getCur(nextLong), s, uint32(cv), false)) + best = bestOf(best, matchAt(getPrev(nextLong), s, uint32(cv), false)) + } + // Search for a match at best match end, see if that is better. 
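+					// Hash the bytes just past the current best match; if they
+					// occur earlier, try a candidate whose end lines up there.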
+ if sAt := best.s + best.length; sAt < sLimit { + sBack := best.s + backL := best.length + // Load initial values + cv = load64(src, sBack) + // Search for mismatch + next := lTable[hash8(load64(src, sAt), lTableBits)] + //next := sTable[hash4(load64(src, sAt), sTableBits)] + + if checkAt := getCur(next) - backL; checkAt > 0 { + best = bestOf(best, matchAt(checkAt, sBack, uint32(cv), false)) + } + if checkAt := getPrev(next) - backL; checkAt > 0 { + best = bestOf(best, matchAt(checkAt, sBack, uint32(cv), false)) + } + } + } + } + + // Update table + lTable[hashL] = uint64(s) | candidateL<<32 + sTable[hashS] = uint64(s) | candidateS<<32 + + if best.length > 0 { + break + } + + cv = load64(src, nextS) + s = nextS + } + + // Extend backwards, not needed for repeats... + s = best.s + if !best.rep { + for best.offset > 0 && s > nextEmit && src[best.offset-1] == src[s-1] { + best.offset-- + best.length++ + s-- + } + } + if false && best.offset >= s { + panic(fmt.Errorf("t %d >= s %d", best.offset, s)) + } + // Bail if we exceed the maximum size. + if d+(s-nextEmit) > dstLimit { + return 0 + } + + base := s + offset := s - best.offset + + s += best.length + + if offset > 65535 && s-base <= 5 && !best.rep { + // Bail if the match is equal or worse to the encoding. + s = best.s + 1 + if s >= sLimit { + goto emitRemainder + } + cv = load64(src, s) + continue + } + d += emitLiteral(dst[d:], src[nextEmit:base]) + if best.rep { + if nextEmit > 0 { + // same as `add := emitCopy(dst[d:], repeat, s-base)` but skips storing offset. + d += emitRepeat(dst[d:], offset, best.length) + } else { + // First match, cannot be repeat. + d += emitCopy(dst[d:], offset, best.length) + } + } else { + d += emitCopy(dst[d:], offset, best.length) + } + repeat = offset + + nextEmit = s + if s >= sLimit { + goto emitRemainder + } + + if d > dstLimit { + // Do we have space for more, if not bail. + return 0 + } + // Fill tables... + for i := best.s + 1; i < s; i++ { + cv0 := load64(src, i) + long0 := hash8(cv0, lTableBits) + short0 := hash4(cv0, sTableBits) + lTable[long0] = uint64(i) | lTable[long0]<<32 + sTable[short0] = uint64(i) | sTable[short0]<<32 + } + cv = load64(src, s) + } + +emitRemainder: + if nextEmit < len(src) { + // Bail if we exceed the maximum size. + if d+len(src)-nextEmit > dstLimit { + return 0 + } + d += emitLiteral(dst[d:], src[nextEmit:]) + } + return d +} + +// encodeBlockBestSnappy encodes a non-empty src to a guaranteed-large-enough dst. It +// assumes that the varint-encoded length of the decompressed bytes has already +// been written. +// +// It also assumes that: +// len(dst) >= MaxEncodedLen(len(src)) && +// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize +func encodeBlockBestSnappy(dst, src []byte) (d int) { + // Initialize the hash tables. + const ( + // Long hash matches. + lTableBits = 19 + maxLTableSize = 1 << lTableBits + + // Short hash matches. + sTableBits = 16 + maxSTableSize = 1 << sTableBits + + inputMargin = 8 + 2 + ) + + // sLimit is when to stop looking for offset/length copies. The inputMargin + // lets us use a fast path for emitLiteral in the main loop, while we are + // looking for copies. + sLimit := len(src) - inputMargin + if len(src) < minNonLiteralBlockSize { + return 0 + } + + var lTable [maxLTableSize]uint64 + var sTable [maxSTableSize]uint64 + + // Bail if we can't compress to at least this. + dstLimit := len(src) - 5 + + // nextEmit is where in src the next emitLiteral should start from. 
+ nextEmit := 0 + + // The encoded form must start with a literal, as there are no previous + // bytes to copy, so we start looking for hash matches at s == 1. + s := 1 + cv := load64(src, s) + + // We search for a repeat at -1, but don't output repeats when nextEmit == 0 + repeat := 1 + const lowbitMask = 0xffffffff + getCur := func(x uint64) int { + return int(x & lowbitMask) + } + getPrev := func(x uint64) int { + return int(x >> 32) + } + const maxSkip = 64 + + for { + type match struct { + offset int + s int + length int + score int + } + var best match + for { + // Next src position to check + nextS := (s-nextEmit)>>8 + 1 + if nextS > maxSkip { + nextS = s + maxSkip + } else { + nextS += s + } + if nextS > sLimit { + goto emitRemainder + } + hashL := hash8(cv, lTableBits) + hashS := hash4(cv, sTableBits) + candidateL := lTable[hashL] + candidateS := sTable[hashS] + + score := func(m match) int { + // Matches that are longer forward are penalized since we must emit it as a literal. + score := m.length - m.s + if nextEmit == m.s { + // If we do not have to emit literals, we save 1 byte + score++ + } + offset := m.s - m.offset + + return score - emitCopySize(offset, m.length) + } + + matchAt := func(offset, s int, first uint32) match { + if best.length != 0 && best.s-best.offset == s-offset { + // Don't retest if we have the same offset. + return match{offset: offset, s: s} + } + if load32(src, offset) != first { + return match{offset: offset, s: s} + } + m := match{offset: offset, s: s, length: 4 + offset} + s += 4 + for s <= sLimit { + if diff := load64(src, s) ^ load64(src, m.length); diff != 0 { + m.length += bits.TrailingZeros64(diff) >> 3 + break + } + s += 8 + m.length += 8 + } + m.length -= offset + m.score = score(m) + if m.score <= -m.s { + // Eliminate if no savings, we might find a better one. + m.length = 0 + } + return m + } + + bestOf := func(a, b match) match { + if b.length == 0 { + return a + } + if a.length == 0 { + return b + } + as := a.score + b.s + bs := b.score + a.s + if as >= bs { + return a + } + return b + } + + best = bestOf(matchAt(getCur(candidateL), s, uint32(cv)), matchAt(getPrev(candidateL), s, uint32(cv))) + best = bestOf(best, matchAt(getCur(candidateS), s, uint32(cv))) + best = bestOf(best, matchAt(getPrev(candidateS), s, uint32(cv))) + + { + best = bestOf(best, matchAt(s-repeat+1, s+1, uint32(cv>>8))) + if best.length > 0 { + // s+1 + nextShort := sTable[hash4(cv>>8, sTableBits)] + s := s + 1 + cv := load64(src, s) + nextLong := lTable[hash8(cv, lTableBits)] + best = bestOf(best, matchAt(getCur(nextShort), s, uint32(cv))) + best = bestOf(best, matchAt(getPrev(nextShort), s, uint32(cv))) + best = bestOf(best, matchAt(getCur(nextLong), s, uint32(cv))) + best = bestOf(best, matchAt(getPrev(nextLong), s, uint32(cv))) + // Repeat at + 2 + best = bestOf(best, matchAt(s-repeat+1, s+1, uint32(cv>>8))) + + // s+2 + if true { + nextShort = sTable[hash4(cv>>8, sTableBits)] + s++ + cv = load64(src, s) + nextLong = lTable[hash8(cv, lTableBits)] + best = bestOf(best, matchAt(getCur(nextShort), s, uint32(cv))) + best = bestOf(best, matchAt(getPrev(nextShort), s, uint32(cv))) + best = bestOf(best, matchAt(getCur(nextLong), s, uint32(cv))) + best = bestOf(best, matchAt(getPrev(nextLong), s, uint32(cv))) + } + // Search for a match at best match end, see if that is better. 
+ if sAt := best.s + best.length; sAt < sLimit { + sBack := best.s + backL := best.length + // Load initial values + cv = load64(src, sBack) + // Search for mismatch + next := lTable[hash8(load64(src, sAt), lTableBits)] + //next := sTable[hash4(load64(src, sAt), sTableBits)] + + if checkAt := getCur(next) - backL; checkAt > 0 { + best = bestOf(best, matchAt(checkAt, sBack, uint32(cv))) + } + if checkAt := getPrev(next) - backL; checkAt > 0 { + best = bestOf(best, matchAt(checkAt, sBack, uint32(cv))) + } + } + } + } + + // Update table + lTable[hashL] = uint64(s) | candidateL<<32 + sTable[hashS] = uint64(s) | candidateS<<32 + + if best.length > 0 { + break + } + + cv = load64(src, nextS) + s = nextS + } + + // Extend backwards, not needed for repeats... + s = best.s + if true { + for best.offset > 0 && s > nextEmit && src[best.offset-1] == src[s-1] { + best.offset-- + best.length++ + s-- + } + } + if false && best.offset >= s { + panic(fmt.Errorf("t %d >= s %d", best.offset, s)) + } + // Bail if we exceed the maximum size. + if d+(s-nextEmit) > dstLimit { + return 0 + } + + base := s + offset := s - best.offset + + s += best.length + + if offset > 65535 && s-base <= 5 { + // Bail if the match is equal or worse to the encoding. + s = best.s + 1 + if s >= sLimit { + goto emitRemainder + } + cv = load64(src, s) + continue + } + d += emitLiteral(dst[d:], src[nextEmit:base]) + d += emitCopyNoRepeat(dst[d:], offset, best.length) + repeat = offset + + nextEmit = s + if s >= sLimit { + goto emitRemainder + } + + if d > dstLimit { + // Do we have space for more, if not bail. + return 0 + } + // Fill tables... + for i := best.s + 1; i < s; i++ { + cv0 := load64(src, i) + long0 := hash8(cv0, lTableBits) + short0 := hash4(cv0, sTableBits) + lTable[long0] = uint64(i) | lTable[long0]<<32 + sTable[short0] = uint64(i) | sTable[short0]<<32 + } + cv = load64(src, s) + } + +emitRemainder: + if nextEmit < len(src) { + // Bail if we exceed the maximum size. + if d+len(src)-nextEmit > dstLimit { + return 0 + } + d += emitLiteral(dst[d:], src[nextEmit:]) + } + return d +} + +// emitCopySize returns the size to encode the offset+length +// +// It assumes that: +// 1 <= offset && offset <= math.MaxUint32 +// 4 <= length && length <= 1 << 24 +func emitCopySize(offset, length int) int { + if offset >= 65536 { + i := 0 + if length > 64 { + length -= 64 + if length >= 4 { + // Emit remaining as repeats + return 5 + emitRepeatSize(offset, length) + } + i = 5 + } + if length == 0 { + return i + } + return i + 5 + } + + // Offset no more than 2 bytes. + if length > 64 { + // Emit remaining as repeats, at least 4 bytes remain. + return 3 + emitRepeatSize(offset, length-60) + } + if length >= 12 || offset >= 2048 { + return 3 + } + // Emit the remaining copy, encoded as 2 bytes. + return 2 +} + +// emitRepeatSize returns the number of bytes required to encode a repeat. 
+// Length must be at least 4 and < 1<<24 +func emitRepeatSize(offset, length int) int { + // Repeat offset, make length cheaper + if length <= 4+4 || (length < 8+4 && offset < 2048) { + return 2 + } + if length < (1<<8)+4+4 { + return 3 + } + if length < (1<<16)+(1<<8)+4 { + return 4 + } + const maxRepeat = (1 << 24) - 1 + length -= (1 << 16) - 4 + left := 0 + if length > maxRepeat { + left = length - maxRepeat + 4 + length = maxRepeat - 4 + } + if left > 0 { + return 5 + emitRepeatSize(offset, left) + } + return 5 +} diff --git a/vendor/github.com/klauspost/compress/s2/encode_better.go b/vendor/github.com/klauspost/compress/s2/encode_better.go new file mode 100644 index 0000000000000..943215b8ae86b --- /dev/null +++ b/vendor/github.com/klauspost/compress/s2/encode_better.go @@ -0,0 +1,431 @@ +// Copyright 2016 The Snappy-Go Authors. All rights reserved. +// Copyright (c) 2019 Klaus Post. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package s2 + +import ( + "math/bits" +) + +// hash4 returns the hash of the lowest 4 bytes of u to fit in a hash table with h bits. +// Preferably h should be a constant and should always be <32. +func hash4(u uint64, h uint8) uint32 { + const prime4bytes = 2654435761 + return (uint32(u) * prime4bytes) >> ((32 - h) & 31) +} + +// hash5 returns the hash of the lowest 5 bytes of u to fit in a hash table with h bits. +// Preferably h should be a constant and should always be <64. +func hash5(u uint64, h uint8) uint32 { + const prime5bytes = 889523592379 + return uint32(((u << (64 - 40)) * prime5bytes) >> ((64 - h) & 63)) +} + +// hash7 returns the hash of the lowest 7 bytes of u to fit in a hash table with h bits. +// Preferably h should be a constant and should always be <64. +func hash7(u uint64, h uint8) uint32 { + const prime7bytes = 58295818150454627 + return uint32(((u << (64 - 56)) * prime7bytes) >> ((64 - h) & 63)) +} + +// hash8 returns the hash of u to fit in a hash table with h bits. +// Preferably h should be a constant and should always be <64. +func hash8(u uint64, h uint8) uint32 { + const prime8bytes = 0xcf1bbcdcb7a56463 + return uint32((u * prime8bytes) >> ((64 - h) & 63)) +} + +// encodeBlockBetter encodes a non-empty src to a guaranteed-large-enough dst. It +// assumes that the varint-encoded length of the decompressed bytes has already +// been written. +// +// It also assumes that: +// len(dst) >= MaxEncodedLen(len(src)) && +// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize +func encodeBlockBetterGo(dst, src []byte) (d int) { + // sLimit is when to stop looking for offset/length copies. The inputMargin + // lets us use a fast path for emitLiteral in the main loop, while we are + // looking for copies. + sLimit := len(src) - inputMargin + if len(src) < minNonLiteralBlockSize { + return 0 + } + + // Initialize the hash tables. + const ( + // Long hash matches. + lTableBits = 16 + maxLTableSize = 1 << lTableBits + + // Short hash matches. + sTableBits = 14 + maxSTableSize = 1 << sTableBits + ) + + var lTable [maxLTableSize]uint32 + var sTable [maxSTableSize]uint32 + + // Bail if we can't compress to at least this. + dstLimit := len(src) - len(src)>>5 - 6 + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := 0 + + // The encoded form must start with a literal, as there are no previous + // bytes to copy, so we start looking for hash matches at s == 1. 
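+	// Two tables are kept: a long (7-byte) hash table for high-quality
+	// matches and a short (4-byte) table as a fallback.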
+ s := 1 + cv := load64(src, s) + + // We initialize repeat to 0, so we never match on first attempt + repeat := 0 + + for { + candidateL := 0 + nextS := 0 + for { + // Next src position to check + nextS = s + (s-nextEmit)>>7 + 1 + if nextS > sLimit { + goto emitRemainder + } + hashL := hash7(cv, lTableBits) + hashS := hash4(cv, sTableBits) + candidateL = int(lTable[hashL]) + candidateS := int(sTable[hashS]) + lTable[hashL] = uint32(s) + sTable[hashS] = uint32(s) + + // Check repeat at offset checkRep. + const checkRep = 1 + if false && uint32(cv>>(checkRep*8)) == load32(src, s-repeat+checkRep) { + base := s + checkRep + // Extend back + for i := base - repeat; base > nextEmit && i > 0 && src[i-1] == src[base-1]; { + i-- + base-- + } + d += emitLiteral(dst[d:], src[nextEmit:base]) + + // Extend forward + candidate := s - repeat + 4 + checkRep + s += 4 + checkRep + for s < len(src) { + if len(src)-s < 8 { + if src[s] == src[candidate] { + s++ + candidate++ + continue + } + break + } + if diff := load64(src, s) ^ load64(src, candidate); diff != 0 { + s += bits.TrailingZeros64(diff) >> 3 + break + } + s += 8 + candidate += 8 + } + if nextEmit > 0 { + // same as `add := emitCopy(dst[d:], repeat, s-base)` but skips storing offset. + d += emitRepeat(dst[d:], repeat, s-base) + } else { + // First match, cannot be repeat. + d += emitCopy(dst[d:], repeat, s-base) + } + nextEmit = s + if s >= sLimit { + goto emitRemainder + } + + cv = load64(src, s) + continue + } + + if uint32(cv) == load32(src, candidateL) { + break + } + + // Check our short candidate + if uint32(cv) == load32(src, candidateS) { + // Try a long candidate at s+1 + hashL = hash7(cv>>8, lTableBits) + candidateL = int(lTable[hashL]) + lTable[hashL] = uint32(s + 1) + if uint32(cv>>8) == load32(src, candidateL) { + s++ + break + } + // Use our short candidate. + candidateL = candidateS + break + } + + cv = load64(src, nextS) + s = nextS + } + + // Extend backwards + for candidateL > 0 && s > nextEmit && src[candidateL-1] == src[s-1] { + candidateL-- + s-- + } + + // Bail if we exceed the maximum size. + if d+(s-nextEmit) > dstLimit { + return 0 + } + + base := s + offset := base - candidateL + + // Extend the 4-byte match as long as possible. + s += 4 + candidateL += 4 + for s < len(src) { + if len(src)-s < 8 { + if src[s] == src[candidateL] { + s++ + candidateL++ + continue + } + break + } + if diff := load64(src, s) ^ load64(src, candidateL); diff != 0 { + s += bits.TrailingZeros64(diff) >> 3 + break + } + s += 8 + candidateL += 8 + } + + if offset > 65535 && s-base <= 5 && repeat != offset { + // Bail if the match is equal or worse to the encoding. + s = nextS + 1 + if s >= sLimit { + goto emitRemainder + } + cv = load64(src, s) + continue + } + + d += emitLiteral(dst[d:], src[nextEmit:base]) + if repeat == offset { + d += emitRepeat(dst[d:], offset, s-base) + } else { + d += emitCopy(dst[d:], offset, s-base) + repeat = offset + } + + nextEmit = s + if s >= sLimit { + goto emitRemainder + } + + if d > dstLimit { + // Do we have space for more, if not bail. 
+ return 0 + } + // Index match start+1 (long) and start+2 (short) + index0 := base + 1 + // Index match end-2 (long) and end-1 (short) + index1 := s - 2 + + cv0 := load64(src, index0) + cv1 := load64(src, index1) + cv = load64(src, s) + lTable[hash7(cv0, lTableBits)] = uint32(index0) + lTable[hash7(cv0>>8, lTableBits)] = uint32(index0 + 1) + lTable[hash7(cv1, lTableBits)] = uint32(index1) + lTable[hash7(cv1>>8, lTableBits)] = uint32(index1 + 1) + sTable[hash4(cv0>>8, sTableBits)] = uint32(index0 + 1) + sTable[hash4(cv0>>16, sTableBits)] = uint32(index0 + 2) + sTable[hash4(cv1>>8, sTableBits)] = uint32(index1 + 1) + } + +emitRemainder: + if nextEmit < len(src) { + // Bail if we exceed the maximum size. + if d+len(src)-nextEmit > dstLimit { + return 0 + } + d += emitLiteral(dst[d:], src[nextEmit:]) + } + return d +} + +// encodeBlockBetterSnappyGo encodes a non-empty src to a guaranteed-large-enough dst. It +// assumes that the varint-encoded length of the decompressed bytes has already +// been written. +// +// It also assumes that: +// len(dst) >= MaxEncodedLen(len(src)) && +// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize +func encodeBlockBetterSnappyGo(dst, src []byte) (d int) { + // sLimit is when to stop looking for offset/length copies. The inputMargin + // lets us use a fast path for emitLiteral in the main loop, while we are + // looking for copies. + sLimit := len(src) - inputMargin + if len(src) < minNonLiteralBlockSize { + return 0 + } + + // Initialize the hash tables. + const ( + // Long hash matches. + lTableBits = 16 + maxLTableSize = 1 << lTableBits + + // Short hash matches. + sTableBits = 14 + maxSTableSize = 1 << sTableBits + ) + + var lTable [maxLTableSize]uint32 + var sTable [maxSTableSize]uint32 + + // Bail if we can't compress to at least this. + dstLimit := len(src) - len(src)>>5 - 6 + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := 0 + + // The encoded form must start with a literal, as there are no previous + // bytes to copy, so we start looking for hash matches at s == 1. + s := 1 + cv := load64(src, s) + + // We initialize repeat to 0, so we never match on first attempt + repeat := 0 + const maxSkip = 100 + + for { + candidateL := 0 + nextS := 0 + for { + // Next src position to check + nextS = (s-nextEmit)>>7 + 1 + if nextS > maxSkip { + nextS = s + maxSkip + } else { + nextS += s + } + + if nextS > sLimit { + goto emitRemainder + } + hashL := hash7(cv, lTableBits) + hashS := hash4(cv, sTableBits) + candidateL = int(lTable[hashL]) + candidateS := int(sTable[hashS]) + lTable[hashL] = uint32(s) + sTable[hashS] = uint32(s) + + if uint32(cv) == load32(src, candidateL) { + break + } + + // Check our short candidate + if uint32(cv) == load32(src, candidateS) { + // Try a long candidate at s+1 + hashL = hash7(cv>>8, lTableBits) + candidateL = int(lTable[hashL]) + lTable[hashL] = uint32(s + 1) + if uint32(cv>>8) == load32(src, candidateL) { + s++ + break + } + // Use our short candidate. + candidateL = candidateS + break + } + + cv = load64(src, nextS) + s = nextS + } + + // Extend backwards + for candidateL > 0 && s > nextEmit && src[candidateL-1] == src[s-1] { + candidateL-- + s-- + } + + // Bail if we exceed the maximum size. + if d+(s-nextEmit) > dstLimit { + return 0 + } + + base := s + offset := base - candidateL + + // Extend the 4-byte match as long as possible. 
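+		// Compare eight bytes at a time; TrailingZeros64 of the XOR
+		// difference locates the first mismatching byte.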
+		s += 4
+		candidateL += 4
+		for s < len(src) {
+			if len(src)-s < 8 {
+				if src[s] == src[candidateL] {
+					s++
+					candidateL++
+					continue
+				}
+				break
+			}
+			if diff := load64(src, s) ^ load64(src, candidateL); diff != 0 {
+				s += bits.TrailingZeros64(diff) >> 3
+				break
+			}
+			s += 8
+			candidateL += 8
+		}
+
+		if offset > 65535 && s-base <= 5 && repeat != offset {
+			// Bail if the match is equal to or worse than the cost of encoding it.
+			s = nextS + 1
+			if s >= sLimit {
+				goto emitRemainder
+			}
+			cv = load64(src, s)
+			continue
+		}
+
+		d += emitLiteral(dst[d:], src[nextEmit:base])
+		d += emitCopyNoRepeat(dst[d:], offset, s-base)
+		repeat = offset
+
+		nextEmit = s
+		if s >= sLimit {
+			goto emitRemainder
+		}
+
+		if d > dstLimit {
+			// Bail if we do not have space for more output.
+			return 0
+		}
+		// Index match start+1 (long) and start+2 (short)
+		index0 := base + 1
+		// Index match end-2 (long) and end-1 (short)
+		index1 := s - 2
+
+		cv0 := load64(src, index0)
+		cv1 := load64(src, index1)
+		cv = load64(src, s)
+		lTable[hash7(cv0, lTableBits)] = uint32(index0)
+		lTable[hash7(cv0>>8, lTableBits)] = uint32(index0 + 1)
+		lTable[hash7(cv1, lTableBits)] = uint32(index1)
+		lTable[hash7(cv1>>8, lTableBits)] = uint32(index1 + 1)
+		sTable[hash4(cv0>>8, sTableBits)] = uint32(index0 + 1)
+		sTable[hash4(cv0>>16, sTableBits)] = uint32(index0 + 2)
+		sTable[hash4(cv1>>8, sTableBits)] = uint32(index1 + 1)
+	}
+
+emitRemainder:
+	if nextEmit < len(src) {
+		// Bail if we exceed the maximum size.
+		if d+len(src)-nextEmit > dstLimit {
+			return 0
+		}
+		d += emitLiteral(dst[d:], src[nextEmit:])
+	}
+	return d
+}
diff --git a/vendor/github.com/klauspost/compress/s2/encode_go.go b/vendor/github.com/klauspost/compress/s2/encode_go.go
new file mode 100644
index 0000000000000..43d43534e4f4a
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/s2/encode_go.go
@@ -0,0 +1,298 @@
+//go:build !amd64 || appengine || !gc || noasm
+// +build !amd64 appengine !gc noasm
+
+package s2
+
+import (
+	"math/bits"
+)
+
+// encodeBlock encodes a non-empty src to a guaranteed-large-enough dst. It
+// assumes that the varint-encoded length of the decompressed bytes has already
+// been written.
+//
+// It also assumes that:
+//	len(dst) >= MaxEncodedLen(len(src))
+func encodeBlock(dst, src []byte) (d int) {
+	if len(src) < minNonLiteralBlockSize {
+		return 0
+	}
+	return encodeBlockGo(dst, src)
+}
+
+// encodeBlockBetter encodes a non-empty src to a guaranteed-large-enough dst. It
+// assumes that the varint-encoded length of the decompressed bytes has already
+// been written.
+//
+// It also assumes that:
+//	len(dst) >= MaxEncodedLen(len(src))
+func encodeBlockBetter(dst, src []byte) (d int) {
+	return encodeBlockBetterGo(dst, src)
+}
+
+// encodeBlockBetterSnappy encodes a non-empty src to a guaranteed-large-enough dst. It
+// assumes that the varint-encoded length of the decompressed bytes has already
+// been written.
+//
+// It also assumes that:
+//	len(dst) >= MaxEncodedLen(len(src))
+func encodeBlockBetterSnappy(dst, src []byte) (d int) {
+	return encodeBlockBetterSnappyGo(dst, src)
+}
+
+// encodeBlockSnappy encodes a non-empty src to a guaranteed-large-enough dst. It
+// assumes that the varint-encoded length of the decompressed bytes has already
+// been written.
+//
+// It also assumes that:
+//	len(dst) >= MaxEncodedLen(len(src))
+func encodeBlockSnappy(dst, src []byte) (d int) {
+	if len(src) < minNonLiteralBlockSize {
+		return 0
+	}
+	return encodeBlockSnappyGo(dst, src)
+}
+
+// emitLiteral writes a literal chunk and returns the number of bytes written.
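+// Literal lengths up to 60 are encoded directly in the tag byte; longer
+// literals store the length in one to four extra bytes, as in the Snappy
+// format.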
+// +// It assumes that: +// dst is long enough to hold the encoded bytes +// 0 <= len(lit) && len(lit) <= math.MaxUint32 +func emitLiteral(dst, lit []byte) int { + if len(lit) == 0 { + return 0 + } + const num = 63<<2 | tagLiteral + i, n := 0, uint(len(lit)-1) + switch { + case n < 60: + dst[0] = uint8(n)<<2 | tagLiteral + i = 1 + case n < 1<<8: + dst[1] = uint8(n) + dst[0] = 60<<2 | tagLiteral + i = 2 + case n < 1<<16: + dst[2] = uint8(n >> 8) + dst[1] = uint8(n) + dst[0] = 61<<2 | tagLiteral + i = 3 + case n < 1<<24: + dst[3] = uint8(n >> 16) + dst[2] = uint8(n >> 8) + dst[1] = uint8(n) + dst[0] = 62<<2 | tagLiteral + i = 4 + default: + dst[4] = uint8(n >> 24) + dst[3] = uint8(n >> 16) + dst[2] = uint8(n >> 8) + dst[1] = uint8(n) + dst[0] = 63<<2 | tagLiteral + i = 5 + } + return i + copy(dst[i:], lit) +} + +// emitRepeat writes a repeat chunk and returns the number of bytes written. +// Length must be at least 4 and < 1<<24 +func emitRepeat(dst []byte, offset, length int) int { + // Repeat offset, make length cheaper + length -= 4 + if length <= 4 { + dst[0] = uint8(length)<<2 | tagCopy1 + dst[1] = 0 + return 2 + } + if length < 8 && offset < 2048 { + // Encode WITH offset + dst[1] = uint8(offset) + dst[0] = uint8(offset>>8)<<5 | uint8(length)<<2 | tagCopy1 + return 2 + } + if length < (1<<8)+4 { + length -= 4 + dst[2] = uint8(length) + dst[1] = 0 + dst[0] = 5<<2 | tagCopy1 + return 3 + } + if length < (1<<16)+(1<<8) { + length -= 1 << 8 + dst[3] = uint8(length >> 8) + dst[2] = uint8(length >> 0) + dst[1] = 0 + dst[0] = 6<<2 | tagCopy1 + return 4 + } + const maxRepeat = (1 << 24) - 1 + length -= 1 << 16 + left := 0 + if length > maxRepeat { + left = length - maxRepeat + 4 + length = maxRepeat - 4 + } + dst[4] = uint8(length >> 16) + dst[3] = uint8(length >> 8) + dst[2] = uint8(length >> 0) + dst[1] = 0 + dst[0] = 7<<2 | tagCopy1 + if left > 0 { + return 5 + emitRepeat(dst[5:], offset, left) + } + return 5 +} + +// emitCopy writes a copy chunk and returns the number of bytes written. +// +// It assumes that: +// dst is long enough to hold the encoded bytes +// 1 <= offset && offset <= math.MaxUint32 +// 4 <= length && length <= 1 << 24 +func emitCopy(dst []byte, offset, length int) int { + if offset >= 65536 { + i := 0 + if length > 64 { + // Emit a length 64 copy, encoded as 5 bytes. + dst[4] = uint8(offset >> 24) + dst[3] = uint8(offset >> 16) + dst[2] = uint8(offset >> 8) + dst[1] = uint8(offset) + dst[0] = 63<<2 | tagCopy4 + length -= 64 + if length >= 4 { + // Emit remaining as repeats + return 5 + emitRepeat(dst[5:], offset, length) + } + i = 5 + } + if length == 0 { + return i + } + // Emit a copy, offset encoded as 4 bytes. + dst[i+0] = uint8(length-1)<<2 | tagCopy4 + dst[i+1] = uint8(offset) + dst[i+2] = uint8(offset >> 8) + dst[i+3] = uint8(offset >> 16) + dst[i+4] = uint8(offset >> 24) + return i + 5 + } + + // Offset no more than 2 bytes. + if length > 64 { + // Emit a length 60 copy, encoded as 3 bytes. + // Emit remaining as repeat value (minimum 4 bytes). + dst[2] = uint8(offset >> 8) + dst[1] = uint8(offset) + dst[0] = 59<<2 | tagCopy2 + length -= 60 + // Emit remaining as repeats, at least 4 bytes remain. + return 3 + emitRepeat(dst[3:], offset, length) + } + if length >= 12 || offset >= 2048 { + // Emit the remaining copy, encoded as 3 bytes. + dst[2] = uint8(offset >> 8) + dst[1] = uint8(offset) + dst[0] = uint8(length-1)<<2 | tagCopy2 + return 3 + } + // Emit the remaining copy, encoded as 2 bytes. 
+	dst[1] = uint8(offset)
+	dst[0] = uint8(offset>>8)<<5 | uint8(length-4)<<2 | tagCopy1
+	return 2
+}
+
+// emitCopyNoRepeat writes a copy chunk and returns the number of bytes written.
+//
+// It assumes that:
+//	dst is long enough to hold the encoded bytes
+//	1 <= offset && offset <= math.MaxUint32
+//	4 <= length && length <= 1 << 24
+func emitCopyNoRepeat(dst []byte, offset, length int) int {
+	if offset >= 65536 {
+		i := 0
+		if length > 64 {
+			// Emit a length 64 copy, encoded as 5 bytes.
+			dst[4] = uint8(offset >> 24)
+			dst[3] = uint8(offset >> 16)
+			dst[2] = uint8(offset >> 8)
+			dst[1] = uint8(offset)
+			dst[0] = 63<<2 | tagCopy4
+			length -= 64
+			if length >= 4 {
+				// Emit the remainder as extra copy chunks.
+				return 5 + emitCopyNoRepeat(dst[5:], offset, length)
+			}
+			i = 5
+		}
+		if length == 0 {
+			return i
+		}
+		// Emit a copy, offset encoded as 4 bytes.
+		dst[i+0] = uint8(length-1)<<2 | tagCopy4
+		dst[i+1] = uint8(offset)
+		dst[i+2] = uint8(offset >> 8)
+		dst[i+3] = uint8(offset >> 16)
+		dst[i+4] = uint8(offset >> 24)
+		return i + 5
+	}
+
+	// Offset no more than 2 bytes.
+	if length > 64 {
+		// Emit a length 60 copy, encoded as 3 bytes.
+		// Emit the remainder as extra copy chunks (minimum 4 bytes).
+		dst[2] = uint8(offset >> 8)
+		dst[1] = uint8(offset)
+		dst[0] = 59<<2 | tagCopy2
+		length -= 60
+		// Emit the remainder as extra copy chunks; at least 4 bytes remain.
+		return 3 + emitCopyNoRepeat(dst[3:], offset, length)
+	}
+	if length >= 12 || offset >= 2048 {
+		// Emit the remaining copy, encoded as 3 bytes.
+		dst[2] = uint8(offset >> 8)
+		dst[1] = uint8(offset)
+		dst[0] = uint8(length-1)<<2 | tagCopy2
+		return 3
+	}
+	// Emit the remaining copy, encoded as 2 bytes.
+	dst[1] = uint8(offset)
+	dst[0] = uint8(offset>>8)<<5 | uint8(length-4)<<2 | tagCopy1
+	return 2
+}
+
+// matchLen returns how many bytes match in a and b.
+//
+// It assumes that:
+//	len(a) <= len(b)
+//
+func matchLen(a []byte, b []byte) int {
+	b = b[:len(a)]
+	var checked int
+	if len(a) > 4 {
+		// Try 4 bytes first
+		if diff := load32(a, 0) ^ load32(b, 0); diff != 0 {
+			return bits.TrailingZeros32(diff) >> 3
+		}
+		// Switch to 8 byte matching.
+		checked = 4
+		a = a[4:]
+		b = b[4:]
+		for len(a) >= 8 {
+			b = b[:len(a)]
+			if diff := load64(a, 0) ^ load64(b, 0); diff != 0 {
+				return checked + (bits.TrailingZeros64(diff) >> 3)
+			}
+			checked += 8
+			a = a[8:]
+			b = b[8:]
+		}
+	}
+	b = b[:len(a)]
+	for i := range a {
+		if a[i] != b[i] {
+			return i + checked
+		}
+	}
+	return len(a) + checked
+}
diff --git a/vendor/github.com/klauspost/compress/s2/encodeblock_amd64.go b/vendor/github.com/klauspost/compress/s2/encodeblock_amd64.go
new file mode 100644
index 0000000000000..c8cf7b69e81d6
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/s2/encodeblock_amd64.go
@@ -0,0 +1,189 @@
+// Code generated by command: go run gen.go -out ../encodeblock_amd64.s -stubs ../encodeblock_amd64.go -pkg=s2. DO NOT EDIT.
+
+//go:build !appengine && !noasm && gc
+// +build !appengine,!noasm,gc
+
+package s2
+
+// encodeBlockAsm encodes a non-empty src to a guaranteed-large-enough dst.
+// Maximum input 4294967295 bytes.
+// It assumes that the varint-encoded length of the decompressed bytes has already been written.
+//
+//go:noescape
+func encodeBlockAsm(dst []byte, src []byte) int
+
+// encodeBlockAsm4MB encodes a non-empty src to a guaranteed-large-enough dst.
+// Maximum input 4194304 bytes.
+// It assumes that the varint-encoded length of the decompressed bytes has already been written.
+// +//go:noescape +func encodeBlockAsm4MB(dst []byte, src []byte) int + +// encodeBlockAsm12B encodes a non-empty src to a guaranteed-large-enough dst. +// Maximum input 16383 bytes. +// It assumes that the varint-encoded length of the decompressed bytes has already been written. +// +//go:noescape +func encodeBlockAsm12B(dst []byte, src []byte) int + +// encodeBlockAsm10B encodes a non-empty src to a guaranteed-large-enough dst. +// Maximum input 4095 bytes. +// It assumes that the varint-encoded length of the decompressed bytes has already been written. +// +//go:noescape +func encodeBlockAsm10B(dst []byte, src []byte) int + +// encodeBlockAsm8B encodes a non-empty src to a guaranteed-large-enough dst. +// Maximum input 511 bytes. +// It assumes that the varint-encoded length of the decompressed bytes has already been written. +// +//go:noescape +func encodeBlockAsm8B(dst []byte, src []byte) int + +// encodeBetterBlockAsm encodes a non-empty src to a guaranteed-large-enough dst. +// Maximum input 4294967295 bytes. +// It assumes that the varint-encoded length of the decompressed bytes has already been written. +// +//go:noescape +func encodeBetterBlockAsm(dst []byte, src []byte) int + +// encodeBetterBlockAsm4MB encodes a non-empty src to a guaranteed-large-enough dst. +// Maximum input 4194304 bytes. +// It assumes that the varint-encoded length of the decompressed bytes has already been written. +// +//go:noescape +func encodeBetterBlockAsm4MB(dst []byte, src []byte) int + +// encodeBetterBlockAsm12B encodes a non-empty src to a guaranteed-large-enough dst. +// Maximum input 16383 bytes. +// It assumes that the varint-encoded length of the decompressed bytes has already been written. +// +//go:noescape +func encodeBetterBlockAsm12B(dst []byte, src []byte) int + +// encodeBetterBlockAsm10B encodes a non-empty src to a guaranteed-large-enough dst. +// Maximum input 4095 bytes. +// It assumes that the varint-encoded length of the decompressed bytes has already been written. +// +//go:noescape +func encodeBetterBlockAsm10B(dst []byte, src []byte) int + +// encodeBetterBlockAsm8B encodes a non-empty src to a guaranteed-large-enough dst. +// Maximum input 511 bytes. +// It assumes that the varint-encoded length of the decompressed bytes has already been written. +// +//go:noescape +func encodeBetterBlockAsm8B(dst []byte, src []byte) int + +// encodeSnappyBlockAsm encodes a non-empty src to a guaranteed-large-enough dst. +// Maximum input 4294967295 bytes. +// It assumes that the varint-encoded length of the decompressed bytes has already been written. +// +//go:noescape +func encodeSnappyBlockAsm(dst []byte, src []byte) int + +// encodeSnappyBlockAsm64K encodes a non-empty src to a guaranteed-large-enough dst. +// Maximum input 65535 bytes. +// It assumes that the varint-encoded length of the decompressed bytes has already been written. +// +//go:noescape +func encodeSnappyBlockAsm64K(dst []byte, src []byte) int + +// encodeSnappyBlockAsm12B encodes a non-empty src to a guaranteed-large-enough dst. +// Maximum input 16383 bytes. +// It assumes that the varint-encoded length of the decompressed bytes has already been written. +// +//go:noescape +func encodeSnappyBlockAsm12B(dst []byte, src []byte) int + +// encodeSnappyBlockAsm10B encodes a non-empty src to a guaranteed-large-enough dst. +// Maximum input 4095 bytes. +// It assumes that the varint-encoded length of the decompressed bytes has already been written. 
+// +//go:noescape +func encodeSnappyBlockAsm10B(dst []byte, src []byte) int + +// encodeSnappyBlockAsm8B encodes a non-empty src to a guaranteed-large-enough dst. +// Maximum input 511 bytes. +// It assumes that the varint-encoded length of the decompressed bytes has already been written. +// +//go:noescape +func encodeSnappyBlockAsm8B(dst []byte, src []byte) int + +// encodeSnappyBetterBlockAsm encodes a non-empty src to a guaranteed-large-enough dst. +// Maximum input 4294967295 bytes. +// It assumes that the varint-encoded length of the decompressed bytes has already been written. +// +//go:noescape +func encodeSnappyBetterBlockAsm(dst []byte, src []byte) int + +// encodeSnappyBetterBlockAsm64K encodes a non-empty src to a guaranteed-large-enough dst. +// Maximum input 65535 bytes. +// It assumes that the varint-encoded length of the decompressed bytes has already been written. +// +//go:noescape +func encodeSnappyBetterBlockAsm64K(dst []byte, src []byte) int + +// encodeSnappyBetterBlockAsm12B encodes a non-empty src to a guaranteed-large-enough dst. +// Maximum input 16383 bytes. +// It assumes that the varint-encoded length of the decompressed bytes has already been written. +// +//go:noescape +func encodeSnappyBetterBlockAsm12B(dst []byte, src []byte) int + +// encodeSnappyBetterBlockAsm10B encodes a non-empty src to a guaranteed-large-enough dst. +// Maximum input 4095 bytes. +// It assumes that the varint-encoded length of the decompressed bytes has already been written. +// +//go:noescape +func encodeSnappyBetterBlockAsm10B(dst []byte, src []byte) int + +// encodeSnappyBetterBlockAsm8B encodes a non-empty src to a guaranteed-large-enough dst. +// Maximum input 511 bytes. +// It assumes that the varint-encoded length of the decompressed bytes has already been written. +// +//go:noescape +func encodeSnappyBetterBlockAsm8B(dst []byte, src []byte) int + +// emitLiteral writes a literal chunk and returns the number of bytes written. +// +// It assumes that: +// dst is long enough to hold the encoded bytes with margin of 0 bytes +// 0 <= len(lit) && len(lit) <= math.MaxUint32 +// +//go:noescape +func emitLiteral(dst []byte, lit []byte) int + +// emitRepeat writes a repeat chunk and returns the number of bytes written. +// Length must be at least 4 and < 1<<32 +// +//go:noescape +func emitRepeat(dst []byte, offset int, length int) int + +// emitCopy writes a copy chunk and returns the number of bytes written. +// +// It assumes that: +// dst is long enough to hold the encoded bytes +// 1 <= offset && offset <= math.MaxUint32 +// 4 <= length && length <= 1 << 24 +// +//go:noescape +func emitCopy(dst []byte, offset int, length int) int + +// emitCopyNoRepeat writes a copy chunk and returns the number of bytes written. 
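+// Unlike emitCopy, it never emits repeat chunks, so its output stays within
+// the plain Snappy format.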
+// +// It assumes that: +// dst is long enough to hold the encoded bytes +// 1 <= offset && offset <= math.MaxUint32 +// 4 <= length && length <= 1 << 24 +// +//go:noescape +func emitCopyNoRepeat(dst []byte, offset int, length int) int + +// matchLen returns how many bytes match in a and b +// +// It assumes that: +// len(a) <= len(b) +// +//go:noescape +func matchLen(a []byte, b []byte) int diff --git a/vendor/github.com/klauspost/compress/s2/encodeblock_amd64.s b/vendor/github.com/klauspost/compress/s2/encodeblock_amd64.s new file mode 100644 index 0000000000000..1ac65a0e352fa --- /dev/null +++ b/vendor/github.com/klauspost/compress/s2/encodeblock_amd64.s @@ -0,0 +1,15678 @@ +// Code generated by command: go run gen.go -out ../encodeblock_amd64.s -stubs ../encodeblock_amd64.go -pkg=s2. DO NOT EDIT. + +// +build !appengine +// +build !noasm +// +build gc + +#include "textflag.h" + +// func encodeBlockAsm(dst []byte, src []byte) int +// Requires: SSE2 +TEXT ·encodeBlockAsm(SB), $65560-56 + MOVQ dst_base+0(FP), AX + MOVQ $0x00000200, CX + LEAQ 24(SP), DX + PXOR X0, X0 + +zero_loop_encodeBlockAsm: + MOVOU X0, (DX) + MOVOU X0, 16(DX) + MOVOU X0, 32(DX) + MOVOU X0, 48(DX) + MOVOU X0, 64(DX) + MOVOU X0, 80(DX) + MOVOU X0, 96(DX) + MOVOU X0, 112(DX) + ADDQ $0x80, DX + DECQ CX + JNZ zero_loop_encodeBlockAsm + MOVL $0x00000000, 12(SP) + MOVQ src_len+32(FP), CX + LEAQ -9(CX), DX + LEAQ -8(CX), SI + MOVL SI, 8(SP) + SHRQ $0x05, CX + SUBL CX, DX + LEAQ (AX)(DX*1), DX + MOVQ DX, (SP) + MOVL $0x00000001, CX + MOVL CX, 16(SP) + MOVQ src_base+24(FP), DX + +search_loop_encodeBlockAsm: + MOVL CX, SI + SUBL 12(SP), SI + SHRL $0x06, SI + LEAL 4(CX)(SI*1), SI + CMPL SI, 8(SP) + JGE emit_remainder_encodeBlockAsm + MOVQ (DX)(CX*1), DI + MOVL SI, 20(SP) + MOVQ $0x0000cf1bbcdcbf9b, R9 + MOVQ DI, R10 + MOVQ DI, R11 + SHRQ $0x08, R11 + SHLQ $0x10, R10 + IMULQ R9, R10 + SHRQ $0x32, R10 + SHLQ $0x10, R11 + IMULQ R9, R11 + SHRQ $0x32, R11 + MOVL 24(SP)(R10*4), SI + MOVL 24(SP)(R11*4), R8 + MOVL CX, 24(SP)(R10*4) + LEAL 1(CX), R10 + MOVL R10, 24(SP)(R11*4) + MOVQ DI, R10 + SHRQ $0x10, R10 + SHLQ $0x10, R10 + IMULQ R9, R10 + SHRQ $0x32, R10 + MOVL CX, R9 + SUBL 16(SP), R9 + MOVL 1(DX)(R9*1), R11 + MOVQ DI, R9 + SHRQ $0x08, R9 + CMPL R9, R11 + JNE no_repeat_found_encodeBlockAsm + LEAL 1(CX), DI + MOVL 12(SP), R8 + MOVL DI, SI + SUBL 16(SP), SI + JZ repeat_extend_back_end_encodeBlockAsm + +repeat_extend_back_loop_encodeBlockAsm: + CMPL DI, R8 + JLE repeat_extend_back_end_encodeBlockAsm + MOVB -1(DX)(SI*1), BL + MOVB -1(DX)(DI*1), R9 + CMPB BL, R9 + JNE repeat_extend_back_end_encodeBlockAsm + LEAL -1(DI), DI + DECL SI + JNZ repeat_extend_back_loop_encodeBlockAsm + +repeat_extend_back_end_encodeBlockAsm: + MOVL 12(SP), SI + CMPL SI, DI + JEQ emit_literal_done_repeat_emit_encodeBlockAsm + MOVL DI, R9 + MOVL DI, 12(SP) + LEAQ (DX)(SI*1), R10 + SUBL SI, R9 + LEAL -1(R9), SI + CMPL SI, $0x3c + JLT one_byte_repeat_emit_encodeBlockAsm + CMPL SI, $0x00000100 + JLT two_bytes_repeat_emit_encodeBlockAsm + CMPL SI, $0x00010000 + JLT three_bytes_repeat_emit_encodeBlockAsm + CMPL SI, $0x01000000 + JLT four_bytes_repeat_emit_encodeBlockAsm + MOVB $0xfc, (AX) + MOVL SI, 1(AX) + ADDQ $0x05, AX + JMP memmove_long_repeat_emit_encodeBlockAsm + +four_bytes_repeat_emit_encodeBlockAsm: + MOVL SI, R11 + SHRL $0x10, R11 + MOVB $0xf8, (AX) + MOVW SI, 1(AX) + MOVB R11, 3(AX) + ADDQ $0x04, AX + JMP memmove_long_repeat_emit_encodeBlockAsm + +three_bytes_repeat_emit_encodeBlockAsm: + MOVB $0xf4, (AX) + MOVW SI, 1(AX) + ADDQ $0x03, AX + JMP 
memmove_long_repeat_emit_encodeBlockAsm + +two_bytes_repeat_emit_encodeBlockAsm: + MOVB $0xf0, (AX) + MOVB SI, 1(AX) + ADDQ $0x02, AX + CMPL SI, $0x40 + JL memmove_repeat_emit_encodeBlockAsm + JMP memmove_long_repeat_emit_encodeBlockAsm + +one_byte_repeat_emit_encodeBlockAsm: + SHLB $0x02, SI + MOVB SI, (AX) + ADDQ $0x01, AX + +memmove_repeat_emit_encodeBlockAsm: + LEAQ (AX)(R9*1), SI + + // genMemMoveShort + CMPQ R9, $0x08 + JLE emit_lit_memmove_repeat_emit_encodeBlockAsm_memmove_move_8 + CMPQ R9, $0x10 + JBE emit_lit_memmove_repeat_emit_encodeBlockAsm_memmove_move_8through16 + CMPQ R9, $0x20 + JBE emit_lit_memmove_repeat_emit_encodeBlockAsm_memmove_move_17through32 + JMP emit_lit_memmove_repeat_emit_encodeBlockAsm_memmove_move_33through64 + +emit_lit_memmove_repeat_emit_encodeBlockAsm_memmove_move_8: + MOVQ (R10), R11 + MOVQ R11, (AX) + JMP memmove_end_copy_repeat_emit_encodeBlockAsm + +emit_lit_memmove_repeat_emit_encodeBlockAsm_memmove_move_8through16: + MOVQ (R10), R11 + MOVQ -8(R10)(R9*1), R10 + MOVQ R11, (AX) + MOVQ R10, -8(AX)(R9*1) + JMP memmove_end_copy_repeat_emit_encodeBlockAsm + +emit_lit_memmove_repeat_emit_encodeBlockAsm_memmove_move_17through32: + MOVOU (R10), X0 + MOVOU -16(R10)(R9*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(R9*1) + JMP memmove_end_copy_repeat_emit_encodeBlockAsm + +emit_lit_memmove_repeat_emit_encodeBlockAsm_memmove_move_33through64: + MOVOU (R10), X0 + MOVOU 16(R10), X1 + MOVOU -32(R10)(R9*1), X2 + MOVOU -16(R10)(R9*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R9*1) + MOVOU X3, -16(AX)(R9*1) + +memmove_end_copy_repeat_emit_encodeBlockAsm: + MOVQ SI, AX + JMP emit_literal_done_repeat_emit_encodeBlockAsm + +memmove_long_repeat_emit_encodeBlockAsm: + LEAQ (AX)(R9*1), SI + + // genMemMoveLong + MOVOU (R10), X0 + MOVOU 16(R10), X1 + MOVOU -32(R10)(R9*1), X2 + MOVOU -16(R10)(R9*1), X3 + MOVQ R9, R12 + SHRQ $0x05, R12 + MOVQ AX, R11 + ANDL $0x0000001f, R11 + MOVQ $0x00000040, R13 + SUBQ R11, R13 + DECQ R12 + JA emit_lit_memmove_long_repeat_emit_encodeBlockAsmlarge_forward_sse_loop_32 + LEAQ -32(R10)(R13*1), R11 + LEAQ -32(AX)(R13*1), R14 + +emit_lit_memmove_long_repeat_emit_encodeBlockAsmlarge_big_loop_back: + MOVOU (R11), X4 + MOVOU 16(R11), X5 + MOVOA X4, (R14) + MOVOA X5, 16(R14) + ADDQ $0x20, R14 + ADDQ $0x20, R11 + ADDQ $0x20, R13 + DECQ R12 + JNA emit_lit_memmove_long_repeat_emit_encodeBlockAsmlarge_big_loop_back + +emit_lit_memmove_long_repeat_emit_encodeBlockAsmlarge_forward_sse_loop_32: + MOVOU -32(R10)(R13*1), X4 + MOVOU -16(R10)(R13*1), X5 + MOVOA X4, -32(AX)(R13*1) + MOVOA X5, -16(AX)(R13*1) + ADDQ $0x20, R13 + CMPQ R9, R13 + JAE emit_lit_memmove_long_repeat_emit_encodeBlockAsmlarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R9*1) + MOVOU X3, -16(AX)(R9*1) + MOVQ SI, AX + +emit_literal_done_repeat_emit_encodeBlockAsm: + ADDL $0x05, CX + MOVL CX, SI + SUBL 16(SP), SI + MOVQ src_len+32(FP), R9 + SUBL CX, R9 + LEAQ (DX)(CX*1), R10 + LEAQ (DX)(SI*1), SI + + // matchLen + XORL R12, R12 + CMPL R9, $0x08 + JL matchlen_single_repeat_extend_encodeBlockAsm + +matchlen_loopback_repeat_extend_encodeBlockAsm: + MOVQ (R10)(R12*1), R11 + XORQ (SI)(R12*1), R11 + TESTQ R11, R11 + JZ matchlen_loop_repeat_extend_encodeBlockAsm + BSFQ R11, R11 + SARQ $0x03, R11 + LEAL (R12)(R11*1), R12 + JMP repeat_extend_forward_end_encodeBlockAsm + +matchlen_loop_repeat_extend_encodeBlockAsm: + LEAL -8(R9), R9 + LEAL 8(R12), R12 + CMPL R9, $0x08 + JGE matchlen_loopback_repeat_extend_encodeBlockAsm + 
+matchlen_single_repeat_extend_encodeBlockAsm: + TESTL R9, R9 + JZ repeat_extend_forward_end_encodeBlockAsm + +matchlen_single_loopback_repeat_extend_encodeBlockAsm: + MOVB (R10)(R12*1), R11 + CMPB (SI)(R12*1), R11 + JNE repeat_extend_forward_end_encodeBlockAsm + LEAL 1(R12), R12 + DECL R9 + JNZ matchlen_single_loopback_repeat_extend_encodeBlockAsm + +repeat_extend_forward_end_encodeBlockAsm: + ADDL R12, CX + MOVL CX, SI + SUBL DI, SI + MOVL 16(SP), DI + TESTL R8, R8 + JZ repeat_as_copy_encodeBlockAsm + + // emitRepeat +emit_repeat_again_match_repeat_encodeBlockAsm: + MOVL SI, R8 + LEAL -4(SI), SI + CMPL R8, $0x08 + JLE repeat_two_match_repeat_encodeBlockAsm + CMPL R8, $0x0c + JGE cant_repeat_two_offset_match_repeat_encodeBlockAsm + CMPL DI, $0x00000800 + JLT repeat_two_offset_match_repeat_encodeBlockAsm + +cant_repeat_two_offset_match_repeat_encodeBlockAsm: + CMPL SI, $0x00000104 + JLT repeat_three_match_repeat_encodeBlockAsm + CMPL SI, $0x00010100 + JLT repeat_four_match_repeat_encodeBlockAsm + CMPL SI, $0x0100ffff + JLT repeat_five_match_repeat_encodeBlockAsm + LEAL -16842747(SI), SI + MOVW $0x001d, (AX) + MOVW $0xfffb, 2(AX) + MOVB $0xff, 4(AX) + ADDQ $0x05, AX + JMP emit_repeat_again_match_repeat_encodeBlockAsm + +repeat_five_match_repeat_encodeBlockAsm: + LEAL -65536(SI), SI + MOVL SI, DI + MOVW $0x001d, (AX) + MOVW SI, 2(AX) + SARL $0x10, DI + MOVB DI, 4(AX) + ADDQ $0x05, AX + JMP repeat_end_emit_encodeBlockAsm + +repeat_four_match_repeat_encodeBlockAsm: + LEAL -256(SI), SI + MOVW $0x0019, (AX) + MOVW SI, 2(AX) + ADDQ $0x04, AX + JMP repeat_end_emit_encodeBlockAsm + +repeat_three_match_repeat_encodeBlockAsm: + LEAL -4(SI), SI + MOVW $0x0015, (AX) + MOVB SI, 2(AX) + ADDQ $0x03, AX + JMP repeat_end_emit_encodeBlockAsm + +repeat_two_match_repeat_encodeBlockAsm: + SHLL $0x02, SI + ORL $0x01, SI + MOVW SI, (AX) + ADDQ $0x02, AX + JMP repeat_end_emit_encodeBlockAsm + +repeat_two_offset_match_repeat_encodeBlockAsm: + XORQ R8, R8 + LEAL 1(R8)(SI*4), SI + MOVB DI, 1(AX) + SARL $0x08, DI + SHLL $0x05, DI + ORL DI, SI + MOVB SI, (AX) + ADDQ $0x02, AX + JMP repeat_end_emit_encodeBlockAsm + +repeat_as_copy_encodeBlockAsm: + // emitCopy + CMPL DI, $0x00010000 + JL two_byte_offset_repeat_as_copy_encodeBlockAsm + +four_bytes_loop_back_repeat_as_copy_encodeBlockAsm: + CMPL SI, $0x40 + JLE four_bytes_remain_repeat_as_copy_encodeBlockAsm + MOVB $0xff, (AX) + MOVL DI, 1(AX) + LEAL -64(SI), SI + ADDQ $0x05, AX + CMPL SI, $0x04 + JL four_bytes_remain_repeat_as_copy_encodeBlockAsm + + // emitRepeat +emit_repeat_again_repeat_as_copy_encodeBlockAsm_emit_copy: + MOVL SI, R8 + LEAL -4(SI), SI + CMPL R8, $0x08 + JLE repeat_two_repeat_as_copy_encodeBlockAsm_emit_copy + CMPL R8, $0x0c + JGE cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm_emit_copy + CMPL DI, $0x00000800 + JLT repeat_two_offset_repeat_as_copy_encodeBlockAsm_emit_copy + +cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm_emit_copy: + CMPL SI, $0x00000104 + JLT repeat_three_repeat_as_copy_encodeBlockAsm_emit_copy + CMPL SI, $0x00010100 + JLT repeat_four_repeat_as_copy_encodeBlockAsm_emit_copy + CMPL SI, $0x0100ffff + JLT repeat_five_repeat_as_copy_encodeBlockAsm_emit_copy + LEAL -16842747(SI), SI + MOVW $0x001d, (AX) + MOVW $0xfffb, 2(AX) + MOVB $0xff, 4(AX) + ADDQ $0x05, AX + JMP emit_repeat_again_repeat_as_copy_encodeBlockAsm_emit_copy + +repeat_five_repeat_as_copy_encodeBlockAsm_emit_copy: + LEAL -65536(SI), SI + MOVL SI, DI + MOVW $0x001d, (AX) + MOVW SI, 2(AX) + SARL $0x10, DI + MOVB DI, 4(AX) + ADDQ $0x05, AX + JMP 
repeat_end_emit_encodeBlockAsm + +repeat_four_repeat_as_copy_encodeBlockAsm_emit_copy: + LEAL -256(SI), SI + MOVW $0x0019, (AX) + MOVW SI, 2(AX) + ADDQ $0x04, AX + JMP repeat_end_emit_encodeBlockAsm + +repeat_three_repeat_as_copy_encodeBlockAsm_emit_copy: + LEAL -4(SI), SI + MOVW $0x0015, (AX) + MOVB SI, 2(AX) + ADDQ $0x03, AX + JMP repeat_end_emit_encodeBlockAsm + +repeat_two_repeat_as_copy_encodeBlockAsm_emit_copy: + SHLL $0x02, SI + ORL $0x01, SI + MOVW SI, (AX) + ADDQ $0x02, AX + JMP repeat_end_emit_encodeBlockAsm + +repeat_two_offset_repeat_as_copy_encodeBlockAsm_emit_copy: + XORQ R8, R8 + LEAL 1(R8)(SI*4), SI + MOVB DI, 1(AX) + SARL $0x08, DI + SHLL $0x05, DI + ORL DI, SI + MOVB SI, (AX) + ADDQ $0x02, AX + JMP repeat_end_emit_encodeBlockAsm + JMP four_bytes_loop_back_repeat_as_copy_encodeBlockAsm + +four_bytes_remain_repeat_as_copy_encodeBlockAsm: + TESTL SI, SI + JZ repeat_end_emit_encodeBlockAsm + MOVB $0x03, BL + LEAL -4(BX)(SI*4), SI + MOVB SI, (AX) + MOVL DI, 1(AX) + ADDQ $0x05, AX + JMP repeat_end_emit_encodeBlockAsm + +two_byte_offset_repeat_as_copy_encodeBlockAsm: + CMPL SI, $0x40 + JLE two_byte_offset_short_repeat_as_copy_encodeBlockAsm + MOVB $0xee, (AX) + MOVW DI, 1(AX) + LEAL -60(SI), SI + ADDQ $0x03, AX + + // emitRepeat +emit_repeat_again_repeat_as_copy_encodeBlockAsm_emit_copy_short: + MOVL SI, R8 + LEAL -4(SI), SI + CMPL R8, $0x08 + JLE repeat_two_repeat_as_copy_encodeBlockAsm_emit_copy_short + CMPL R8, $0x0c + JGE cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm_emit_copy_short + CMPL DI, $0x00000800 + JLT repeat_two_offset_repeat_as_copy_encodeBlockAsm_emit_copy_short + +cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm_emit_copy_short: + CMPL SI, $0x00000104 + JLT repeat_three_repeat_as_copy_encodeBlockAsm_emit_copy_short + CMPL SI, $0x00010100 + JLT repeat_four_repeat_as_copy_encodeBlockAsm_emit_copy_short + CMPL SI, $0x0100ffff + JLT repeat_five_repeat_as_copy_encodeBlockAsm_emit_copy_short + LEAL -16842747(SI), SI + MOVW $0x001d, (AX) + MOVW $0xfffb, 2(AX) + MOVB $0xff, 4(AX) + ADDQ $0x05, AX + JMP emit_repeat_again_repeat_as_copy_encodeBlockAsm_emit_copy_short + +repeat_five_repeat_as_copy_encodeBlockAsm_emit_copy_short: + LEAL -65536(SI), SI + MOVL SI, DI + MOVW $0x001d, (AX) + MOVW SI, 2(AX) + SARL $0x10, DI + MOVB DI, 4(AX) + ADDQ $0x05, AX + JMP repeat_end_emit_encodeBlockAsm + +repeat_four_repeat_as_copy_encodeBlockAsm_emit_copy_short: + LEAL -256(SI), SI + MOVW $0x0019, (AX) + MOVW SI, 2(AX) + ADDQ $0x04, AX + JMP repeat_end_emit_encodeBlockAsm + +repeat_three_repeat_as_copy_encodeBlockAsm_emit_copy_short: + LEAL -4(SI), SI + MOVW $0x0015, (AX) + MOVB SI, 2(AX) + ADDQ $0x03, AX + JMP repeat_end_emit_encodeBlockAsm + +repeat_two_repeat_as_copy_encodeBlockAsm_emit_copy_short: + SHLL $0x02, SI + ORL $0x01, SI + MOVW SI, (AX) + ADDQ $0x02, AX + JMP repeat_end_emit_encodeBlockAsm + +repeat_two_offset_repeat_as_copy_encodeBlockAsm_emit_copy_short: + XORQ R8, R8 + LEAL 1(R8)(SI*4), SI + MOVB DI, 1(AX) + SARL $0x08, DI + SHLL $0x05, DI + ORL DI, SI + MOVB SI, (AX) + ADDQ $0x02, AX + JMP repeat_end_emit_encodeBlockAsm + JMP two_byte_offset_repeat_as_copy_encodeBlockAsm + +two_byte_offset_short_repeat_as_copy_encodeBlockAsm: + CMPL SI, $0x0c + JGE emit_copy_three_repeat_as_copy_encodeBlockAsm + CMPL DI, $0x00000800 + JGE emit_copy_three_repeat_as_copy_encodeBlockAsm + MOVB $0x01, BL + LEAL -16(BX)(SI*4), SI + MOVB DI, 1(AX) + SHRL $0x08, DI + SHLL $0x05, DI + ORL DI, SI + MOVB SI, (AX) + ADDQ $0x02, AX + JMP repeat_end_emit_encodeBlockAsm + 
+emit_copy_three_repeat_as_copy_encodeBlockAsm: + MOVB $0x02, BL + LEAL -4(BX)(SI*4), SI + MOVB SI, (AX) + MOVW DI, 1(AX) + ADDQ $0x03, AX + +repeat_end_emit_encodeBlockAsm: + MOVL CX, 12(SP) + JMP search_loop_encodeBlockAsm + +no_repeat_found_encodeBlockAsm: + CMPL (DX)(SI*1), DI + JEQ candidate_match_encodeBlockAsm + SHRQ $0x08, DI + MOVL 24(SP)(R10*4), SI + LEAL 2(CX), R9 + CMPL (DX)(R8*1), DI + JEQ candidate2_match_encodeBlockAsm + MOVL R9, 24(SP)(R10*4) + SHRQ $0x08, DI + CMPL (DX)(SI*1), DI + JEQ candidate3_match_encodeBlockAsm + MOVL 20(SP), CX + JMP search_loop_encodeBlockAsm + +candidate3_match_encodeBlockAsm: + ADDL $0x02, CX + JMP candidate_match_encodeBlockAsm + +candidate2_match_encodeBlockAsm: + MOVL R9, 24(SP)(R10*4) + INCL CX + MOVL R8, SI + +candidate_match_encodeBlockAsm: + MOVL 12(SP), DI + TESTL SI, SI + JZ match_extend_back_end_encodeBlockAsm + +match_extend_back_loop_encodeBlockAsm: + CMPL CX, DI + JLE match_extend_back_end_encodeBlockAsm + MOVB -1(DX)(SI*1), BL + MOVB -1(DX)(CX*1), R8 + CMPB BL, R8 + JNE match_extend_back_end_encodeBlockAsm + LEAL -1(CX), CX + DECL SI + JZ match_extend_back_end_encodeBlockAsm + JMP match_extend_back_loop_encodeBlockAsm + +match_extend_back_end_encodeBlockAsm: + MOVL CX, DI + SUBL 12(SP), DI + LEAQ 5(AX)(DI*1), DI + CMPQ DI, (SP) + JL match_dst_size_check_encodeBlockAsm + MOVQ $0x00000000, ret+48(FP) + RET + +match_dst_size_check_encodeBlockAsm: + MOVL CX, DI + MOVL 12(SP), R8 + CMPL R8, DI + JEQ emit_literal_done_match_emit_encodeBlockAsm + MOVL DI, R9 + MOVL DI, 12(SP) + LEAQ (DX)(R8*1), DI + SUBL R8, R9 + LEAL -1(R9), R8 + CMPL R8, $0x3c + JLT one_byte_match_emit_encodeBlockAsm + CMPL R8, $0x00000100 + JLT two_bytes_match_emit_encodeBlockAsm + CMPL R8, $0x00010000 + JLT three_bytes_match_emit_encodeBlockAsm + CMPL R8, $0x01000000 + JLT four_bytes_match_emit_encodeBlockAsm + MOVB $0xfc, (AX) + MOVL R8, 1(AX) + ADDQ $0x05, AX + JMP memmove_long_match_emit_encodeBlockAsm + +four_bytes_match_emit_encodeBlockAsm: + MOVL R8, R10 + SHRL $0x10, R10 + MOVB $0xf8, (AX) + MOVW R8, 1(AX) + MOVB R10, 3(AX) + ADDQ $0x04, AX + JMP memmove_long_match_emit_encodeBlockAsm + +three_bytes_match_emit_encodeBlockAsm: + MOVB $0xf4, (AX) + MOVW R8, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_match_emit_encodeBlockAsm + +two_bytes_match_emit_encodeBlockAsm: + MOVB $0xf0, (AX) + MOVB R8, 1(AX) + ADDQ $0x02, AX + CMPL R8, $0x40 + JL memmove_match_emit_encodeBlockAsm + JMP memmove_long_match_emit_encodeBlockAsm + +one_byte_match_emit_encodeBlockAsm: + SHLB $0x02, R8 + MOVB R8, (AX) + ADDQ $0x01, AX + +memmove_match_emit_encodeBlockAsm: + LEAQ (AX)(R9*1), R8 + + // genMemMoveShort + CMPQ R9, $0x08 + JLE emit_lit_memmove_match_emit_encodeBlockAsm_memmove_move_8 + CMPQ R9, $0x10 + JBE emit_lit_memmove_match_emit_encodeBlockAsm_memmove_move_8through16 + CMPQ R9, $0x20 + JBE emit_lit_memmove_match_emit_encodeBlockAsm_memmove_move_17through32 + JMP emit_lit_memmove_match_emit_encodeBlockAsm_memmove_move_33through64 + +emit_lit_memmove_match_emit_encodeBlockAsm_memmove_move_8: + MOVQ (DI), R10 + MOVQ R10, (AX) + JMP memmove_end_copy_match_emit_encodeBlockAsm + +emit_lit_memmove_match_emit_encodeBlockAsm_memmove_move_8through16: + MOVQ (DI), R10 + MOVQ -8(DI)(R9*1), DI + MOVQ R10, (AX) + MOVQ DI, -8(AX)(R9*1) + JMP memmove_end_copy_match_emit_encodeBlockAsm + +emit_lit_memmove_match_emit_encodeBlockAsm_memmove_move_17through32: + MOVOU (DI), X0 + MOVOU -16(DI)(R9*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(R9*1) + JMP memmove_end_copy_match_emit_encodeBlockAsm + 
+emit_lit_memmove_match_emit_encodeBlockAsm_memmove_move_33through64: + MOVOU (DI), X0 + MOVOU 16(DI), X1 + MOVOU -32(DI)(R9*1), X2 + MOVOU -16(DI)(R9*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R9*1) + MOVOU X3, -16(AX)(R9*1) + +memmove_end_copy_match_emit_encodeBlockAsm: + MOVQ R8, AX + JMP emit_literal_done_match_emit_encodeBlockAsm + +memmove_long_match_emit_encodeBlockAsm: + LEAQ (AX)(R9*1), R8 + + // genMemMoveLong + MOVOU (DI), X0 + MOVOU 16(DI), X1 + MOVOU -32(DI)(R9*1), X2 + MOVOU -16(DI)(R9*1), X3 + MOVQ R9, R11 + SHRQ $0x05, R11 + MOVQ AX, R10 + ANDL $0x0000001f, R10 + MOVQ $0x00000040, R12 + SUBQ R10, R12 + DECQ R11 + JA emit_lit_memmove_long_match_emit_encodeBlockAsmlarge_forward_sse_loop_32 + LEAQ -32(DI)(R12*1), R10 + LEAQ -32(AX)(R12*1), R13 + +emit_lit_memmove_long_match_emit_encodeBlockAsmlarge_big_loop_back: + MOVOU (R10), X4 + MOVOU 16(R10), X5 + MOVOA X4, (R13) + MOVOA X5, 16(R13) + ADDQ $0x20, R13 + ADDQ $0x20, R10 + ADDQ $0x20, R12 + DECQ R11 + JNA emit_lit_memmove_long_match_emit_encodeBlockAsmlarge_big_loop_back + +emit_lit_memmove_long_match_emit_encodeBlockAsmlarge_forward_sse_loop_32: + MOVOU -32(DI)(R12*1), X4 + MOVOU -16(DI)(R12*1), X5 + MOVOA X4, -32(AX)(R12*1) + MOVOA X5, -16(AX)(R12*1) + ADDQ $0x20, R12 + CMPQ R9, R12 + JAE emit_lit_memmove_long_match_emit_encodeBlockAsmlarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R9*1) + MOVOU X3, -16(AX)(R9*1) + MOVQ R8, AX + +emit_literal_done_match_emit_encodeBlockAsm: +match_nolit_loop_encodeBlockAsm: + MOVL CX, DI + SUBL SI, DI + MOVL DI, 16(SP) + ADDL $0x04, CX + ADDL $0x04, SI + MOVQ src_len+32(FP), DI + SUBL CX, DI + LEAQ (DX)(CX*1), R8 + LEAQ (DX)(SI*1), SI + + // matchLen + XORL R10, R10 + CMPL DI, $0x08 + JL matchlen_single_match_nolit_encodeBlockAsm + +matchlen_loopback_match_nolit_encodeBlockAsm: + MOVQ (R8)(R10*1), R9 + XORQ (SI)(R10*1), R9 + TESTQ R9, R9 + JZ matchlen_loop_match_nolit_encodeBlockAsm + BSFQ R9, R9 + SARQ $0x03, R9 + LEAL (R10)(R9*1), R10 + JMP match_nolit_end_encodeBlockAsm + +matchlen_loop_match_nolit_encodeBlockAsm: + LEAL -8(DI), DI + LEAL 8(R10), R10 + CMPL DI, $0x08 + JGE matchlen_loopback_match_nolit_encodeBlockAsm + +matchlen_single_match_nolit_encodeBlockAsm: + TESTL DI, DI + JZ match_nolit_end_encodeBlockAsm + +matchlen_single_loopback_match_nolit_encodeBlockAsm: + MOVB (R8)(R10*1), R9 + CMPB (SI)(R10*1), R9 + JNE match_nolit_end_encodeBlockAsm + LEAL 1(R10), R10 + DECL DI + JNZ matchlen_single_loopback_match_nolit_encodeBlockAsm + +match_nolit_end_encodeBlockAsm: + ADDL R10, CX + MOVL 16(SP), SI + ADDL $0x04, R10 + MOVL CX, 12(SP) + + // emitCopy + CMPL SI, $0x00010000 + JL two_byte_offset_match_nolit_encodeBlockAsm + +four_bytes_loop_back_match_nolit_encodeBlockAsm: + CMPL R10, $0x40 + JLE four_bytes_remain_match_nolit_encodeBlockAsm + MOVB $0xff, (AX) + MOVL SI, 1(AX) + LEAL -64(R10), R10 + ADDQ $0x05, AX + CMPL R10, $0x04 + JL four_bytes_remain_match_nolit_encodeBlockAsm + + // emitRepeat +emit_repeat_again_match_nolit_encodeBlockAsm_emit_copy: + MOVL R10, DI + LEAL -4(R10), R10 + CMPL DI, $0x08 + JLE repeat_two_match_nolit_encodeBlockAsm_emit_copy + CMPL DI, $0x0c + JGE cant_repeat_two_offset_match_nolit_encodeBlockAsm_emit_copy + CMPL SI, $0x00000800 + JLT repeat_two_offset_match_nolit_encodeBlockAsm_emit_copy + +cant_repeat_two_offset_match_nolit_encodeBlockAsm_emit_copy: + CMPL R10, $0x00000104 + JLT repeat_three_match_nolit_encodeBlockAsm_emit_copy + CMPL R10, $0x00010100 + JLT 
repeat_four_match_nolit_encodeBlockAsm_emit_copy + CMPL R10, $0x0100ffff + JLT repeat_five_match_nolit_encodeBlockAsm_emit_copy + LEAL -16842747(R10), R10 + MOVW $0x001d, (AX) + MOVW $0xfffb, 2(AX) + MOVB $0xff, 4(AX) + ADDQ $0x05, AX + JMP emit_repeat_again_match_nolit_encodeBlockAsm_emit_copy + +repeat_five_match_nolit_encodeBlockAsm_emit_copy: + LEAL -65536(R10), R10 + MOVL R10, SI + MOVW $0x001d, (AX) + MOVW R10, 2(AX) + SARL $0x10, SI + MOVB SI, 4(AX) + ADDQ $0x05, AX + JMP match_nolit_emitcopy_end_encodeBlockAsm + +repeat_four_match_nolit_encodeBlockAsm_emit_copy: + LEAL -256(R10), R10 + MOVW $0x0019, (AX) + MOVW R10, 2(AX) + ADDQ $0x04, AX + JMP match_nolit_emitcopy_end_encodeBlockAsm + +repeat_three_match_nolit_encodeBlockAsm_emit_copy: + LEAL -4(R10), R10 + MOVW $0x0015, (AX) + MOVB R10, 2(AX) + ADDQ $0x03, AX + JMP match_nolit_emitcopy_end_encodeBlockAsm + +repeat_two_match_nolit_encodeBlockAsm_emit_copy: + SHLL $0x02, R10 + ORL $0x01, R10 + MOVW R10, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeBlockAsm + +repeat_two_offset_match_nolit_encodeBlockAsm_emit_copy: + XORQ DI, DI + LEAL 1(DI)(R10*4), R10 + MOVB SI, 1(AX) + SARL $0x08, SI + SHLL $0x05, SI + ORL SI, R10 + MOVB R10, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeBlockAsm + JMP four_bytes_loop_back_match_nolit_encodeBlockAsm + +four_bytes_remain_match_nolit_encodeBlockAsm: + TESTL R10, R10 + JZ match_nolit_emitcopy_end_encodeBlockAsm + MOVB $0x03, BL + LEAL -4(BX)(R10*4), R10 + MOVB R10, (AX) + MOVL SI, 1(AX) + ADDQ $0x05, AX + JMP match_nolit_emitcopy_end_encodeBlockAsm + +two_byte_offset_match_nolit_encodeBlockAsm: + CMPL R10, $0x40 + JLE two_byte_offset_short_match_nolit_encodeBlockAsm + MOVB $0xee, (AX) + MOVW SI, 1(AX) + LEAL -60(R10), R10 + ADDQ $0x03, AX + + // emitRepeat +emit_repeat_again_match_nolit_encodeBlockAsm_emit_copy_short: + MOVL R10, DI + LEAL -4(R10), R10 + CMPL DI, $0x08 + JLE repeat_two_match_nolit_encodeBlockAsm_emit_copy_short + CMPL DI, $0x0c + JGE cant_repeat_two_offset_match_nolit_encodeBlockAsm_emit_copy_short + CMPL SI, $0x00000800 + JLT repeat_two_offset_match_nolit_encodeBlockAsm_emit_copy_short + +cant_repeat_two_offset_match_nolit_encodeBlockAsm_emit_copy_short: + CMPL R10, $0x00000104 + JLT repeat_three_match_nolit_encodeBlockAsm_emit_copy_short + CMPL R10, $0x00010100 + JLT repeat_four_match_nolit_encodeBlockAsm_emit_copy_short + CMPL R10, $0x0100ffff + JLT repeat_five_match_nolit_encodeBlockAsm_emit_copy_short + LEAL -16842747(R10), R10 + MOVW $0x001d, (AX) + MOVW $0xfffb, 2(AX) + MOVB $0xff, 4(AX) + ADDQ $0x05, AX + JMP emit_repeat_again_match_nolit_encodeBlockAsm_emit_copy_short + +repeat_five_match_nolit_encodeBlockAsm_emit_copy_short: + LEAL -65536(R10), R10 + MOVL R10, SI + MOVW $0x001d, (AX) + MOVW R10, 2(AX) + SARL $0x10, SI + MOVB SI, 4(AX) + ADDQ $0x05, AX + JMP match_nolit_emitcopy_end_encodeBlockAsm + +repeat_four_match_nolit_encodeBlockAsm_emit_copy_short: + LEAL -256(R10), R10 + MOVW $0x0019, (AX) + MOVW R10, 2(AX) + ADDQ $0x04, AX + JMP match_nolit_emitcopy_end_encodeBlockAsm + +repeat_three_match_nolit_encodeBlockAsm_emit_copy_short: + LEAL -4(R10), R10 + MOVW $0x0015, (AX) + MOVB R10, 2(AX) + ADDQ $0x03, AX + JMP match_nolit_emitcopy_end_encodeBlockAsm + +repeat_two_match_nolit_encodeBlockAsm_emit_copy_short: + SHLL $0x02, R10 + ORL $0x01, R10 + MOVW R10, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeBlockAsm + +repeat_two_offset_match_nolit_encodeBlockAsm_emit_copy_short: + XORQ DI, DI + LEAL 1(DI)(R10*4), R10 + MOVB SI, 1(AX) + 
SARL $0x08, SI + SHLL $0x05, SI + ORL SI, R10 + MOVB R10, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeBlockAsm + JMP two_byte_offset_match_nolit_encodeBlockAsm + +two_byte_offset_short_match_nolit_encodeBlockAsm: + CMPL R10, $0x0c + JGE emit_copy_three_match_nolit_encodeBlockAsm + CMPL SI, $0x00000800 + JGE emit_copy_three_match_nolit_encodeBlockAsm + MOVB $0x01, BL + LEAL -16(BX)(R10*4), R10 + MOVB SI, 1(AX) + SHRL $0x08, SI + SHLL $0x05, SI + ORL SI, R10 + MOVB R10, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeBlockAsm + +emit_copy_three_match_nolit_encodeBlockAsm: + MOVB $0x02, BL + LEAL -4(BX)(R10*4), R10 + MOVB R10, (AX) + MOVW SI, 1(AX) + ADDQ $0x03, AX + +match_nolit_emitcopy_end_encodeBlockAsm: + CMPL CX, 8(SP) + JGE emit_remainder_encodeBlockAsm + MOVQ -2(DX)(CX*1), DI + CMPQ AX, (SP) + JL match_nolit_dst_ok_encodeBlockAsm + MOVQ $0x00000000, ret+48(FP) + RET + +match_nolit_dst_ok_encodeBlockAsm: + MOVQ $0x0000cf1bbcdcbf9b, R9 + MOVQ DI, R8 + SHRQ $0x10, DI + MOVQ DI, SI + SHLQ $0x10, R8 + IMULQ R9, R8 + SHRQ $0x32, R8 + SHLQ $0x10, SI + IMULQ R9, SI + SHRQ $0x32, SI + LEAL -2(CX), R9 + LEAQ 24(SP)(SI*4), R10 + MOVL (R10), SI + MOVL R9, 24(SP)(R8*4) + MOVL CX, (R10) + CMPL (DX)(SI*1), DI + JEQ match_nolit_loop_encodeBlockAsm + INCL CX + JMP search_loop_encodeBlockAsm + +emit_remainder_encodeBlockAsm: + MOVQ src_len+32(FP), CX + SUBL 12(SP), CX + LEAQ 5(AX)(CX*1), CX + CMPQ CX, (SP) + JL emit_remainder_ok_encodeBlockAsm + MOVQ $0x00000000, ret+48(FP) + RET + +emit_remainder_ok_encodeBlockAsm: + MOVQ src_len+32(FP), CX + MOVL 12(SP), BX + CMPL BX, CX + JEQ emit_literal_done_emit_remainder_encodeBlockAsm + MOVL CX, SI + MOVL CX, 12(SP) + LEAQ (DX)(BX*1), CX + SUBL BX, SI + LEAL -1(SI), DX + CMPL DX, $0x3c + JLT one_byte_emit_remainder_encodeBlockAsm + CMPL DX, $0x00000100 + JLT two_bytes_emit_remainder_encodeBlockAsm + CMPL DX, $0x00010000 + JLT three_bytes_emit_remainder_encodeBlockAsm + CMPL DX, $0x01000000 + JLT four_bytes_emit_remainder_encodeBlockAsm + MOVB $0xfc, (AX) + MOVL DX, 1(AX) + ADDQ $0x05, AX + JMP memmove_long_emit_remainder_encodeBlockAsm + +four_bytes_emit_remainder_encodeBlockAsm: + MOVL DX, BX + SHRL $0x10, BX + MOVB $0xf8, (AX) + MOVW DX, 1(AX) + MOVB BL, 3(AX) + ADDQ $0x04, AX + JMP memmove_long_emit_remainder_encodeBlockAsm + +three_bytes_emit_remainder_encodeBlockAsm: + MOVB $0xf4, (AX) + MOVW DX, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_emit_remainder_encodeBlockAsm + +two_bytes_emit_remainder_encodeBlockAsm: + MOVB $0xf0, (AX) + MOVB DL, 1(AX) + ADDQ $0x02, AX + CMPL DX, $0x40 + JL memmove_emit_remainder_encodeBlockAsm + JMP memmove_long_emit_remainder_encodeBlockAsm + +one_byte_emit_remainder_encodeBlockAsm: + SHLB $0x02, DL + MOVB DL, (AX) + ADDQ $0x01, AX + +memmove_emit_remainder_encodeBlockAsm: + LEAQ (AX)(SI*1), DX + MOVL SI, BX + + // genMemMoveShort + CMPQ BX, $0x08 + JLE emit_lit_memmove_emit_remainder_encodeBlockAsm_memmove_move_8 + CMPQ BX, $0x10 + JBE emit_lit_memmove_emit_remainder_encodeBlockAsm_memmove_move_8through16 + CMPQ BX, $0x20 + JBE emit_lit_memmove_emit_remainder_encodeBlockAsm_memmove_move_17through32 + JMP emit_lit_memmove_emit_remainder_encodeBlockAsm_memmove_move_33through64 + +emit_lit_memmove_emit_remainder_encodeBlockAsm_memmove_move_8: + MOVQ (CX), SI + MOVQ SI, (AX) + JMP memmove_end_copy_emit_remainder_encodeBlockAsm + +emit_lit_memmove_emit_remainder_encodeBlockAsm_memmove_move_8through16: + MOVQ (CX), SI + MOVQ -8(CX)(BX*1), CX + MOVQ SI, (AX) + MOVQ CX, -8(AX)(BX*1) + JMP 
memmove_end_copy_emit_remainder_encodeBlockAsm + +emit_lit_memmove_emit_remainder_encodeBlockAsm_memmove_move_17through32: + MOVOU (CX), X0 + MOVOU -16(CX)(BX*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeBlockAsm + +emit_lit_memmove_emit_remainder_encodeBlockAsm_memmove_move_33through64: + MOVOU (CX), X0 + MOVOU 16(CX), X1 + MOVOU -32(CX)(BX*1), X2 + MOVOU -16(CX)(BX*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(BX*1) + MOVOU X3, -16(AX)(BX*1) + +memmove_end_copy_emit_remainder_encodeBlockAsm: + MOVQ DX, AX + JMP emit_literal_done_emit_remainder_encodeBlockAsm + +memmove_long_emit_remainder_encodeBlockAsm: + LEAQ (AX)(SI*1), DX + MOVL SI, BX + + // genMemMoveLong + MOVOU (CX), X0 + MOVOU 16(CX), X1 + MOVOU -32(CX)(BX*1), X2 + MOVOU -16(CX)(BX*1), X3 + MOVQ BX, DI + SHRQ $0x05, DI + MOVQ AX, SI + ANDL $0x0000001f, SI + MOVQ $0x00000040, R8 + SUBQ SI, R8 + DECQ DI + JA emit_lit_memmove_long_emit_remainder_encodeBlockAsmlarge_forward_sse_loop_32 + LEAQ -32(CX)(R8*1), SI + LEAQ -32(AX)(R8*1), R9 + +emit_lit_memmove_long_emit_remainder_encodeBlockAsmlarge_big_loop_back: + MOVOU (SI), X4 + MOVOU 16(SI), X5 + MOVOA X4, (R9) + MOVOA X5, 16(R9) + ADDQ $0x20, R9 + ADDQ $0x20, SI + ADDQ $0x20, R8 + DECQ DI + JNA emit_lit_memmove_long_emit_remainder_encodeBlockAsmlarge_big_loop_back + +emit_lit_memmove_long_emit_remainder_encodeBlockAsmlarge_forward_sse_loop_32: + MOVOU -32(CX)(R8*1), X4 + MOVOU -16(CX)(R8*1), X5 + MOVOA X4, -32(AX)(R8*1) + MOVOA X5, -16(AX)(R8*1) + ADDQ $0x20, R8 + CMPQ BX, R8 + JAE emit_lit_memmove_long_emit_remainder_encodeBlockAsmlarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(BX*1) + MOVOU X3, -16(AX)(BX*1) + MOVQ DX, AX + +emit_literal_done_emit_remainder_encodeBlockAsm: + MOVQ dst_base+0(FP), CX + SUBQ CX, AX + MOVQ AX, ret+48(FP) + RET + +// func encodeBlockAsm4MB(dst []byte, src []byte) int +// Requires: SSE2 +TEXT ·encodeBlockAsm4MB(SB), $65560-56 + MOVQ dst_base+0(FP), AX + MOVQ $0x00000200, CX + LEAQ 24(SP), DX + PXOR X0, X0 + +zero_loop_encodeBlockAsm4MB: + MOVOU X0, (DX) + MOVOU X0, 16(DX) + MOVOU X0, 32(DX) + MOVOU X0, 48(DX) + MOVOU X0, 64(DX) + MOVOU X0, 80(DX) + MOVOU X0, 96(DX) + MOVOU X0, 112(DX) + ADDQ $0x80, DX + DECQ CX + JNZ zero_loop_encodeBlockAsm4MB + MOVL $0x00000000, 12(SP) + MOVQ src_len+32(FP), CX + LEAQ -9(CX), DX + LEAQ -8(CX), SI + MOVL SI, 8(SP) + SHRQ $0x05, CX + SUBL CX, DX + LEAQ (AX)(DX*1), DX + MOVQ DX, (SP) + MOVL $0x00000001, CX + MOVL CX, 16(SP) + MOVQ src_base+24(FP), DX + +search_loop_encodeBlockAsm4MB: + MOVL CX, SI + SUBL 12(SP), SI + SHRL $0x06, SI + LEAL 4(CX)(SI*1), SI + CMPL SI, 8(SP) + JGE emit_remainder_encodeBlockAsm4MB + MOVQ (DX)(CX*1), DI + MOVL SI, 20(SP) + MOVQ $0x0000cf1bbcdcbf9b, R9 + MOVQ DI, R10 + MOVQ DI, R11 + SHRQ $0x08, R11 + SHLQ $0x10, R10 + IMULQ R9, R10 + SHRQ $0x32, R10 + SHLQ $0x10, R11 + IMULQ R9, R11 + SHRQ $0x32, R11 + MOVL 24(SP)(R10*4), SI + MOVL 24(SP)(R11*4), R8 + MOVL CX, 24(SP)(R10*4) + LEAL 1(CX), R10 + MOVL R10, 24(SP)(R11*4) + MOVQ DI, R10 + SHRQ $0x10, R10 + SHLQ $0x10, R10 + IMULQ R9, R10 + SHRQ $0x32, R10 + MOVL CX, R9 + SUBL 16(SP), R9 + MOVL 1(DX)(R9*1), R11 + MOVQ DI, R9 + SHRQ $0x08, R9 + CMPL R9, R11 + JNE no_repeat_found_encodeBlockAsm4MB + LEAL 1(CX), DI + MOVL 12(SP), R8 + MOVL DI, SI + SUBL 16(SP), SI + JZ repeat_extend_back_end_encodeBlockAsm4MB + +repeat_extend_back_loop_encodeBlockAsm4MB: + CMPL DI, R8 + JLE repeat_extend_back_end_encodeBlockAsm4MB + MOVB -1(DX)(SI*1), BL + MOVB 
-1(DX)(DI*1), R9 + CMPB BL, R9 + JNE repeat_extend_back_end_encodeBlockAsm4MB + LEAL -1(DI), DI + DECL SI + JNZ repeat_extend_back_loop_encodeBlockAsm4MB + +repeat_extend_back_end_encodeBlockAsm4MB: + MOVL 12(SP), SI + CMPL SI, DI + JEQ emit_literal_done_repeat_emit_encodeBlockAsm4MB + MOVL DI, R9 + MOVL DI, 12(SP) + LEAQ (DX)(SI*1), R10 + SUBL SI, R9 + LEAL -1(R9), SI + CMPL SI, $0x3c + JLT one_byte_repeat_emit_encodeBlockAsm4MB + CMPL SI, $0x00000100 + JLT two_bytes_repeat_emit_encodeBlockAsm4MB + CMPL SI, $0x00010000 + JLT three_bytes_repeat_emit_encodeBlockAsm4MB + MOVL SI, R11 + SHRL $0x10, R11 + MOVB $0xf8, (AX) + MOVW SI, 1(AX) + MOVB R11, 3(AX) + ADDQ $0x04, AX + JMP memmove_long_repeat_emit_encodeBlockAsm4MB + +three_bytes_repeat_emit_encodeBlockAsm4MB: + MOVB $0xf4, (AX) + MOVW SI, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_repeat_emit_encodeBlockAsm4MB + +two_bytes_repeat_emit_encodeBlockAsm4MB: + MOVB $0xf0, (AX) + MOVB SI, 1(AX) + ADDQ $0x02, AX + CMPL SI, $0x40 + JL memmove_repeat_emit_encodeBlockAsm4MB + JMP memmove_long_repeat_emit_encodeBlockAsm4MB + +one_byte_repeat_emit_encodeBlockAsm4MB: + SHLB $0x02, SI + MOVB SI, (AX) + ADDQ $0x01, AX + +memmove_repeat_emit_encodeBlockAsm4MB: + LEAQ (AX)(R9*1), SI + + // genMemMoveShort + CMPQ R9, $0x08 + JLE emit_lit_memmove_repeat_emit_encodeBlockAsm4MB_memmove_move_8 + CMPQ R9, $0x10 + JBE emit_lit_memmove_repeat_emit_encodeBlockAsm4MB_memmove_move_8through16 + CMPQ R9, $0x20 + JBE emit_lit_memmove_repeat_emit_encodeBlockAsm4MB_memmove_move_17through32 + JMP emit_lit_memmove_repeat_emit_encodeBlockAsm4MB_memmove_move_33through64 + +emit_lit_memmove_repeat_emit_encodeBlockAsm4MB_memmove_move_8: + MOVQ (R10), R11 + MOVQ R11, (AX) + JMP memmove_end_copy_repeat_emit_encodeBlockAsm4MB + +emit_lit_memmove_repeat_emit_encodeBlockAsm4MB_memmove_move_8through16: + MOVQ (R10), R11 + MOVQ -8(R10)(R9*1), R10 + MOVQ R11, (AX) + MOVQ R10, -8(AX)(R9*1) + JMP memmove_end_copy_repeat_emit_encodeBlockAsm4MB + +emit_lit_memmove_repeat_emit_encodeBlockAsm4MB_memmove_move_17through32: + MOVOU (R10), X0 + MOVOU -16(R10)(R9*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(R9*1) + JMP memmove_end_copy_repeat_emit_encodeBlockAsm4MB + +emit_lit_memmove_repeat_emit_encodeBlockAsm4MB_memmove_move_33through64: + MOVOU (R10), X0 + MOVOU 16(R10), X1 + MOVOU -32(R10)(R9*1), X2 + MOVOU -16(R10)(R9*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R9*1) + MOVOU X3, -16(AX)(R9*1) + +memmove_end_copy_repeat_emit_encodeBlockAsm4MB: + MOVQ SI, AX + JMP emit_literal_done_repeat_emit_encodeBlockAsm4MB + +memmove_long_repeat_emit_encodeBlockAsm4MB: + LEAQ (AX)(R9*1), SI + + // genMemMoveLong + MOVOU (R10), X0 + MOVOU 16(R10), X1 + MOVOU -32(R10)(R9*1), X2 + MOVOU -16(R10)(R9*1), X3 + MOVQ R9, R12 + SHRQ $0x05, R12 + MOVQ AX, R11 + ANDL $0x0000001f, R11 + MOVQ $0x00000040, R13 + SUBQ R11, R13 + DECQ R12 + JA emit_lit_memmove_long_repeat_emit_encodeBlockAsm4MBlarge_forward_sse_loop_32 + LEAQ -32(R10)(R13*1), R11 + LEAQ -32(AX)(R13*1), R14 + +emit_lit_memmove_long_repeat_emit_encodeBlockAsm4MBlarge_big_loop_back: + MOVOU (R11), X4 + MOVOU 16(R11), X5 + MOVOA X4, (R14) + MOVOA X5, 16(R14) + ADDQ $0x20, R14 + ADDQ $0x20, R11 + ADDQ $0x20, R13 + DECQ R12 + JNA emit_lit_memmove_long_repeat_emit_encodeBlockAsm4MBlarge_big_loop_back + +emit_lit_memmove_long_repeat_emit_encodeBlockAsm4MBlarge_forward_sse_loop_32: + MOVOU -32(R10)(R13*1), X4 + MOVOU -16(R10)(R13*1), X5 + MOVOA X4, -32(AX)(R13*1) + MOVOA X5, -16(AX)(R13*1) + ADDQ $0x20, R13 + CMPQ R9, R13 + JAE 
emit_lit_memmove_long_repeat_emit_encodeBlockAsm4MBlarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R9*1) + MOVOU X3, -16(AX)(R9*1) + MOVQ SI, AX + +emit_literal_done_repeat_emit_encodeBlockAsm4MB: + ADDL $0x05, CX + MOVL CX, SI + SUBL 16(SP), SI + MOVQ src_len+32(FP), R9 + SUBL CX, R9 + LEAQ (DX)(CX*1), R10 + LEAQ (DX)(SI*1), SI + + // matchLen + XORL R12, R12 + CMPL R9, $0x08 + JL matchlen_single_repeat_extend_encodeBlockAsm4MB + +matchlen_loopback_repeat_extend_encodeBlockAsm4MB: + MOVQ (R10)(R12*1), R11 + XORQ (SI)(R12*1), R11 + TESTQ R11, R11 + JZ matchlen_loop_repeat_extend_encodeBlockAsm4MB + BSFQ R11, R11 + SARQ $0x03, R11 + LEAL (R12)(R11*1), R12 + JMP repeat_extend_forward_end_encodeBlockAsm4MB + +matchlen_loop_repeat_extend_encodeBlockAsm4MB: + LEAL -8(R9), R9 + LEAL 8(R12), R12 + CMPL R9, $0x08 + JGE matchlen_loopback_repeat_extend_encodeBlockAsm4MB + +matchlen_single_repeat_extend_encodeBlockAsm4MB: + TESTL R9, R9 + JZ repeat_extend_forward_end_encodeBlockAsm4MB + +matchlen_single_loopback_repeat_extend_encodeBlockAsm4MB: + MOVB (R10)(R12*1), R11 + CMPB (SI)(R12*1), R11 + JNE repeat_extend_forward_end_encodeBlockAsm4MB + LEAL 1(R12), R12 + DECL R9 + JNZ matchlen_single_loopback_repeat_extend_encodeBlockAsm4MB + +repeat_extend_forward_end_encodeBlockAsm4MB: + ADDL R12, CX + MOVL CX, SI + SUBL DI, SI + MOVL 16(SP), DI + TESTL R8, R8 + JZ repeat_as_copy_encodeBlockAsm4MB + + // emitRepeat + MOVL SI, R8 + LEAL -4(SI), SI + CMPL R8, $0x08 + JLE repeat_two_match_repeat_encodeBlockAsm4MB + CMPL R8, $0x0c + JGE cant_repeat_two_offset_match_repeat_encodeBlockAsm4MB + CMPL DI, $0x00000800 + JLT repeat_two_offset_match_repeat_encodeBlockAsm4MB + +cant_repeat_two_offset_match_repeat_encodeBlockAsm4MB: + CMPL SI, $0x00000104 + JLT repeat_three_match_repeat_encodeBlockAsm4MB + CMPL SI, $0x00010100 + JLT repeat_four_match_repeat_encodeBlockAsm4MB + LEAL -65536(SI), SI + MOVL SI, DI + MOVW $0x001d, (AX) + MOVW SI, 2(AX) + SARL $0x10, DI + MOVB DI, 4(AX) + ADDQ $0x05, AX + JMP repeat_end_emit_encodeBlockAsm4MB + +repeat_four_match_repeat_encodeBlockAsm4MB: + LEAL -256(SI), SI + MOVW $0x0019, (AX) + MOVW SI, 2(AX) + ADDQ $0x04, AX + JMP repeat_end_emit_encodeBlockAsm4MB + +repeat_three_match_repeat_encodeBlockAsm4MB: + LEAL -4(SI), SI + MOVW $0x0015, (AX) + MOVB SI, 2(AX) + ADDQ $0x03, AX + JMP repeat_end_emit_encodeBlockAsm4MB + +repeat_two_match_repeat_encodeBlockAsm4MB: + SHLL $0x02, SI + ORL $0x01, SI + MOVW SI, (AX) + ADDQ $0x02, AX + JMP repeat_end_emit_encodeBlockAsm4MB + +repeat_two_offset_match_repeat_encodeBlockAsm4MB: + XORQ R8, R8 + LEAL 1(R8)(SI*4), SI + MOVB DI, 1(AX) + SARL $0x08, DI + SHLL $0x05, DI + ORL DI, SI + MOVB SI, (AX) + ADDQ $0x02, AX + JMP repeat_end_emit_encodeBlockAsm4MB + +repeat_as_copy_encodeBlockAsm4MB: + // emitCopy + CMPL DI, $0x00010000 + JL two_byte_offset_repeat_as_copy_encodeBlockAsm4MB + +four_bytes_loop_back_repeat_as_copy_encodeBlockAsm4MB: + CMPL SI, $0x40 + JLE four_bytes_remain_repeat_as_copy_encodeBlockAsm4MB + MOVB $0xff, (AX) + MOVL DI, 1(AX) + LEAL -64(SI), SI + ADDQ $0x05, AX + CMPL SI, $0x04 + JL four_bytes_remain_repeat_as_copy_encodeBlockAsm4MB + + // emitRepeat + MOVL SI, R8 + LEAL -4(SI), SI + CMPL R8, $0x08 + JLE repeat_two_repeat_as_copy_encodeBlockAsm4MB_emit_copy + CMPL R8, $0x0c + JGE cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm4MB_emit_copy + CMPL DI, $0x00000800 + JLT repeat_two_offset_repeat_as_copy_encodeBlockAsm4MB_emit_copy + 
+cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm4MB_emit_copy: + CMPL SI, $0x00000104 + JLT repeat_three_repeat_as_copy_encodeBlockAsm4MB_emit_copy + CMPL SI, $0x00010100 + JLT repeat_four_repeat_as_copy_encodeBlockAsm4MB_emit_copy + LEAL -65536(SI), SI + MOVL SI, DI + MOVW $0x001d, (AX) + MOVW SI, 2(AX) + SARL $0x10, DI + MOVB DI, 4(AX) + ADDQ $0x05, AX + JMP repeat_end_emit_encodeBlockAsm4MB + +repeat_four_repeat_as_copy_encodeBlockAsm4MB_emit_copy: + LEAL -256(SI), SI + MOVW $0x0019, (AX) + MOVW SI, 2(AX) + ADDQ $0x04, AX + JMP repeat_end_emit_encodeBlockAsm4MB + +repeat_three_repeat_as_copy_encodeBlockAsm4MB_emit_copy: + LEAL -4(SI), SI + MOVW $0x0015, (AX) + MOVB SI, 2(AX) + ADDQ $0x03, AX + JMP repeat_end_emit_encodeBlockAsm4MB + +repeat_two_repeat_as_copy_encodeBlockAsm4MB_emit_copy: + SHLL $0x02, SI + ORL $0x01, SI + MOVW SI, (AX) + ADDQ $0x02, AX + JMP repeat_end_emit_encodeBlockAsm4MB + +repeat_two_offset_repeat_as_copy_encodeBlockAsm4MB_emit_copy: + XORQ R8, R8 + LEAL 1(R8)(SI*4), SI + MOVB DI, 1(AX) + SARL $0x08, DI + SHLL $0x05, DI + ORL DI, SI + MOVB SI, (AX) + ADDQ $0x02, AX + JMP repeat_end_emit_encodeBlockAsm4MB + JMP four_bytes_loop_back_repeat_as_copy_encodeBlockAsm4MB + +four_bytes_remain_repeat_as_copy_encodeBlockAsm4MB: + TESTL SI, SI + JZ repeat_end_emit_encodeBlockAsm4MB + MOVB $0x03, BL + LEAL -4(BX)(SI*4), SI + MOVB SI, (AX) + MOVL DI, 1(AX) + ADDQ $0x05, AX + JMP repeat_end_emit_encodeBlockAsm4MB + +two_byte_offset_repeat_as_copy_encodeBlockAsm4MB: + CMPL SI, $0x40 + JLE two_byte_offset_short_repeat_as_copy_encodeBlockAsm4MB + MOVB $0xee, (AX) + MOVW DI, 1(AX) + LEAL -60(SI), SI + ADDQ $0x03, AX + + // emitRepeat + MOVL SI, R8 + LEAL -4(SI), SI + CMPL R8, $0x08 + JLE repeat_two_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short + CMPL R8, $0x0c + JGE cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short + CMPL DI, $0x00000800 + JLT repeat_two_offset_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short + +cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short: + CMPL SI, $0x00000104 + JLT repeat_three_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short + CMPL SI, $0x00010100 + JLT repeat_four_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short + LEAL -65536(SI), SI + MOVL SI, DI + MOVW $0x001d, (AX) + MOVW SI, 2(AX) + SARL $0x10, DI + MOVB DI, 4(AX) + ADDQ $0x05, AX + JMP repeat_end_emit_encodeBlockAsm4MB + +repeat_four_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short: + LEAL -256(SI), SI + MOVW $0x0019, (AX) + MOVW SI, 2(AX) + ADDQ $0x04, AX + JMP repeat_end_emit_encodeBlockAsm4MB + +repeat_three_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short: + LEAL -4(SI), SI + MOVW $0x0015, (AX) + MOVB SI, 2(AX) + ADDQ $0x03, AX + JMP repeat_end_emit_encodeBlockAsm4MB + +repeat_two_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short: + SHLL $0x02, SI + ORL $0x01, SI + MOVW SI, (AX) + ADDQ $0x02, AX + JMP repeat_end_emit_encodeBlockAsm4MB + +repeat_two_offset_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short: + XORQ R8, R8 + LEAL 1(R8)(SI*4), SI + MOVB DI, 1(AX) + SARL $0x08, DI + SHLL $0x05, DI + ORL DI, SI + MOVB SI, (AX) + ADDQ $0x02, AX + JMP repeat_end_emit_encodeBlockAsm4MB + JMP two_byte_offset_repeat_as_copy_encodeBlockAsm4MB + +two_byte_offset_short_repeat_as_copy_encodeBlockAsm4MB: + CMPL SI, $0x0c + JGE emit_copy_three_repeat_as_copy_encodeBlockAsm4MB + CMPL DI, $0x00000800 + JGE emit_copy_three_repeat_as_copy_encodeBlockAsm4MB + MOVB $0x01, BL + LEAL -16(BX)(SI*4), SI + MOVB DI, 1(AX) + SHRL $0x08, DI + SHLL $0x05, DI + ORL DI, SI + MOVB 
SI, (AX) + ADDQ $0x02, AX + JMP repeat_end_emit_encodeBlockAsm4MB + +emit_copy_three_repeat_as_copy_encodeBlockAsm4MB: + MOVB $0x02, BL + LEAL -4(BX)(SI*4), SI + MOVB SI, (AX) + MOVW DI, 1(AX) + ADDQ $0x03, AX + +repeat_end_emit_encodeBlockAsm4MB: + MOVL CX, 12(SP) + JMP search_loop_encodeBlockAsm4MB + +no_repeat_found_encodeBlockAsm4MB: + CMPL (DX)(SI*1), DI + JEQ candidate_match_encodeBlockAsm4MB + SHRQ $0x08, DI + MOVL 24(SP)(R10*4), SI + LEAL 2(CX), R9 + CMPL (DX)(R8*1), DI + JEQ candidate2_match_encodeBlockAsm4MB + MOVL R9, 24(SP)(R10*4) + SHRQ $0x08, DI + CMPL (DX)(SI*1), DI + JEQ candidate3_match_encodeBlockAsm4MB + MOVL 20(SP), CX + JMP search_loop_encodeBlockAsm4MB + +candidate3_match_encodeBlockAsm4MB: + ADDL $0x02, CX + JMP candidate_match_encodeBlockAsm4MB + +candidate2_match_encodeBlockAsm4MB: + MOVL R9, 24(SP)(R10*4) + INCL CX + MOVL R8, SI + +candidate_match_encodeBlockAsm4MB: + MOVL 12(SP), DI + TESTL SI, SI + JZ match_extend_back_end_encodeBlockAsm4MB + +match_extend_back_loop_encodeBlockAsm4MB: + CMPL CX, DI + JLE match_extend_back_end_encodeBlockAsm4MB + MOVB -1(DX)(SI*1), BL + MOVB -1(DX)(CX*1), R8 + CMPB BL, R8 + JNE match_extend_back_end_encodeBlockAsm4MB + LEAL -1(CX), CX + DECL SI + JZ match_extend_back_end_encodeBlockAsm4MB + JMP match_extend_back_loop_encodeBlockAsm4MB + +match_extend_back_end_encodeBlockAsm4MB: + MOVL CX, DI + SUBL 12(SP), DI + LEAQ 4(AX)(DI*1), DI + CMPQ DI, (SP) + JL match_dst_size_check_encodeBlockAsm4MB + MOVQ $0x00000000, ret+48(FP) + RET + +match_dst_size_check_encodeBlockAsm4MB: + MOVL CX, DI + MOVL 12(SP), R8 + CMPL R8, DI + JEQ emit_literal_done_match_emit_encodeBlockAsm4MB + MOVL DI, R9 + MOVL DI, 12(SP) + LEAQ (DX)(R8*1), DI + SUBL R8, R9 + LEAL -1(R9), R8 + CMPL R8, $0x3c + JLT one_byte_match_emit_encodeBlockAsm4MB + CMPL R8, $0x00000100 + JLT two_bytes_match_emit_encodeBlockAsm4MB + CMPL R8, $0x00010000 + JLT three_bytes_match_emit_encodeBlockAsm4MB + MOVL R8, R10 + SHRL $0x10, R10 + MOVB $0xf8, (AX) + MOVW R8, 1(AX) + MOVB R10, 3(AX) + ADDQ $0x04, AX + JMP memmove_long_match_emit_encodeBlockAsm4MB + +three_bytes_match_emit_encodeBlockAsm4MB: + MOVB $0xf4, (AX) + MOVW R8, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_match_emit_encodeBlockAsm4MB + +two_bytes_match_emit_encodeBlockAsm4MB: + MOVB $0xf0, (AX) + MOVB R8, 1(AX) + ADDQ $0x02, AX + CMPL R8, $0x40 + JL memmove_match_emit_encodeBlockAsm4MB + JMP memmove_long_match_emit_encodeBlockAsm4MB + +one_byte_match_emit_encodeBlockAsm4MB: + SHLB $0x02, R8 + MOVB R8, (AX) + ADDQ $0x01, AX + +memmove_match_emit_encodeBlockAsm4MB: + LEAQ (AX)(R9*1), R8 + + // genMemMoveShort + CMPQ R9, $0x08 + JLE emit_lit_memmove_match_emit_encodeBlockAsm4MB_memmove_move_8 + CMPQ R9, $0x10 + JBE emit_lit_memmove_match_emit_encodeBlockAsm4MB_memmove_move_8through16 + CMPQ R9, $0x20 + JBE emit_lit_memmove_match_emit_encodeBlockAsm4MB_memmove_move_17through32 + JMP emit_lit_memmove_match_emit_encodeBlockAsm4MB_memmove_move_33through64 + +emit_lit_memmove_match_emit_encodeBlockAsm4MB_memmove_move_8: + MOVQ (DI), R10 + MOVQ R10, (AX) + JMP memmove_end_copy_match_emit_encodeBlockAsm4MB + +emit_lit_memmove_match_emit_encodeBlockAsm4MB_memmove_move_8through16: + MOVQ (DI), R10 + MOVQ -8(DI)(R9*1), DI + MOVQ R10, (AX) + MOVQ DI, -8(AX)(R9*1) + JMP memmove_end_copy_match_emit_encodeBlockAsm4MB + +emit_lit_memmove_match_emit_encodeBlockAsm4MB_memmove_move_17through32: + MOVOU (DI), X0 + MOVOU -16(DI)(R9*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(R9*1) + JMP memmove_end_copy_match_emit_encodeBlockAsm4MB + 
+emit_lit_memmove_match_emit_encodeBlockAsm4MB_memmove_move_33through64: + MOVOU (DI), X0 + MOVOU 16(DI), X1 + MOVOU -32(DI)(R9*1), X2 + MOVOU -16(DI)(R9*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R9*1) + MOVOU X3, -16(AX)(R9*1) + +memmove_end_copy_match_emit_encodeBlockAsm4MB: + MOVQ R8, AX + JMP emit_literal_done_match_emit_encodeBlockAsm4MB + +memmove_long_match_emit_encodeBlockAsm4MB: + LEAQ (AX)(R9*1), R8 + + // genMemMoveLong + MOVOU (DI), X0 + MOVOU 16(DI), X1 + MOVOU -32(DI)(R9*1), X2 + MOVOU -16(DI)(R9*1), X3 + MOVQ R9, R11 + SHRQ $0x05, R11 + MOVQ AX, R10 + ANDL $0x0000001f, R10 + MOVQ $0x00000040, R12 + SUBQ R10, R12 + DECQ R11 + JA emit_lit_memmove_long_match_emit_encodeBlockAsm4MBlarge_forward_sse_loop_32 + LEAQ -32(DI)(R12*1), R10 + LEAQ -32(AX)(R12*1), R13 + +emit_lit_memmove_long_match_emit_encodeBlockAsm4MBlarge_big_loop_back: + MOVOU (R10), X4 + MOVOU 16(R10), X5 + MOVOA X4, (R13) + MOVOA X5, 16(R13) + ADDQ $0x20, R13 + ADDQ $0x20, R10 + ADDQ $0x20, R12 + DECQ R11 + JNA emit_lit_memmove_long_match_emit_encodeBlockAsm4MBlarge_big_loop_back + +emit_lit_memmove_long_match_emit_encodeBlockAsm4MBlarge_forward_sse_loop_32: + MOVOU -32(DI)(R12*1), X4 + MOVOU -16(DI)(R12*1), X5 + MOVOA X4, -32(AX)(R12*1) + MOVOA X5, -16(AX)(R12*1) + ADDQ $0x20, R12 + CMPQ R9, R12 + JAE emit_lit_memmove_long_match_emit_encodeBlockAsm4MBlarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R9*1) + MOVOU X3, -16(AX)(R9*1) + MOVQ R8, AX + +emit_literal_done_match_emit_encodeBlockAsm4MB: +match_nolit_loop_encodeBlockAsm4MB: + MOVL CX, DI + SUBL SI, DI + MOVL DI, 16(SP) + ADDL $0x04, CX + ADDL $0x04, SI + MOVQ src_len+32(FP), DI + SUBL CX, DI + LEAQ (DX)(CX*1), R8 + LEAQ (DX)(SI*1), SI + + // matchLen + XORL R10, R10 + CMPL DI, $0x08 + JL matchlen_single_match_nolit_encodeBlockAsm4MB + +matchlen_loopback_match_nolit_encodeBlockAsm4MB: + MOVQ (R8)(R10*1), R9 + XORQ (SI)(R10*1), R9 + TESTQ R9, R9 + JZ matchlen_loop_match_nolit_encodeBlockAsm4MB + BSFQ R9, R9 + SARQ $0x03, R9 + LEAL (R10)(R9*1), R10 + JMP match_nolit_end_encodeBlockAsm4MB + +matchlen_loop_match_nolit_encodeBlockAsm4MB: + LEAL -8(DI), DI + LEAL 8(R10), R10 + CMPL DI, $0x08 + JGE matchlen_loopback_match_nolit_encodeBlockAsm4MB + +matchlen_single_match_nolit_encodeBlockAsm4MB: + TESTL DI, DI + JZ match_nolit_end_encodeBlockAsm4MB + +matchlen_single_loopback_match_nolit_encodeBlockAsm4MB: + MOVB (R8)(R10*1), R9 + CMPB (SI)(R10*1), R9 + JNE match_nolit_end_encodeBlockAsm4MB + LEAL 1(R10), R10 + DECL DI + JNZ matchlen_single_loopback_match_nolit_encodeBlockAsm4MB + +match_nolit_end_encodeBlockAsm4MB: + ADDL R10, CX + MOVL 16(SP), SI + ADDL $0x04, R10 + MOVL CX, 12(SP) + + // emitCopy + CMPL SI, $0x00010000 + JL two_byte_offset_match_nolit_encodeBlockAsm4MB + +four_bytes_loop_back_match_nolit_encodeBlockAsm4MB: + CMPL R10, $0x40 + JLE four_bytes_remain_match_nolit_encodeBlockAsm4MB + MOVB $0xff, (AX) + MOVL SI, 1(AX) + LEAL -64(R10), R10 + ADDQ $0x05, AX + CMPL R10, $0x04 + JL four_bytes_remain_match_nolit_encodeBlockAsm4MB + + // emitRepeat + MOVL R10, DI + LEAL -4(R10), R10 + CMPL DI, $0x08 + JLE repeat_two_match_nolit_encodeBlockAsm4MB_emit_copy + CMPL DI, $0x0c + JGE cant_repeat_two_offset_match_nolit_encodeBlockAsm4MB_emit_copy + CMPL SI, $0x00000800 + JLT repeat_two_offset_match_nolit_encodeBlockAsm4MB_emit_copy + +cant_repeat_two_offset_match_nolit_encodeBlockAsm4MB_emit_copy: + CMPL R10, $0x00000104 + JLT repeat_three_match_nolit_encodeBlockAsm4MB_emit_copy + CMPL R10, $0x00010100 + JLT 
repeat_four_match_nolit_encodeBlockAsm4MB_emit_copy + LEAL -65536(R10), R10 + MOVL R10, SI + MOVW $0x001d, (AX) + MOVW R10, 2(AX) + SARL $0x10, SI + MOVB SI, 4(AX) + ADDQ $0x05, AX + JMP match_nolit_emitcopy_end_encodeBlockAsm4MB + +repeat_four_match_nolit_encodeBlockAsm4MB_emit_copy: + LEAL -256(R10), R10 + MOVW $0x0019, (AX) + MOVW R10, 2(AX) + ADDQ $0x04, AX + JMP match_nolit_emitcopy_end_encodeBlockAsm4MB + +repeat_three_match_nolit_encodeBlockAsm4MB_emit_copy: + LEAL -4(R10), R10 + MOVW $0x0015, (AX) + MOVB R10, 2(AX) + ADDQ $0x03, AX + JMP match_nolit_emitcopy_end_encodeBlockAsm4MB + +repeat_two_match_nolit_encodeBlockAsm4MB_emit_copy: + SHLL $0x02, R10 + ORL $0x01, R10 + MOVW R10, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeBlockAsm4MB + +repeat_two_offset_match_nolit_encodeBlockAsm4MB_emit_copy: + XORQ DI, DI + LEAL 1(DI)(R10*4), R10 + MOVB SI, 1(AX) + SARL $0x08, SI + SHLL $0x05, SI + ORL SI, R10 + MOVB R10, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeBlockAsm4MB + JMP four_bytes_loop_back_match_nolit_encodeBlockAsm4MB + +four_bytes_remain_match_nolit_encodeBlockAsm4MB: + TESTL R10, R10 + JZ match_nolit_emitcopy_end_encodeBlockAsm4MB + MOVB $0x03, BL + LEAL -4(BX)(R10*4), R10 + MOVB R10, (AX) + MOVL SI, 1(AX) + ADDQ $0x05, AX + JMP match_nolit_emitcopy_end_encodeBlockAsm4MB + +two_byte_offset_match_nolit_encodeBlockAsm4MB: + CMPL R10, $0x40 + JLE two_byte_offset_short_match_nolit_encodeBlockAsm4MB + MOVB $0xee, (AX) + MOVW SI, 1(AX) + LEAL -60(R10), R10 + ADDQ $0x03, AX + + // emitRepeat + MOVL R10, DI + LEAL -4(R10), R10 + CMPL DI, $0x08 + JLE repeat_two_match_nolit_encodeBlockAsm4MB_emit_copy_short + CMPL DI, $0x0c + JGE cant_repeat_two_offset_match_nolit_encodeBlockAsm4MB_emit_copy_short + CMPL SI, $0x00000800 + JLT repeat_two_offset_match_nolit_encodeBlockAsm4MB_emit_copy_short + +cant_repeat_two_offset_match_nolit_encodeBlockAsm4MB_emit_copy_short: + CMPL R10, $0x00000104 + JLT repeat_three_match_nolit_encodeBlockAsm4MB_emit_copy_short + CMPL R10, $0x00010100 + JLT repeat_four_match_nolit_encodeBlockAsm4MB_emit_copy_short + LEAL -65536(R10), R10 + MOVL R10, SI + MOVW $0x001d, (AX) + MOVW R10, 2(AX) + SARL $0x10, SI + MOVB SI, 4(AX) + ADDQ $0x05, AX + JMP match_nolit_emitcopy_end_encodeBlockAsm4MB + +repeat_four_match_nolit_encodeBlockAsm4MB_emit_copy_short: + LEAL -256(R10), R10 + MOVW $0x0019, (AX) + MOVW R10, 2(AX) + ADDQ $0x04, AX + JMP match_nolit_emitcopy_end_encodeBlockAsm4MB + +repeat_three_match_nolit_encodeBlockAsm4MB_emit_copy_short: + LEAL -4(R10), R10 + MOVW $0x0015, (AX) + MOVB R10, 2(AX) + ADDQ $0x03, AX + JMP match_nolit_emitcopy_end_encodeBlockAsm4MB + +repeat_two_match_nolit_encodeBlockAsm4MB_emit_copy_short: + SHLL $0x02, R10 + ORL $0x01, R10 + MOVW R10, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeBlockAsm4MB + +repeat_two_offset_match_nolit_encodeBlockAsm4MB_emit_copy_short: + XORQ DI, DI + LEAL 1(DI)(R10*4), R10 + MOVB SI, 1(AX) + SARL $0x08, SI + SHLL $0x05, SI + ORL SI, R10 + MOVB R10, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeBlockAsm4MB + JMP two_byte_offset_match_nolit_encodeBlockAsm4MB + +two_byte_offset_short_match_nolit_encodeBlockAsm4MB: + CMPL R10, $0x0c + JGE emit_copy_three_match_nolit_encodeBlockAsm4MB + CMPL SI, $0x00000800 + JGE emit_copy_three_match_nolit_encodeBlockAsm4MB + MOVB $0x01, BL + LEAL -16(BX)(R10*4), R10 + MOVB SI, 1(AX) + SHRL $0x08, SI + SHLL $0x05, SI + ORL SI, R10 + MOVB R10, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeBlockAsm4MB + 
+emit_copy_three_match_nolit_encodeBlockAsm4MB: + MOVB $0x02, BL + LEAL -4(BX)(R10*4), R10 + MOVB R10, (AX) + MOVW SI, 1(AX) + ADDQ $0x03, AX + +match_nolit_emitcopy_end_encodeBlockAsm4MB: + CMPL CX, 8(SP) + JGE emit_remainder_encodeBlockAsm4MB + MOVQ -2(DX)(CX*1), DI + CMPQ AX, (SP) + JL match_nolit_dst_ok_encodeBlockAsm4MB + MOVQ $0x00000000, ret+48(FP) + RET + +match_nolit_dst_ok_encodeBlockAsm4MB: + MOVQ $0x0000cf1bbcdcbf9b, R9 + MOVQ DI, R8 + SHRQ $0x10, DI + MOVQ DI, SI + SHLQ $0x10, R8 + IMULQ R9, R8 + SHRQ $0x32, R8 + SHLQ $0x10, SI + IMULQ R9, SI + SHRQ $0x32, SI + LEAL -2(CX), R9 + LEAQ 24(SP)(SI*4), R10 + MOVL (R10), SI + MOVL R9, 24(SP)(R8*4) + MOVL CX, (R10) + CMPL (DX)(SI*1), DI + JEQ match_nolit_loop_encodeBlockAsm4MB + INCL CX + JMP search_loop_encodeBlockAsm4MB + +emit_remainder_encodeBlockAsm4MB: + MOVQ src_len+32(FP), CX + SUBL 12(SP), CX + LEAQ 4(AX)(CX*1), CX + CMPQ CX, (SP) + JL emit_remainder_ok_encodeBlockAsm4MB + MOVQ $0x00000000, ret+48(FP) + RET + +emit_remainder_ok_encodeBlockAsm4MB: + MOVQ src_len+32(FP), CX + MOVL 12(SP), BX + CMPL BX, CX + JEQ emit_literal_done_emit_remainder_encodeBlockAsm4MB + MOVL CX, SI + MOVL CX, 12(SP) + LEAQ (DX)(BX*1), CX + SUBL BX, SI + LEAL -1(SI), DX + CMPL DX, $0x3c + JLT one_byte_emit_remainder_encodeBlockAsm4MB + CMPL DX, $0x00000100 + JLT two_bytes_emit_remainder_encodeBlockAsm4MB + CMPL DX, $0x00010000 + JLT three_bytes_emit_remainder_encodeBlockAsm4MB + MOVL DX, BX + SHRL $0x10, BX + MOVB $0xf8, (AX) + MOVW DX, 1(AX) + MOVB BL, 3(AX) + ADDQ $0x04, AX + JMP memmove_long_emit_remainder_encodeBlockAsm4MB + +three_bytes_emit_remainder_encodeBlockAsm4MB: + MOVB $0xf4, (AX) + MOVW DX, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_emit_remainder_encodeBlockAsm4MB + +two_bytes_emit_remainder_encodeBlockAsm4MB: + MOVB $0xf0, (AX) + MOVB DL, 1(AX) + ADDQ $0x02, AX + CMPL DX, $0x40 + JL memmove_emit_remainder_encodeBlockAsm4MB + JMP memmove_long_emit_remainder_encodeBlockAsm4MB + +one_byte_emit_remainder_encodeBlockAsm4MB: + SHLB $0x02, DL + MOVB DL, (AX) + ADDQ $0x01, AX + +memmove_emit_remainder_encodeBlockAsm4MB: + LEAQ (AX)(SI*1), DX + MOVL SI, BX + + // genMemMoveShort + CMPQ BX, $0x08 + JLE emit_lit_memmove_emit_remainder_encodeBlockAsm4MB_memmove_move_8 + CMPQ BX, $0x10 + JBE emit_lit_memmove_emit_remainder_encodeBlockAsm4MB_memmove_move_8through16 + CMPQ BX, $0x20 + JBE emit_lit_memmove_emit_remainder_encodeBlockAsm4MB_memmove_move_17through32 + JMP emit_lit_memmove_emit_remainder_encodeBlockAsm4MB_memmove_move_33through64 + +emit_lit_memmove_emit_remainder_encodeBlockAsm4MB_memmove_move_8: + MOVQ (CX), SI + MOVQ SI, (AX) + JMP memmove_end_copy_emit_remainder_encodeBlockAsm4MB + +emit_lit_memmove_emit_remainder_encodeBlockAsm4MB_memmove_move_8through16: + MOVQ (CX), SI + MOVQ -8(CX)(BX*1), CX + MOVQ SI, (AX) + MOVQ CX, -8(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeBlockAsm4MB + +emit_lit_memmove_emit_remainder_encodeBlockAsm4MB_memmove_move_17through32: + MOVOU (CX), X0 + MOVOU -16(CX)(BX*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeBlockAsm4MB + +emit_lit_memmove_emit_remainder_encodeBlockAsm4MB_memmove_move_33through64: + MOVOU (CX), X0 + MOVOU 16(CX), X1 + MOVOU -32(CX)(BX*1), X2 + MOVOU -16(CX)(BX*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(BX*1) + MOVOU X3, -16(AX)(BX*1) + +memmove_end_copy_emit_remainder_encodeBlockAsm4MB: + MOVQ DX, AX + JMP emit_literal_done_emit_remainder_encodeBlockAsm4MB + +memmove_long_emit_remainder_encodeBlockAsm4MB: + LEAQ 
(AX)(SI*1), DX + MOVL SI, BX + + // genMemMoveLong + MOVOU (CX), X0 + MOVOU 16(CX), X1 + MOVOU -32(CX)(BX*1), X2 + MOVOU -16(CX)(BX*1), X3 + MOVQ BX, DI + SHRQ $0x05, DI + MOVQ AX, SI + ANDL $0x0000001f, SI + MOVQ $0x00000040, R8 + SUBQ SI, R8 + DECQ DI + JA emit_lit_memmove_long_emit_remainder_encodeBlockAsm4MBlarge_forward_sse_loop_32 + LEAQ -32(CX)(R8*1), SI + LEAQ -32(AX)(R8*1), R9 + +emit_lit_memmove_long_emit_remainder_encodeBlockAsm4MBlarge_big_loop_back: + MOVOU (SI), X4 + MOVOU 16(SI), X5 + MOVOA X4, (R9) + MOVOA X5, 16(R9) + ADDQ $0x20, R9 + ADDQ $0x20, SI + ADDQ $0x20, R8 + DECQ DI + JNA emit_lit_memmove_long_emit_remainder_encodeBlockAsm4MBlarge_big_loop_back + +emit_lit_memmove_long_emit_remainder_encodeBlockAsm4MBlarge_forward_sse_loop_32: + MOVOU -32(CX)(R8*1), X4 + MOVOU -16(CX)(R8*1), X5 + MOVOA X4, -32(AX)(R8*1) + MOVOA X5, -16(AX)(R8*1) + ADDQ $0x20, R8 + CMPQ BX, R8 + JAE emit_lit_memmove_long_emit_remainder_encodeBlockAsm4MBlarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(BX*1) + MOVOU X3, -16(AX)(BX*1) + MOVQ DX, AX + +emit_literal_done_emit_remainder_encodeBlockAsm4MB: + MOVQ dst_base+0(FP), CX + SUBQ CX, AX + MOVQ AX, ret+48(FP) + RET + +// func encodeBlockAsm12B(dst []byte, src []byte) int +// Requires: SSE2 +TEXT ·encodeBlockAsm12B(SB), $16408-56 + MOVQ dst_base+0(FP), AX + MOVQ $0x00000080, CX + LEAQ 24(SP), DX + PXOR X0, X0 + +zero_loop_encodeBlockAsm12B: + MOVOU X0, (DX) + MOVOU X0, 16(DX) + MOVOU X0, 32(DX) + MOVOU X0, 48(DX) + MOVOU X0, 64(DX) + MOVOU X0, 80(DX) + MOVOU X0, 96(DX) + MOVOU X0, 112(DX) + ADDQ $0x80, DX + DECQ CX + JNZ zero_loop_encodeBlockAsm12B + MOVL $0x00000000, 12(SP) + MOVQ src_len+32(FP), CX + LEAQ -9(CX), DX + LEAQ -8(CX), SI + MOVL SI, 8(SP) + SHRQ $0x05, CX + SUBL CX, DX + LEAQ (AX)(DX*1), DX + MOVQ DX, (SP) + MOVL $0x00000001, CX + MOVL CX, 16(SP) + MOVQ src_base+24(FP), DX + +search_loop_encodeBlockAsm12B: + MOVL CX, SI + SUBL 12(SP), SI + SHRL $0x05, SI + LEAL 4(CX)(SI*1), SI + CMPL SI, 8(SP) + JGE emit_remainder_encodeBlockAsm12B + MOVQ (DX)(CX*1), DI + MOVL SI, 20(SP) + MOVQ $0x000000cf1bbcdcbb, R9 + MOVQ DI, R10 + MOVQ DI, R11 + SHRQ $0x08, R11 + SHLQ $0x18, R10 + IMULQ R9, R10 + SHRQ $0x34, R10 + SHLQ $0x18, R11 + IMULQ R9, R11 + SHRQ $0x34, R11 + MOVL 24(SP)(R10*4), SI + MOVL 24(SP)(R11*4), R8 + MOVL CX, 24(SP)(R10*4) + LEAL 1(CX), R10 + MOVL R10, 24(SP)(R11*4) + MOVQ DI, R10 + SHRQ $0x10, R10 + SHLQ $0x18, R10 + IMULQ R9, R10 + SHRQ $0x34, R10 + MOVL CX, R9 + SUBL 16(SP), R9 + MOVL 1(DX)(R9*1), R11 + MOVQ DI, R9 + SHRQ $0x08, R9 + CMPL R9, R11 + JNE no_repeat_found_encodeBlockAsm12B + LEAL 1(CX), DI + MOVL 12(SP), R8 + MOVL DI, SI + SUBL 16(SP), SI + JZ repeat_extend_back_end_encodeBlockAsm12B + +repeat_extend_back_loop_encodeBlockAsm12B: + CMPL DI, R8 + JLE repeat_extend_back_end_encodeBlockAsm12B + MOVB -1(DX)(SI*1), BL + MOVB -1(DX)(DI*1), R9 + CMPB BL, R9 + JNE repeat_extend_back_end_encodeBlockAsm12B + LEAL -1(DI), DI + DECL SI + JNZ repeat_extend_back_loop_encodeBlockAsm12B + +repeat_extend_back_end_encodeBlockAsm12B: + MOVL 12(SP), SI + CMPL SI, DI + JEQ emit_literal_done_repeat_emit_encodeBlockAsm12B + MOVL DI, R9 + MOVL DI, 12(SP) + LEAQ (DX)(SI*1), R10 + SUBL SI, R9 + LEAL -1(R9), SI + CMPL SI, $0x3c + JLT one_byte_repeat_emit_encodeBlockAsm12B + CMPL SI, $0x00000100 + JLT two_bytes_repeat_emit_encodeBlockAsm12B + MOVB $0xf4, (AX) + MOVW SI, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_repeat_emit_encodeBlockAsm12B + +two_bytes_repeat_emit_encodeBlockAsm12B: + MOVB $0xf0, (AX) + MOVB SI, 
1(AX) + ADDQ $0x02, AX + CMPL SI, $0x40 + JL memmove_repeat_emit_encodeBlockAsm12B + JMP memmove_long_repeat_emit_encodeBlockAsm12B + +one_byte_repeat_emit_encodeBlockAsm12B: + SHLB $0x02, SI + MOVB SI, (AX) + ADDQ $0x01, AX + +memmove_repeat_emit_encodeBlockAsm12B: + LEAQ (AX)(R9*1), SI + + // genMemMoveShort + CMPQ R9, $0x08 + JLE emit_lit_memmove_repeat_emit_encodeBlockAsm12B_memmove_move_8 + CMPQ R9, $0x10 + JBE emit_lit_memmove_repeat_emit_encodeBlockAsm12B_memmove_move_8through16 + CMPQ R9, $0x20 + JBE emit_lit_memmove_repeat_emit_encodeBlockAsm12B_memmove_move_17through32 + JMP emit_lit_memmove_repeat_emit_encodeBlockAsm12B_memmove_move_33through64 + +emit_lit_memmove_repeat_emit_encodeBlockAsm12B_memmove_move_8: + MOVQ (R10), R11 + MOVQ R11, (AX) + JMP memmove_end_copy_repeat_emit_encodeBlockAsm12B + +emit_lit_memmove_repeat_emit_encodeBlockAsm12B_memmove_move_8through16: + MOVQ (R10), R11 + MOVQ -8(R10)(R9*1), R10 + MOVQ R11, (AX) + MOVQ R10, -8(AX)(R9*1) + JMP memmove_end_copy_repeat_emit_encodeBlockAsm12B + +emit_lit_memmove_repeat_emit_encodeBlockAsm12B_memmove_move_17through32: + MOVOU (R10), X0 + MOVOU -16(R10)(R9*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(R9*1) + JMP memmove_end_copy_repeat_emit_encodeBlockAsm12B + +emit_lit_memmove_repeat_emit_encodeBlockAsm12B_memmove_move_33through64: + MOVOU (R10), X0 + MOVOU 16(R10), X1 + MOVOU -32(R10)(R9*1), X2 + MOVOU -16(R10)(R9*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R9*1) + MOVOU X3, -16(AX)(R9*1) + +memmove_end_copy_repeat_emit_encodeBlockAsm12B: + MOVQ SI, AX + JMP emit_literal_done_repeat_emit_encodeBlockAsm12B + +memmove_long_repeat_emit_encodeBlockAsm12B: + LEAQ (AX)(R9*1), SI + + // genMemMoveLong + MOVOU (R10), X0 + MOVOU 16(R10), X1 + MOVOU -32(R10)(R9*1), X2 + MOVOU -16(R10)(R9*1), X3 + MOVQ R9, R12 + SHRQ $0x05, R12 + MOVQ AX, R11 + ANDL $0x0000001f, R11 + MOVQ $0x00000040, R13 + SUBQ R11, R13 + DECQ R12 + JA emit_lit_memmove_long_repeat_emit_encodeBlockAsm12Blarge_forward_sse_loop_32 + LEAQ -32(R10)(R13*1), R11 + LEAQ -32(AX)(R13*1), R14 + +emit_lit_memmove_long_repeat_emit_encodeBlockAsm12Blarge_big_loop_back: + MOVOU (R11), X4 + MOVOU 16(R11), X5 + MOVOA X4, (R14) + MOVOA X5, 16(R14) + ADDQ $0x20, R14 + ADDQ $0x20, R11 + ADDQ $0x20, R13 + DECQ R12 + JNA emit_lit_memmove_long_repeat_emit_encodeBlockAsm12Blarge_big_loop_back + +emit_lit_memmove_long_repeat_emit_encodeBlockAsm12Blarge_forward_sse_loop_32: + MOVOU -32(R10)(R13*1), X4 + MOVOU -16(R10)(R13*1), X5 + MOVOA X4, -32(AX)(R13*1) + MOVOA X5, -16(AX)(R13*1) + ADDQ $0x20, R13 + CMPQ R9, R13 + JAE emit_lit_memmove_long_repeat_emit_encodeBlockAsm12Blarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R9*1) + MOVOU X3, -16(AX)(R9*1) + MOVQ SI, AX + +emit_literal_done_repeat_emit_encodeBlockAsm12B: + ADDL $0x05, CX + MOVL CX, SI + SUBL 16(SP), SI + MOVQ src_len+32(FP), R9 + SUBL CX, R9 + LEAQ (DX)(CX*1), R10 + LEAQ (DX)(SI*1), SI + + // matchLen + XORL R12, R12 + CMPL R9, $0x08 + JL matchlen_single_repeat_extend_encodeBlockAsm12B + +matchlen_loopback_repeat_extend_encodeBlockAsm12B: + MOVQ (R10)(R12*1), R11 + XORQ (SI)(R12*1), R11 + TESTQ R11, R11 + JZ matchlen_loop_repeat_extend_encodeBlockAsm12B + BSFQ R11, R11 + SARQ $0x03, R11 + LEAL (R12)(R11*1), R12 + JMP repeat_extend_forward_end_encodeBlockAsm12B + +matchlen_loop_repeat_extend_encodeBlockAsm12B: + LEAL -8(R9), R9 + LEAL 8(R12), R12 + CMPL R9, $0x08 + JGE matchlen_loopback_repeat_extend_encodeBlockAsm12B + +matchlen_single_repeat_extend_encodeBlockAsm12B: + 
TESTL R9, R9 + JZ repeat_extend_forward_end_encodeBlockAsm12B + +matchlen_single_loopback_repeat_extend_encodeBlockAsm12B: + MOVB (R10)(R12*1), R11 + CMPB (SI)(R12*1), R11 + JNE repeat_extend_forward_end_encodeBlockAsm12B + LEAL 1(R12), R12 + DECL R9 + JNZ matchlen_single_loopback_repeat_extend_encodeBlockAsm12B + +repeat_extend_forward_end_encodeBlockAsm12B: + ADDL R12, CX + MOVL CX, SI + SUBL DI, SI + MOVL 16(SP), DI + TESTL R8, R8 + JZ repeat_as_copy_encodeBlockAsm12B + + // emitRepeat + MOVL SI, R8 + LEAL -4(SI), SI + CMPL R8, $0x08 + JLE repeat_two_match_repeat_encodeBlockAsm12B + CMPL R8, $0x0c + JGE cant_repeat_two_offset_match_repeat_encodeBlockAsm12B + CMPL DI, $0x00000800 + JLT repeat_two_offset_match_repeat_encodeBlockAsm12B + +cant_repeat_two_offset_match_repeat_encodeBlockAsm12B: + CMPL SI, $0x00000104 + JLT repeat_three_match_repeat_encodeBlockAsm12B + LEAL -256(SI), SI + MOVW $0x0019, (AX) + MOVW SI, 2(AX) + ADDQ $0x04, AX + JMP repeat_end_emit_encodeBlockAsm12B + +repeat_three_match_repeat_encodeBlockAsm12B: + LEAL -4(SI), SI + MOVW $0x0015, (AX) + MOVB SI, 2(AX) + ADDQ $0x03, AX + JMP repeat_end_emit_encodeBlockAsm12B + +repeat_two_match_repeat_encodeBlockAsm12B: + SHLL $0x02, SI + ORL $0x01, SI + MOVW SI, (AX) + ADDQ $0x02, AX + JMP repeat_end_emit_encodeBlockAsm12B + +repeat_two_offset_match_repeat_encodeBlockAsm12B: + XORQ R8, R8 + LEAL 1(R8)(SI*4), SI + MOVB DI, 1(AX) + SARL $0x08, DI + SHLL $0x05, DI + ORL DI, SI + MOVB SI, (AX) + ADDQ $0x02, AX + JMP repeat_end_emit_encodeBlockAsm12B + +repeat_as_copy_encodeBlockAsm12B: + // emitCopy +two_byte_offset_repeat_as_copy_encodeBlockAsm12B: + CMPL SI, $0x40 + JLE two_byte_offset_short_repeat_as_copy_encodeBlockAsm12B + MOVB $0xee, (AX) + MOVW DI, 1(AX) + LEAL -60(SI), SI + ADDQ $0x03, AX + + // emitRepeat + MOVL SI, R8 + LEAL -4(SI), SI + CMPL R8, $0x08 + JLE repeat_two_repeat_as_copy_encodeBlockAsm12B_emit_copy_short + CMPL R8, $0x0c + JGE cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm12B_emit_copy_short + CMPL DI, $0x00000800 + JLT repeat_two_offset_repeat_as_copy_encodeBlockAsm12B_emit_copy_short + +cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm12B_emit_copy_short: + CMPL SI, $0x00000104 + JLT repeat_three_repeat_as_copy_encodeBlockAsm12B_emit_copy_short + LEAL -256(SI), SI + MOVW $0x0019, (AX) + MOVW SI, 2(AX) + ADDQ $0x04, AX + JMP repeat_end_emit_encodeBlockAsm12B + +repeat_three_repeat_as_copy_encodeBlockAsm12B_emit_copy_short: + LEAL -4(SI), SI + MOVW $0x0015, (AX) + MOVB SI, 2(AX) + ADDQ $0x03, AX + JMP repeat_end_emit_encodeBlockAsm12B + +repeat_two_repeat_as_copy_encodeBlockAsm12B_emit_copy_short: + SHLL $0x02, SI + ORL $0x01, SI + MOVW SI, (AX) + ADDQ $0x02, AX + JMP repeat_end_emit_encodeBlockAsm12B + +repeat_two_offset_repeat_as_copy_encodeBlockAsm12B_emit_copy_short: + XORQ R8, R8 + LEAL 1(R8)(SI*4), SI + MOVB DI, 1(AX) + SARL $0x08, DI + SHLL $0x05, DI + ORL DI, SI + MOVB SI, (AX) + ADDQ $0x02, AX + JMP repeat_end_emit_encodeBlockAsm12B + JMP two_byte_offset_repeat_as_copy_encodeBlockAsm12B + +two_byte_offset_short_repeat_as_copy_encodeBlockAsm12B: + CMPL SI, $0x0c + JGE emit_copy_three_repeat_as_copy_encodeBlockAsm12B + CMPL DI, $0x00000800 + JGE emit_copy_three_repeat_as_copy_encodeBlockAsm12B + MOVB $0x01, BL + LEAL -16(BX)(SI*4), SI + MOVB DI, 1(AX) + SHRL $0x08, DI + SHLL $0x05, DI + ORL DI, SI + MOVB SI, (AX) + ADDQ $0x02, AX + JMP repeat_end_emit_encodeBlockAsm12B + +emit_copy_three_repeat_as_copy_encodeBlockAsm12B: + MOVB $0x02, BL + LEAL -4(BX)(SI*4), SI + MOVB SI, (AX) + MOVW DI, 1(AX) 
+ ADDQ $0x03, AX + +repeat_end_emit_encodeBlockAsm12B: + MOVL CX, 12(SP) + JMP search_loop_encodeBlockAsm12B + +no_repeat_found_encodeBlockAsm12B: + CMPL (DX)(SI*1), DI + JEQ candidate_match_encodeBlockAsm12B + SHRQ $0x08, DI + MOVL 24(SP)(R10*4), SI + LEAL 2(CX), R9 + CMPL (DX)(R8*1), DI + JEQ candidate2_match_encodeBlockAsm12B + MOVL R9, 24(SP)(R10*4) + SHRQ $0x08, DI + CMPL (DX)(SI*1), DI + JEQ candidate3_match_encodeBlockAsm12B + MOVL 20(SP), CX + JMP search_loop_encodeBlockAsm12B + +candidate3_match_encodeBlockAsm12B: + ADDL $0x02, CX + JMP candidate_match_encodeBlockAsm12B + +candidate2_match_encodeBlockAsm12B: + MOVL R9, 24(SP)(R10*4) + INCL CX + MOVL R8, SI + +candidate_match_encodeBlockAsm12B: + MOVL 12(SP), DI + TESTL SI, SI + JZ match_extend_back_end_encodeBlockAsm12B + +match_extend_back_loop_encodeBlockAsm12B: + CMPL CX, DI + JLE match_extend_back_end_encodeBlockAsm12B + MOVB -1(DX)(SI*1), BL + MOVB -1(DX)(CX*1), R8 + CMPB BL, R8 + JNE match_extend_back_end_encodeBlockAsm12B + LEAL -1(CX), CX + DECL SI + JZ match_extend_back_end_encodeBlockAsm12B + JMP match_extend_back_loop_encodeBlockAsm12B + +match_extend_back_end_encodeBlockAsm12B: + MOVL CX, DI + SUBL 12(SP), DI + LEAQ 3(AX)(DI*1), DI + CMPQ DI, (SP) + JL match_dst_size_check_encodeBlockAsm12B + MOVQ $0x00000000, ret+48(FP) + RET + +match_dst_size_check_encodeBlockAsm12B: + MOVL CX, DI + MOVL 12(SP), R8 + CMPL R8, DI + JEQ emit_literal_done_match_emit_encodeBlockAsm12B + MOVL DI, R9 + MOVL DI, 12(SP) + LEAQ (DX)(R8*1), DI + SUBL R8, R9 + LEAL -1(R9), R8 + CMPL R8, $0x3c + JLT one_byte_match_emit_encodeBlockAsm12B + CMPL R8, $0x00000100 + JLT two_bytes_match_emit_encodeBlockAsm12B + MOVB $0xf4, (AX) + MOVW R8, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_match_emit_encodeBlockAsm12B + +two_bytes_match_emit_encodeBlockAsm12B: + MOVB $0xf0, (AX) + MOVB R8, 1(AX) + ADDQ $0x02, AX + CMPL R8, $0x40 + JL memmove_match_emit_encodeBlockAsm12B + JMP memmove_long_match_emit_encodeBlockAsm12B + +one_byte_match_emit_encodeBlockAsm12B: + SHLB $0x02, R8 + MOVB R8, (AX) + ADDQ $0x01, AX + +memmove_match_emit_encodeBlockAsm12B: + LEAQ (AX)(R9*1), R8 + + // genMemMoveShort + CMPQ R9, $0x08 + JLE emit_lit_memmove_match_emit_encodeBlockAsm12B_memmove_move_8 + CMPQ R9, $0x10 + JBE emit_lit_memmove_match_emit_encodeBlockAsm12B_memmove_move_8through16 + CMPQ R9, $0x20 + JBE emit_lit_memmove_match_emit_encodeBlockAsm12B_memmove_move_17through32 + JMP emit_lit_memmove_match_emit_encodeBlockAsm12B_memmove_move_33through64 + +emit_lit_memmove_match_emit_encodeBlockAsm12B_memmove_move_8: + MOVQ (DI), R10 + MOVQ R10, (AX) + JMP memmove_end_copy_match_emit_encodeBlockAsm12B + +emit_lit_memmove_match_emit_encodeBlockAsm12B_memmove_move_8through16: + MOVQ (DI), R10 + MOVQ -8(DI)(R9*1), DI + MOVQ R10, (AX) + MOVQ DI, -8(AX)(R9*1) + JMP memmove_end_copy_match_emit_encodeBlockAsm12B + +emit_lit_memmove_match_emit_encodeBlockAsm12B_memmove_move_17through32: + MOVOU (DI), X0 + MOVOU -16(DI)(R9*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(R9*1) + JMP memmove_end_copy_match_emit_encodeBlockAsm12B + +emit_lit_memmove_match_emit_encodeBlockAsm12B_memmove_move_33through64: + MOVOU (DI), X0 + MOVOU 16(DI), X1 + MOVOU -32(DI)(R9*1), X2 + MOVOU -16(DI)(R9*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R9*1) + MOVOU X3, -16(AX)(R9*1) + +memmove_end_copy_match_emit_encodeBlockAsm12B: + MOVQ R8, AX + JMP emit_literal_done_match_emit_encodeBlockAsm12B + +memmove_long_match_emit_encodeBlockAsm12B: + LEAQ (AX)(R9*1), R8 + + // genMemMoveLong + MOVOU (DI), X0 + 
MOVOU 16(DI), X1 + MOVOU -32(DI)(R9*1), X2 + MOVOU -16(DI)(R9*1), X3 + MOVQ R9, R11 + SHRQ $0x05, R11 + MOVQ AX, R10 + ANDL $0x0000001f, R10 + MOVQ $0x00000040, R12 + SUBQ R10, R12 + DECQ R11 + JA emit_lit_memmove_long_match_emit_encodeBlockAsm12Blarge_forward_sse_loop_32 + LEAQ -32(DI)(R12*1), R10 + LEAQ -32(AX)(R12*1), R13 + +emit_lit_memmove_long_match_emit_encodeBlockAsm12Blarge_big_loop_back: + MOVOU (R10), X4 + MOVOU 16(R10), X5 + MOVOA X4, (R13) + MOVOA X5, 16(R13) + ADDQ $0x20, R13 + ADDQ $0x20, R10 + ADDQ $0x20, R12 + DECQ R11 + JNA emit_lit_memmove_long_match_emit_encodeBlockAsm12Blarge_big_loop_back + +emit_lit_memmove_long_match_emit_encodeBlockAsm12Blarge_forward_sse_loop_32: + MOVOU -32(DI)(R12*1), X4 + MOVOU -16(DI)(R12*1), X5 + MOVOA X4, -32(AX)(R12*1) + MOVOA X5, -16(AX)(R12*1) + ADDQ $0x20, R12 + CMPQ R9, R12 + JAE emit_lit_memmove_long_match_emit_encodeBlockAsm12Blarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R9*1) + MOVOU X3, -16(AX)(R9*1) + MOVQ R8, AX + +emit_literal_done_match_emit_encodeBlockAsm12B: +match_nolit_loop_encodeBlockAsm12B: + MOVL CX, DI + SUBL SI, DI + MOVL DI, 16(SP) + ADDL $0x04, CX + ADDL $0x04, SI + MOVQ src_len+32(FP), DI + SUBL CX, DI + LEAQ (DX)(CX*1), R8 + LEAQ (DX)(SI*1), SI + + // matchLen + XORL R10, R10 + CMPL DI, $0x08 + JL matchlen_single_match_nolit_encodeBlockAsm12B + +matchlen_loopback_match_nolit_encodeBlockAsm12B: + MOVQ (R8)(R10*1), R9 + XORQ (SI)(R10*1), R9 + TESTQ R9, R9 + JZ matchlen_loop_match_nolit_encodeBlockAsm12B + BSFQ R9, R9 + SARQ $0x03, R9 + LEAL (R10)(R9*1), R10 + JMP match_nolit_end_encodeBlockAsm12B + +matchlen_loop_match_nolit_encodeBlockAsm12B: + LEAL -8(DI), DI + LEAL 8(R10), R10 + CMPL DI, $0x08 + JGE matchlen_loopback_match_nolit_encodeBlockAsm12B + +matchlen_single_match_nolit_encodeBlockAsm12B: + TESTL DI, DI + JZ match_nolit_end_encodeBlockAsm12B + +matchlen_single_loopback_match_nolit_encodeBlockAsm12B: + MOVB (R8)(R10*1), R9 + CMPB (SI)(R10*1), R9 + JNE match_nolit_end_encodeBlockAsm12B + LEAL 1(R10), R10 + DECL DI + JNZ matchlen_single_loopback_match_nolit_encodeBlockAsm12B + +match_nolit_end_encodeBlockAsm12B: + ADDL R10, CX + MOVL 16(SP), SI + ADDL $0x04, R10 + MOVL CX, 12(SP) + + // emitCopy +two_byte_offset_match_nolit_encodeBlockAsm12B: + CMPL R10, $0x40 + JLE two_byte_offset_short_match_nolit_encodeBlockAsm12B + MOVB $0xee, (AX) + MOVW SI, 1(AX) + LEAL -60(R10), R10 + ADDQ $0x03, AX + + // emitRepeat + MOVL R10, DI + LEAL -4(R10), R10 + CMPL DI, $0x08 + JLE repeat_two_match_nolit_encodeBlockAsm12B_emit_copy_short + CMPL DI, $0x0c + JGE cant_repeat_two_offset_match_nolit_encodeBlockAsm12B_emit_copy_short + CMPL SI, $0x00000800 + JLT repeat_two_offset_match_nolit_encodeBlockAsm12B_emit_copy_short + +cant_repeat_two_offset_match_nolit_encodeBlockAsm12B_emit_copy_short: + CMPL R10, $0x00000104 + JLT repeat_three_match_nolit_encodeBlockAsm12B_emit_copy_short + LEAL -256(R10), R10 + MOVW $0x0019, (AX) + MOVW R10, 2(AX) + ADDQ $0x04, AX + JMP match_nolit_emitcopy_end_encodeBlockAsm12B + +repeat_three_match_nolit_encodeBlockAsm12B_emit_copy_short: + LEAL -4(R10), R10 + MOVW $0x0015, (AX) + MOVB R10, 2(AX) + ADDQ $0x03, AX + JMP match_nolit_emitcopy_end_encodeBlockAsm12B + +repeat_two_match_nolit_encodeBlockAsm12B_emit_copy_short: + SHLL $0x02, R10 + ORL $0x01, R10 + MOVW R10, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeBlockAsm12B + +repeat_two_offset_match_nolit_encodeBlockAsm12B_emit_copy_short: + XORQ DI, DI + LEAL 1(DI)(R10*4), R10 + MOVB SI, 1(AX) + SARL 
$0x08, SI + SHLL $0x05, SI + ORL SI, R10 + MOVB R10, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeBlockAsm12B + JMP two_byte_offset_match_nolit_encodeBlockAsm12B + +two_byte_offset_short_match_nolit_encodeBlockAsm12B: + CMPL R10, $0x0c + JGE emit_copy_three_match_nolit_encodeBlockAsm12B + CMPL SI, $0x00000800 + JGE emit_copy_three_match_nolit_encodeBlockAsm12B + MOVB $0x01, BL + LEAL -16(BX)(R10*4), R10 + MOVB SI, 1(AX) + SHRL $0x08, SI + SHLL $0x05, SI + ORL SI, R10 + MOVB R10, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeBlockAsm12B + +emit_copy_three_match_nolit_encodeBlockAsm12B: + MOVB $0x02, BL + LEAL -4(BX)(R10*4), R10 + MOVB R10, (AX) + MOVW SI, 1(AX) + ADDQ $0x03, AX + +match_nolit_emitcopy_end_encodeBlockAsm12B: + CMPL CX, 8(SP) + JGE emit_remainder_encodeBlockAsm12B + MOVQ -2(DX)(CX*1), DI + CMPQ AX, (SP) + JL match_nolit_dst_ok_encodeBlockAsm12B + MOVQ $0x00000000, ret+48(FP) + RET + +match_nolit_dst_ok_encodeBlockAsm12B: + MOVQ $0x000000cf1bbcdcbb, R9 + MOVQ DI, R8 + SHRQ $0x10, DI + MOVQ DI, SI + SHLQ $0x18, R8 + IMULQ R9, R8 + SHRQ $0x34, R8 + SHLQ $0x18, SI + IMULQ R9, SI + SHRQ $0x34, SI + LEAL -2(CX), R9 + LEAQ 24(SP)(SI*4), R10 + MOVL (R10), SI + MOVL R9, 24(SP)(R8*4) + MOVL CX, (R10) + CMPL (DX)(SI*1), DI + JEQ match_nolit_loop_encodeBlockAsm12B + INCL CX + JMP search_loop_encodeBlockAsm12B + +emit_remainder_encodeBlockAsm12B: + MOVQ src_len+32(FP), CX + SUBL 12(SP), CX + LEAQ 3(AX)(CX*1), CX + CMPQ CX, (SP) + JL emit_remainder_ok_encodeBlockAsm12B + MOVQ $0x00000000, ret+48(FP) + RET + +emit_remainder_ok_encodeBlockAsm12B: + MOVQ src_len+32(FP), CX + MOVL 12(SP), BX + CMPL BX, CX + JEQ emit_literal_done_emit_remainder_encodeBlockAsm12B + MOVL CX, SI + MOVL CX, 12(SP) + LEAQ (DX)(BX*1), CX + SUBL BX, SI + LEAL -1(SI), DX + CMPL DX, $0x3c + JLT one_byte_emit_remainder_encodeBlockAsm12B + CMPL DX, $0x00000100 + JLT two_bytes_emit_remainder_encodeBlockAsm12B + MOVB $0xf4, (AX) + MOVW DX, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_emit_remainder_encodeBlockAsm12B + +two_bytes_emit_remainder_encodeBlockAsm12B: + MOVB $0xf0, (AX) + MOVB DL, 1(AX) + ADDQ $0x02, AX + CMPL DX, $0x40 + JL memmove_emit_remainder_encodeBlockAsm12B + JMP memmove_long_emit_remainder_encodeBlockAsm12B + +one_byte_emit_remainder_encodeBlockAsm12B: + SHLB $0x02, DL + MOVB DL, (AX) + ADDQ $0x01, AX + +memmove_emit_remainder_encodeBlockAsm12B: + LEAQ (AX)(SI*1), DX + MOVL SI, BX + + // genMemMoveShort + CMPQ BX, $0x08 + JLE emit_lit_memmove_emit_remainder_encodeBlockAsm12B_memmove_move_8 + CMPQ BX, $0x10 + JBE emit_lit_memmove_emit_remainder_encodeBlockAsm12B_memmove_move_8through16 + CMPQ BX, $0x20 + JBE emit_lit_memmove_emit_remainder_encodeBlockAsm12B_memmove_move_17through32 + JMP emit_lit_memmove_emit_remainder_encodeBlockAsm12B_memmove_move_33through64 + +emit_lit_memmove_emit_remainder_encodeBlockAsm12B_memmove_move_8: + MOVQ (CX), SI + MOVQ SI, (AX) + JMP memmove_end_copy_emit_remainder_encodeBlockAsm12B + +emit_lit_memmove_emit_remainder_encodeBlockAsm12B_memmove_move_8through16: + MOVQ (CX), SI + MOVQ -8(CX)(BX*1), CX + MOVQ SI, (AX) + MOVQ CX, -8(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeBlockAsm12B + +emit_lit_memmove_emit_remainder_encodeBlockAsm12B_memmove_move_17through32: + MOVOU (CX), X0 + MOVOU -16(CX)(BX*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeBlockAsm12B + +emit_lit_memmove_emit_remainder_encodeBlockAsm12B_memmove_move_33through64: + MOVOU (CX), X0 + MOVOU 16(CX), X1 + MOVOU -32(CX)(BX*1), X2 + 
MOVOU -16(CX)(BX*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(BX*1) + MOVOU X3, -16(AX)(BX*1) + +memmove_end_copy_emit_remainder_encodeBlockAsm12B: + MOVQ DX, AX + JMP emit_literal_done_emit_remainder_encodeBlockAsm12B + +memmove_long_emit_remainder_encodeBlockAsm12B: + LEAQ (AX)(SI*1), DX + MOVL SI, BX + + // genMemMoveLong + MOVOU (CX), X0 + MOVOU 16(CX), X1 + MOVOU -32(CX)(BX*1), X2 + MOVOU -16(CX)(BX*1), X3 + MOVQ BX, DI + SHRQ $0x05, DI + MOVQ AX, SI + ANDL $0x0000001f, SI + MOVQ $0x00000040, R8 + SUBQ SI, R8 + DECQ DI + JA emit_lit_memmove_long_emit_remainder_encodeBlockAsm12Blarge_forward_sse_loop_32 + LEAQ -32(CX)(R8*1), SI + LEAQ -32(AX)(R8*1), R9 + +emit_lit_memmove_long_emit_remainder_encodeBlockAsm12Blarge_big_loop_back: + MOVOU (SI), X4 + MOVOU 16(SI), X5 + MOVOA X4, (R9) + MOVOA X5, 16(R9) + ADDQ $0x20, R9 + ADDQ $0x20, SI + ADDQ $0x20, R8 + DECQ DI + JNA emit_lit_memmove_long_emit_remainder_encodeBlockAsm12Blarge_big_loop_back + +emit_lit_memmove_long_emit_remainder_encodeBlockAsm12Blarge_forward_sse_loop_32: + MOVOU -32(CX)(R8*1), X4 + MOVOU -16(CX)(R8*1), X5 + MOVOA X4, -32(AX)(R8*1) + MOVOA X5, -16(AX)(R8*1) + ADDQ $0x20, R8 + CMPQ BX, R8 + JAE emit_lit_memmove_long_emit_remainder_encodeBlockAsm12Blarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(BX*1) + MOVOU X3, -16(AX)(BX*1) + MOVQ DX, AX + +emit_literal_done_emit_remainder_encodeBlockAsm12B: + MOVQ dst_base+0(FP), CX + SUBQ CX, AX + MOVQ AX, ret+48(FP) + RET + +// func encodeBlockAsm10B(dst []byte, src []byte) int +// Requires: SSE2 +TEXT ·encodeBlockAsm10B(SB), $4120-56 + MOVQ dst_base+0(FP), AX + MOVQ $0x00000020, CX + LEAQ 24(SP), DX + PXOR X0, X0 + +zero_loop_encodeBlockAsm10B: + MOVOU X0, (DX) + MOVOU X0, 16(DX) + MOVOU X0, 32(DX) + MOVOU X0, 48(DX) + MOVOU X0, 64(DX) + MOVOU X0, 80(DX) + MOVOU X0, 96(DX) + MOVOU X0, 112(DX) + ADDQ $0x80, DX + DECQ CX + JNZ zero_loop_encodeBlockAsm10B + MOVL $0x00000000, 12(SP) + MOVQ src_len+32(FP), CX + LEAQ -9(CX), DX + LEAQ -8(CX), SI + MOVL SI, 8(SP) + SHRQ $0x05, CX + SUBL CX, DX + LEAQ (AX)(DX*1), DX + MOVQ DX, (SP) + MOVL $0x00000001, CX + MOVL CX, 16(SP) + MOVQ src_base+24(FP), DX + +search_loop_encodeBlockAsm10B: + MOVL CX, SI + SUBL 12(SP), SI + SHRL $0x05, SI + LEAL 4(CX)(SI*1), SI + CMPL SI, 8(SP) + JGE emit_remainder_encodeBlockAsm10B + MOVQ (DX)(CX*1), DI + MOVL SI, 20(SP) + MOVQ $0x9e3779b1, R9 + MOVQ DI, R10 + MOVQ DI, R11 + SHRQ $0x08, R11 + SHLQ $0x20, R10 + IMULQ R9, R10 + SHRQ $0x36, R10 + SHLQ $0x20, R11 + IMULQ R9, R11 + SHRQ $0x36, R11 + MOVL 24(SP)(R10*4), SI + MOVL 24(SP)(R11*4), R8 + MOVL CX, 24(SP)(R10*4) + LEAL 1(CX), R10 + MOVL R10, 24(SP)(R11*4) + MOVQ DI, R10 + SHRQ $0x10, R10 + SHLQ $0x20, R10 + IMULQ R9, R10 + SHRQ $0x36, R10 + MOVL CX, R9 + SUBL 16(SP), R9 + MOVL 1(DX)(R9*1), R11 + MOVQ DI, R9 + SHRQ $0x08, R9 + CMPL R9, R11 + JNE no_repeat_found_encodeBlockAsm10B + LEAL 1(CX), DI + MOVL 12(SP), R8 + MOVL DI, SI + SUBL 16(SP), SI + JZ repeat_extend_back_end_encodeBlockAsm10B + +repeat_extend_back_loop_encodeBlockAsm10B: + CMPL DI, R8 + JLE repeat_extend_back_end_encodeBlockAsm10B + MOVB -1(DX)(SI*1), BL + MOVB -1(DX)(DI*1), R9 + CMPB BL, R9 + JNE repeat_extend_back_end_encodeBlockAsm10B + LEAL -1(DI), DI + DECL SI + JNZ repeat_extend_back_loop_encodeBlockAsm10B + +repeat_extend_back_end_encodeBlockAsm10B: + MOVL 12(SP), SI + CMPL SI, DI + JEQ emit_literal_done_repeat_emit_encodeBlockAsm10B + MOVL DI, R9 + MOVL DI, 12(SP) + LEAQ (DX)(SI*1), R10 + SUBL SI, R9 + LEAL -1(R9), SI + CMPL SI, $0x3c + 
JLT one_byte_repeat_emit_encodeBlockAsm10B + CMPL SI, $0x00000100 + JLT two_bytes_repeat_emit_encodeBlockAsm10B + MOVB $0xf4, (AX) + MOVW SI, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_repeat_emit_encodeBlockAsm10B + +two_bytes_repeat_emit_encodeBlockAsm10B: + MOVB $0xf0, (AX) + MOVB SI, 1(AX) + ADDQ $0x02, AX + CMPL SI, $0x40 + JL memmove_repeat_emit_encodeBlockAsm10B + JMP memmove_long_repeat_emit_encodeBlockAsm10B + +one_byte_repeat_emit_encodeBlockAsm10B: + SHLB $0x02, SI + MOVB SI, (AX) + ADDQ $0x01, AX + +memmove_repeat_emit_encodeBlockAsm10B: + LEAQ (AX)(R9*1), SI + + // genMemMoveShort + CMPQ R9, $0x08 + JLE emit_lit_memmove_repeat_emit_encodeBlockAsm10B_memmove_move_8 + CMPQ R9, $0x10 + JBE emit_lit_memmove_repeat_emit_encodeBlockAsm10B_memmove_move_8through16 + CMPQ R9, $0x20 + JBE emit_lit_memmove_repeat_emit_encodeBlockAsm10B_memmove_move_17through32 + JMP emit_lit_memmove_repeat_emit_encodeBlockAsm10B_memmove_move_33through64 + +emit_lit_memmove_repeat_emit_encodeBlockAsm10B_memmove_move_8: + MOVQ (R10), R11 + MOVQ R11, (AX) + JMP memmove_end_copy_repeat_emit_encodeBlockAsm10B + +emit_lit_memmove_repeat_emit_encodeBlockAsm10B_memmove_move_8through16: + MOVQ (R10), R11 + MOVQ -8(R10)(R9*1), R10 + MOVQ R11, (AX) + MOVQ R10, -8(AX)(R9*1) + JMP memmove_end_copy_repeat_emit_encodeBlockAsm10B + +emit_lit_memmove_repeat_emit_encodeBlockAsm10B_memmove_move_17through32: + MOVOU (R10), X0 + MOVOU -16(R10)(R9*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(R9*1) + JMP memmove_end_copy_repeat_emit_encodeBlockAsm10B + +emit_lit_memmove_repeat_emit_encodeBlockAsm10B_memmove_move_33through64: + MOVOU (R10), X0 + MOVOU 16(R10), X1 + MOVOU -32(R10)(R9*1), X2 + MOVOU -16(R10)(R9*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R9*1) + MOVOU X3, -16(AX)(R9*1) + +memmove_end_copy_repeat_emit_encodeBlockAsm10B: + MOVQ SI, AX + JMP emit_literal_done_repeat_emit_encodeBlockAsm10B + +memmove_long_repeat_emit_encodeBlockAsm10B: + LEAQ (AX)(R9*1), SI + + // genMemMoveLong + MOVOU (R10), X0 + MOVOU 16(R10), X1 + MOVOU -32(R10)(R9*1), X2 + MOVOU -16(R10)(R9*1), X3 + MOVQ R9, R12 + SHRQ $0x05, R12 + MOVQ AX, R11 + ANDL $0x0000001f, R11 + MOVQ $0x00000040, R13 + SUBQ R11, R13 + DECQ R12 + JA emit_lit_memmove_long_repeat_emit_encodeBlockAsm10Blarge_forward_sse_loop_32 + LEAQ -32(R10)(R13*1), R11 + LEAQ -32(AX)(R13*1), R14 + +emit_lit_memmove_long_repeat_emit_encodeBlockAsm10Blarge_big_loop_back: + MOVOU (R11), X4 + MOVOU 16(R11), X5 + MOVOA X4, (R14) + MOVOA X5, 16(R14) + ADDQ $0x20, R14 + ADDQ $0x20, R11 + ADDQ $0x20, R13 + DECQ R12 + JNA emit_lit_memmove_long_repeat_emit_encodeBlockAsm10Blarge_big_loop_back + +emit_lit_memmove_long_repeat_emit_encodeBlockAsm10Blarge_forward_sse_loop_32: + MOVOU -32(R10)(R13*1), X4 + MOVOU -16(R10)(R13*1), X5 + MOVOA X4, -32(AX)(R13*1) + MOVOA X5, -16(AX)(R13*1) + ADDQ $0x20, R13 + CMPQ R9, R13 + JAE emit_lit_memmove_long_repeat_emit_encodeBlockAsm10Blarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R9*1) + MOVOU X3, -16(AX)(R9*1) + MOVQ SI, AX + +emit_literal_done_repeat_emit_encodeBlockAsm10B: + ADDL $0x05, CX + MOVL CX, SI + SUBL 16(SP), SI + MOVQ src_len+32(FP), R9 + SUBL CX, R9 + LEAQ (DX)(CX*1), R10 + LEAQ (DX)(SI*1), SI + + // matchLen + XORL R12, R12 + CMPL R9, $0x08 + JL matchlen_single_repeat_extend_encodeBlockAsm10B + +matchlen_loopback_repeat_extend_encodeBlockAsm10B: + MOVQ (R10)(R12*1), R11 + XORQ (SI)(R12*1), R11 + TESTQ R11, R11 + JZ matchlen_loop_repeat_extend_encodeBlockAsm10B + BSFQ R11, R11 + SARQ $0x03, R11 + 
LEAL (R12)(R11*1), R12 + JMP repeat_extend_forward_end_encodeBlockAsm10B + +matchlen_loop_repeat_extend_encodeBlockAsm10B: + LEAL -8(R9), R9 + LEAL 8(R12), R12 + CMPL R9, $0x08 + JGE matchlen_loopback_repeat_extend_encodeBlockAsm10B + +matchlen_single_repeat_extend_encodeBlockAsm10B: + TESTL R9, R9 + JZ repeat_extend_forward_end_encodeBlockAsm10B + +matchlen_single_loopback_repeat_extend_encodeBlockAsm10B: + MOVB (R10)(R12*1), R11 + CMPB (SI)(R12*1), R11 + JNE repeat_extend_forward_end_encodeBlockAsm10B + LEAL 1(R12), R12 + DECL R9 + JNZ matchlen_single_loopback_repeat_extend_encodeBlockAsm10B + +repeat_extend_forward_end_encodeBlockAsm10B: + ADDL R12, CX + MOVL CX, SI + SUBL DI, SI + MOVL 16(SP), DI + TESTL R8, R8 + JZ repeat_as_copy_encodeBlockAsm10B + + // emitRepeat + MOVL SI, R8 + LEAL -4(SI), SI + CMPL R8, $0x08 + JLE repeat_two_match_repeat_encodeBlockAsm10B + CMPL R8, $0x0c + JGE cant_repeat_two_offset_match_repeat_encodeBlockAsm10B + CMPL DI, $0x00000800 + JLT repeat_two_offset_match_repeat_encodeBlockAsm10B + +cant_repeat_two_offset_match_repeat_encodeBlockAsm10B: + CMPL SI, $0x00000104 + JLT repeat_three_match_repeat_encodeBlockAsm10B + LEAL -256(SI), SI + MOVW $0x0019, (AX) + MOVW SI, 2(AX) + ADDQ $0x04, AX + JMP repeat_end_emit_encodeBlockAsm10B + +repeat_three_match_repeat_encodeBlockAsm10B: + LEAL -4(SI), SI + MOVW $0x0015, (AX) + MOVB SI, 2(AX) + ADDQ $0x03, AX + JMP repeat_end_emit_encodeBlockAsm10B + +repeat_two_match_repeat_encodeBlockAsm10B: + SHLL $0x02, SI + ORL $0x01, SI + MOVW SI, (AX) + ADDQ $0x02, AX + JMP repeat_end_emit_encodeBlockAsm10B + +repeat_two_offset_match_repeat_encodeBlockAsm10B: + XORQ R8, R8 + LEAL 1(R8)(SI*4), SI + MOVB DI, 1(AX) + SARL $0x08, DI + SHLL $0x05, DI + ORL DI, SI + MOVB SI, (AX) + ADDQ $0x02, AX + JMP repeat_end_emit_encodeBlockAsm10B + +repeat_as_copy_encodeBlockAsm10B: + // emitCopy +two_byte_offset_repeat_as_copy_encodeBlockAsm10B: + CMPL SI, $0x40 + JLE two_byte_offset_short_repeat_as_copy_encodeBlockAsm10B + MOVB $0xee, (AX) + MOVW DI, 1(AX) + LEAL -60(SI), SI + ADDQ $0x03, AX + + // emitRepeat + MOVL SI, R8 + LEAL -4(SI), SI + CMPL R8, $0x08 + JLE repeat_two_repeat_as_copy_encodeBlockAsm10B_emit_copy_short + CMPL R8, $0x0c + JGE cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm10B_emit_copy_short + CMPL DI, $0x00000800 + JLT repeat_two_offset_repeat_as_copy_encodeBlockAsm10B_emit_copy_short + +cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm10B_emit_copy_short: + CMPL SI, $0x00000104 + JLT repeat_three_repeat_as_copy_encodeBlockAsm10B_emit_copy_short + LEAL -256(SI), SI + MOVW $0x0019, (AX) + MOVW SI, 2(AX) + ADDQ $0x04, AX + JMP repeat_end_emit_encodeBlockAsm10B + +repeat_three_repeat_as_copy_encodeBlockAsm10B_emit_copy_short: + LEAL -4(SI), SI + MOVW $0x0015, (AX) + MOVB SI, 2(AX) + ADDQ $0x03, AX + JMP repeat_end_emit_encodeBlockAsm10B + +repeat_two_repeat_as_copy_encodeBlockAsm10B_emit_copy_short: + SHLL $0x02, SI + ORL $0x01, SI + MOVW SI, (AX) + ADDQ $0x02, AX + JMP repeat_end_emit_encodeBlockAsm10B + +repeat_two_offset_repeat_as_copy_encodeBlockAsm10B_emit_copy_short: + XORQ R8, R8 + LEAL 1(R8)(SI*4), SI + MOVB DI, 1(AX) + SARL $0x08, DI + SHLL $0x05, DI + ORL DI, SI + MOVB SI, (AX) + ADDQ $0x02, AX + JMP repeat_end_emit_encodeBlockAsm10B + JMP two_byte_offset_repeat_as_copy_encodeBlockAsm10B + +two_byte_offset_short_repeat_as_copy_encodeBlockAsm10B: + CMPL SI, $0x0c + JGE emit_copy_three_repeat_as_copy_encodeBlockAsm10B + CMPL DI, $0x00000800 + JGE emit_copy_three_repeat_as_copy_encodeBlockAsm10B + MOVB $0x01, BL + 
LEAL -16(BX)(SI*4), SI + MOVB DI, 1(AX) + SHRL $0x08, DI + SHLL $0x05, DI + ORL DI, SI + MOVB SI, (AX) + ADDQ $0x02, AX + JMP repeat_end_emit_encodeBlockAsm10B + +emit_copy_three_repeat_as_copy_encodeBlockAsm10B: + MOVB $0x02, BL + LEAL -4(BX)(SI*4), SI + MOVB SI, (AX) + MOVW DI, 1(AX) + ADDQ $0x03, AX + +repeat_end_emit_encodeBlockAsm10B: + MOVL CX, 12(SP) + JMP search_loop_encodeBlockAsm10B + +no_repeat_found_encodeBlockAsm10B: + CMPL (DX)(SI*1), DI + JEQ candidate_match_encodeBlockAsm10B + SHRQ $0x08, DI + MOVL 24(SP)(R10*4), SI + LEAL 2(CX), R9 + CMPL (DX)(R8*1), DI + JEQ candidate2_match_encodeBlockAsm10B + MOVL R9, 24(SP)(R10*4) + SHRQ $0x08, DI + CMPL (DX)(SI*1), DI + JEQ candidate3_match_encodeBlockAsm10B + MOVL 20(SP), CX + JMP search_loop_encodeBlockAsm10B + +candidate3_match_encodeBlockAsm10B: + ADDL $0x02, CX + JMP candidate_match_encodeBlockAsm10B + +candidate2_match_encodeBlockAsm10B: + MOVL R9, 24(SP)(R10*4) + INCL CX + MOVL R8, SI + +candidate_match_encodeBlockAsm10B: + MOVL 12(SP), DI + TESTL SI, SI + JZ match_extend_back_end_encodeBlockAsm10B + +match_extend_back_loop_encodeBlockAsm10B: + CMPL CX, DI + JLE match_extend_back_end_encodeBlockAsm10B + MOVB -1(DX)(SI*1), BL + MOVB -1(DX)(CX*1), R8 + CMPB BL, R8 + JNE match_extend_back_end_encodeBlockAsm10B + LEAL -1(CX), CX + DECL SI + JZ match_extend_back_end_encodeBlockAsm10B + JMP match_extend_back_loop_encodeBlockAsm10B + +match_extend_back_end_encodeBlockAsm10B: + MOVL CX, DI + SUBL 12(SP), DI + LEAQ 3(AX)(DI*1), DI + CMPQ DI, (SP) + JL match_dst_size_check_encodeBlockAsm10B + MOVQ $0x00000000, ret+48(FP) + RET + +match_dst_size_check_encodeBlockAsm10B: + MOVL CX, DI + MOVL 12(SP), R8 + CMPL R8, DI + JEQ emit_literal_done_match_emit_encodeBlockAsm10B + MOVL DI, R9 + MOVL DI, 12(SP) + LEAQ (DX)(R8*1), DI + SUBL R8, R9 + LEAL -1(R9), R8 + CMPL R8, $0x3c + JLT one_byte_match_emit_encodeBlockAsm10B + CMPL R8, $0x00000100 + JLT two_bytes_match_emit_encodeBlockAsm10B + MOVB $0xf4, (AX) + MOVW R8, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_match_emit_encodeBlockAsm10B + +two_bytes_match_emit_encodeBlockAsm10B: + MOVB $0xf0, (AX) + MOVB R8, 1(AX) + ADDQ $0x02, AX + CMPL R8, $0x40 + JL memmove_match_emit_encodeBlockAsm10B + JMP memmove_long_match_emit_encodeBlockAsm10B + +one_byte_match_emit_encodeBlockAsm10B: + SHLB $0x02, R8 + MOVB R8, (AX) + ADDQ $0x01, AX + +memmove_match_emit_encodeBlockAsm10B: + LEAQ (AX)(R9*1), R8 + + // genMemMoveShort + CMPQ R9, $0x08 + JLE emit_lit_memmove_match_emit_encodeBlockAsm10B_memmove_move_8 + CMPQ R9, $0x10 + JBE emit_lit_memmove_match_emit_encodeBlockAsm10B_memmove_move_8through16 + CMPQ R9, $0x20 + JBE emit_lit_memmove_match_emit_encodeBlockAsm10B_memmove_move_17through32 + JMP emit_lit_memmove_match_emit_encodeBlockAsm10B_memmove_move_33through64 + +emit_lit_memmove_match_emit_encodeBlockAsm10B_memmove_move_8: + MOVQ (DI), R10 + MOVQ R10, (AX) + JMP memmove_end_copy_match_emit_encodeBlockAsm10B + +emit_lit_memmove_match_emit_encodeBlockAsm10B_memmove_move_8through16: + MOVQ (DI), R10 + MOVQ -8(DI)(R9*1), DI + MOVQ R10, (AX) + MOVQ DI, -8(AX)(R9*1) + JMP memmove_end_copy_match_emit_encodeBlockAsm10B + +emit_lit_memmove_match_emit_encodeBlockAsm10B_memmove_move_17through32: + MOVOU (DI), X0 + MOVOU -16(DI)(R9*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(R9*1) + JMP memmove_end_copy_match_emit_encodeBlockAsm10B + +emit_lit_memmove_match_emit_encodeBlockAsm10B_memmove_move_33through64: + MOVOU (DI), X0 + MOVOU 16(DI), X1 + MOVOU -32(DI)(R9*1), X2 + MOVOU -16(DI)(R9*1), X3 + MOVOU X0, (AX) + MOVOU 
X1, 16(AX) + MOVOU X2, -32(AX)(R9*1) + MOVOU X3, -16(AX)(R9*1) + +memmove_end_copy_match_emit_encodeBlockAsm10B: + MOVQ R8, AX + JMP emit_literal_done_match_emit_encodeBlockAsm10B + +memmove_long_match_emit_encodeBlockAsm10B: + LEAQ (AX)(R9*1), R8 + + // genMemMoveLong + MOVOU (DI), X0 + MOVOU 16(DI), X1 + MOVOU -32(DI)(R9*1), X2 + MOVOU -16(DI)(R9*1), X3 + MOVQ R9, R11 + SHRQ $0x05, R11 + MOVQ AX, R10 + ANDL $0x0000001f, R10 + MOVQ $0x00000040, R12 + SUBQ R10, R12 + DECQ R11 + JA emit_lit_memmove_long_match_emit_encodeBlockAsm10Blarge_forward_sse_loop_32 + LEAQ -32(DI)(R12*1), R10 + LEAQ -32(AX)(R12*1), R13 + +emit_lit_memmove_long_match_emit_encodeBlockAsm10Blarge_big_loop_back: + MOVOU (R10), X4 + MOVOU 16(R10), X5 + MOVOA X4, (R13) + MOVOA X5, 16(R13) + ADDQ $0x20, R13 + ADDQ $0x20, R10 + ADDQ $0x20, R12 + DECQ R11 + JNA emit_lit_memmove_long_match_emit_encodeBlockAsm10Blarge_big_loop_back + +emit_lit_memmove_long_match_emit_encodeBlockAsm10Blarge_forward_sse_loop_32: + MOVOU -32(DI)(R12*1), X4 + MOVOU -16(DI)(R12*1), X5 + MOVOA X4, -32(AX)(R12*1) + MOVOA X5, -16(AX)(R12*1) + ADDQ $0x20, R12 + CMPQ R9, R12 + JAE emit_lit_memmove_long_match_emit_encodeBlockAsm10Blarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R9*1) + MOVOU X3, -16(AX)(R9*1) + MOVQ R8, AX + +emit_literal_done_match_emit_encodeBlockAsm10B: +match_nolit_loop_encodeBlockAsm10B: + MOVL CX, DI + SUBL SI, DI + MOVL DI, 16(SP) + ADDL $0x04, CX + ADDL $0x04, SI + MOVQ src_len+32(FP), DI + SUBL CX, DI + LEAQ (DX)(CX*1), R8 + LEAQ (DX)(SI*1), SI + + // matchLen + XORL R10, R10 + CMPL DI, $0x08 + JL matchlen_single_match_nolit_encodeBlockAsm10B + +matchlen_loopback_match_nolit_encodeBlockAsm10B: + MOVQ (R8)(R10*1), R9 + XORQ (SI)(R10*1), R9 + TESTQ R9, R9 + JZ matchlen_loop_match_nolit_encodeBlockAsm10B + BSFQ R9, R9 + SARQ $0x03, R9 + LEAL (R10)(R9*1), R10 + JMP match_nolit_end_encodeBlockAsm10B + +matchlen_loop_match_nolit_encodeBlockAsm10B: + LEAL -8(DI), DI + LEAL 8(R10), R10 + CMPL DI, $0x08 + JGE matchlen_loopback_match_nolit_encodeBlockAsm10B + +matchlen_single_match_nolit_encodeBlockAsm10B: + TESTL DI, DI + JZ match_nolit_end_encodeBlockAsm10B + +matchlen_single_loopback_match_nolit_encodeBlockAsm10B: + MOVB (R8)(R10*1), R9 + CMPB (SI)(R10*1), R9 + JNE match_nolit_end_encodeBlockAsm10B + LEAL 1(R10), R10 + DECL DI + JNZ matchlen_single_loopback_match_nolit_encodeBlockAsm10B + +match_nolit_end_encodeBlockAsm10B: + ADDL R10, CX + MOVL 16(SP), SI + ADDL $0x04, R10 + MOVL CX, 12(SP) + + // emitCopy +two_byte_offset_match_nolit_encodeBlockAsm10B: + CMPL R10, $0x40 + JLE two_byte_offset_short_match_nolit_encodeBlockAsm10B + MOVB $0xee, (AX) + MOVW SI, 1(AX) + LEAL -60(R10), R10 + ADDQ $0x03, AX + + // emitRepeat + MOVL R10, DI + LEAL -4(R10), R10 + CMPL DI, $0x08 + JLE repeat_two_match_nolit_encodeBlockAsm10B_emit_copy_short + CMPL DI, $0x0c + JGE cant_repeat_two_offset_match_nolit_encodeBlockAsm10B_emit_copy_short + CMPL SI, $0x00000800 + JLT repeat_two_offset_match_nolit_encodeBlockAsm10B_emit_copy_short + +cant_repeat_two_offset_match_nolit_encodeBlockAsm10B_emit_copy_short: + CMPL R10, $0x00000104 + JLT repeat_three_match_nolit_encodeBlockAsm10B_emit_copy_short + LEAL -256(R10), R10 + MOVW $0x0019, (AX) + MOVW R10, 2(AX) + ADDQ $0x04, AX + JMP match_nolit_emitcopy_end_encodeBlockAsm10B + +repeat_three_match_nolit_encodeBlockAsm10B_emit_copy_short: + LEAL -4(R10), R10 + MOVW $0x0015, (AX) + MOVB R10, 2(AX) + ADDQ $0x03, AX + JMP match_nolit_emitcopy_end_encodeBlockAsm10B + 
+repeat_two_match_nolit_encodeBlockAsm10B_emit_copy_short: + SHLL $0x02, R10 + ORL $0x01, R10 + MOVW R10, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeBlockAsm10B + +repeat_two_offset_match_nolit_encodeBlockAsm10B_emit_copy_short: + XORQ DI, DI + LEAL 1(DI)(R10*4), R10 + MOVB SI, 1(AX) + SARL $0x08, SI + SHLL $0x05, SI + ORL SI, R10 + MOVB R10, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeBlockAsm10B + JMP two_byte_offset_match_nolit_encodeBlockAsm10B + +two_byte_offset_short_match_nolit_encodeBlockAsm10B: + CMPL R10, $0x0c + JGE emit_copy_three_match_nolit_encodeBlockAsm10B + CMPL SI, $0x00000800 + JGE emit_copy_three_match_nolit_encodeBlockAsm10B + MOVB $0x01, BL + LEAL -16(BX)(R10*4), R10 + MOVB SI, 1(AX) + SHRL $0x08, SI + SHLL $0x05, SI + ORL SI, R10 + MOVB R10, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeBlockAsm10B + +emit_copy_three_match_nolit_encodeBlockAsm10B: + MOVB $0x02, BL + LEAL -4(BX)(R10*4), R10 + MOVB R10, (AX) + MOVW SI, 1(AX) + ADDQ $0x03, AX + +match_nolit_emitcopy_end_encodeBlockAsm10B: + CMPL CX, 8(SP) + JGE emit_remainder_encodeBlockAsm10B + MOVQ -2(DX)(CX*1), DI + CMPQ AX, (SP) + JL match_nolit_dst_ok_encodeBlockAsm10B + MOVQ $0x00000000, ret+48(FP) + RET + +match_nolit_dst_ok_encodeBlockAsm10B: + MOVQ $0x9e3779b1, R9 + MOVQ DI, R8 + SHRQ $0x10, DI + MOVQ DI, SI + SHLQ $0x20, R8 + IMULQ R9, R8 + SHRQ $0x36, R8 + SHLQ $0x20, SI + IMULQ R9, SI + SHRQ $0x36, SI + LEAL -2(CX), R9 + LEAQ 24(SP)(SI*4), R10 + MOVL (R10), SI + MOVL R9, 24(SP)(R8*4) + MOVL CX, (R10) + CMPL (DX)(SI*1), DI + JEQ match_nolit_loop_encodeBlockAsm10B + INCL CX + JMP search_loop_encodeBlockAsm10B + +emit_remainder_encodeBlockAsm10B: + MOVQ src_len+32(FP), CX + SUBL 12(SP), CX + LEAQ 3(AX)(CX*1), CX + CMPQ CX, (SP) + JL emit_remainder_ok_encodeBlockAsm10B + MOVQ $0x00000000, ret+48(FP) + RET + +emit_remainder_ok_encodeBlockAsm10B: + MOVQ src_len+32(FP), CX + MOVL 12(SP), BX + CMPL BX, CX + JEQ emit_literal_done_emit_remainder_encodeBlockAsm10B + MOVL CX, SI + MOVL CX, 12(SP) + LEAQ (DX)(BX*1), CX + SUBL BX, SI + LEAL -1(SI), DX + CMPL DX, $0x3c + JLT one_byte_emit_remainder_encodeBlockAsm10B + CMPL DX, $0x00000100 + JLT two_bytes_emit_remainder_encodeBlockAsm10B + MOVB $0xf4, (AX) + MOVW DX, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_emit_remainder_encodeBlockAsm10B + +two_bytes_emit_remainder_encodeBlockAsm10B: + MOVB $0xf0, (AX) + MOVB DL, 1(AX) + ADDQ $0x02, AX + CMPL DX, $0x40 + JL memmove_emit_remainder_encodeBlockAsm10B + JMP memmove_long_emit_remainder_encodeBlockAsm10B + +one_byte_emit_remainder_encodeBlockAsm10B: + SHLB $0x02, DL + MOVB DL, (AX) + ADDQ $0x01, AX + +memmove_emit_remainder_encodeBlockAsm10B: + LEAQ (AX)(SI*1), DX + MOVL SI, BX + + // genMemMoveShort + CMPQ BX, $0x08 + JLE emit_lit_memmove_emit_remainder_encodeBlockAsm10B_memmove_move_8 + CMPQ BX, $0x10 + JBE emit_lit_memmove_emit_remainder_encodeBlockAsm10B_memmove_move_8through16 + CMPQ BX, $0x20 + JBE emit_lit_memmove_emit_remainder_encodeBlockAsm10B_memmove_move_17through32 + JMP emit_lit_memmove_emit_remainder_encodeBlockAsm10B_memmove_move_33through64 + +emit_lit_memmove_emit_remainder_encodeBlockAsm10B_memmove_move_8: + MOVQ (CX), SI + MOVQ SI, (AX) + JMP memmove_end_copy_emit_remainder_encodeBlockAsm10B + +emit_lit_memmove_emit_remainder_encodeBlockAsm10B_memmove_move_8through16: + MOVQ (CX), SI + MOVQ -8(CX)(BX*1), CX + MOVQ SI, (AX) + MOVQ CX, -8(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeBlockAsm10B + 
+emit_lit_memmove_emit_remainder_encodeBlockAsm10B_memmove_move_17through32: + MOVOU (CX), X0 + MOVOU -16(CX)(BX*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeBlockAsm10B + +emit_lit_memmove_emit_remainder_encodeBlockAsm10B_memmove_move_33through64: + MOVOU (CX), X0 + MOVOU 16(CX), X1 + MOVOU -32(CX)(BX*1), X2 + MOVOU -16(CX)(BX*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(BX*1) + MOVOU X3, -16(AX)(BX*1) + +memmove_end_copy_emit_remainder_encodeBlockAsm10B: + MOVQ DX, AX + JMP emit_literal_done_emit_remainder_encodeBlockAsm10B + +memmove_long_emit_remainder_encodeBlockAsm10B: + LEAQ (AX)(SI*1), DX + MOVL SI, BX + + // genMemMoveLong + MOVOU (CX), X0 + MOVOU 16(CX), X1 + MOVOU -32(CX)(BX*1), X2 + MOVOU -16(CX)(BX*1), X3 + MOVQ BX, DI + SHRQ $0x05, DI + MOVQ AX, SI + ANDL $0x0000001f, SI + MOVQ $0x00000040, R8 + SUBQ SI, R8 + DECQ DI + JA emit_lit_memmove_long_emit_remainder_encodeBlockAsm10Blarge_forward_sse_loop_32 + LEAQ -32(CX)(R8*1), SI + LEAQ -32(AX)(R8*1), R9 + +emit_lit_memmove_long_emit_remainder_encodeBlockAsm10Blarge_big_loop_back: + MOVOU (SI), X4 + MOVOU 16(SI), X5 + MOVOA X4, (R9) + MOVOA X5, 16(R9) + ADDQ $0x20, R9 + ADDQ $0x20, SI + ADDQ $0x20, R8 + DECQ DI + JNA emit_lit_memmove_long_emit_remainder_encodeBlockAsm10Blarge_big_loop_back + +emit_lit_memmove_long_emit_remainder_encodeBlockAsm10Blarge_forward_sse_loop_32: + MOVOU -32(CX)(R8*1), X4 + MOVOU -16(CX)(R8*1), X5 + MOVOA X4, -32(AX)(R8*1) + MOVOA X5, -16(AX)(R8*1) + ADDQ $0x20, R8 + CMPQ BX, R8 + JAE emit_lit_memmove_long_emit_remainder_encodeBlockAsm10Blarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(BX*1) + MOVOU X3, -16(AX)(BX*1) + MOVQ DX, AX + +emit_literal_done_emit_remainder_encodeBlockAsm10B: + MOVQ dst_base+0(FP), CX + SUBQ CX, AX + MOVQ AX, ret+48(FP) + RET + +// func encodeBlockAsm8B(dst []byte, src []byte) int +// Requires: SSE2 +TEXT ·encodeBlockAsm8B(SB), $1048-56 + MOVQ dst_base+0(FP), AX + MOVQ $0x00000008, CX + LEAQ 24(SP), DX + PXOR X0, X0 + +zero_loop_encodeBlockAsm8B: + MOVOU X0, (DX) + MOVOU X0, 16(DX) + MOVOU X0, 32(DX) + MOVOU X0, 48(DX) + MOVOU X0, 64(DX) + MOVOU X0, 80(DX) + MOVOU X0, 96(DX) + MOVOU X0, 112(DX) + ADDQ $0x80, DX + DECQ CX + JNZ zero_loop_encodeBlockAsm8B + MOVL $0x00000000, 12(SP) + MOVQ src_len+32(FP), CX + LEAQ -9(CX), DX + LEAQ -8(CX), SI + MOVL SI, 8(SP) + SHRQ $0x05, CX + SUBL CX, DX + LEAQ (AX)(DX*1), DX + MOVQ DX, (SP) + MOVL $0x00000001, CX + MOVL CX, 16(SP) + MOVQ src_base+24(FP), DX + +search_loop_encodeBlockAsm8B: + MOVL CX, SI + SUBL 12(SP), SI + SHRL $0x04, SI + LEAL 4(CX)(SI*1), SI + CMPL SI, 8(SP) + JGE emit_remainder_encodeBlockAsm8B + MOVQ (DX)(CX*1), DI + MOVL SI, 20(SP) + MOVQ $0x9e3779b1, R9 + MOVQ DI, R10 + MOVQ DI, R11 + SHRQ $0x08, R11 + SHLQ $0x20, R10 + IMULQ R9, R10 + SHRQ $0x38, R10 + SHLQ $0x20, R11 + IMULQ R9, R11 + SHRQ $0x38, R11 + MOVL 24(SP)(R10*4), SI + MOVL 24(SP)(R11*4), R8 + MOVL CX, 24(SP)(R10*4) + LEAL 1(CX), R10 + MOVL R10, 24(SP)(R11*4) + MOVQ DI, R10 + SHRQ $0x10, R10 + SHLQ $0x20, R10 + IMULQ R9, R10 + SHRQ $0x38, R10 + MOVL CX, R9 + SUBL 16(SP), R9 + MOVL 1(DX)(R9*1), R11 + MOVQ DI, R9 + SHRQ $0x08, R9 + CMPL R9, R11 + JNE no_repeat_found_encodeBlockAsm8B + LEAL 1(CX), DI + MOVL 12(SP), R8 + MOVL DI, SI + SUBL 16(SP), SI + JZ repeat_extend_back_end_encodeBlockAsm8B + +repeat_extend_back_loop_encodeBlockAsm8B: + CMPL DI, R8 + JLE repeat_extend_back_end_encodeBlockAsm8B + MOVB -1(DX)(SI*1), BL + MOVB -1(DX)(DI*1), R9 + CMPB BL, R9 + JNE 
repeat_extend_back_end_encodeBlockAsm8B + LEAL -1(DI), DI + DECL SI + JNZ repeat_extend_back_loop_encodeBlockAsm8B + +repeat_extend_back_end_encodeBlockAsm8B: + MOVL 12(SP), SI + CMPL SI, DI + JEQ emit_literal_done_repeat_emit_encodeBlockAsm8B + MOVL DI, R9 + MOVL DI, 12(SP) + LEAQ (DX)(SI*1), R10 + SUBL SI, R9 + LEAL -1(R9), SI + CMPL SI, $0x3c + JLT one_byte_repeat_emit_encodeBlockAsm8B + CMPL SI, $0x00000100 + JLT two_bytes_repeat_emit_encodeBlockAsm8B + MOVB $0xf4, (AX) + MOVW SI, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_repeat_emit_encodeBlockAsm8B + +two_bytes_repeat_emit_encodeBlockAsm8B: + MOVB $0xf0, (AX) + MOVB SI, 1(AX) + ADDQ $0x02, AX + CMPL SI, $0x40 + JL memmove_repeat_emit_encodeBlockAsm8B + JMP memmove_long_repeat_emit_encodeBlockAsm8B + +one_byte_repeat_emit_encodeBlockAsm8B: + SHLB $0x02, SI + MOVB SI, (AX) + ADDQ $0x01, AX + +memmove_repeat_emit_encodeBlockAsm8B: + LEAQ (AX)(R9*1), SI + + // genMemMoveShort + CMPQ R9, $0x08 + JLE emit_lit_memmove_repeat_emit_encodeBlockAsm8B_memmove_move_8 + CMPQ R9, $0x10 + JBE emit_lit_memmove_repeat_emit_encodeBlockAsm8B_memmove_move_8through16 + CMPQ R9, $0x20 + JBE emit_lit_memmove_repeat_emit_encodeBlockAsm8B_memmove_move_17through32 + JMP emit_lit_memmove_repeat_emit_encodeBlockAsm8B_memmove_move_33through64 + +emit_lit_memmove_repeat_emit_encodeBlockAsm8B_memmove_move_8: + MOVQ (R10), R11 + MOVQ R11, (AX) + JMP memmove_end_copy_repeat_emit_encodeBlockAsm8B + +emit_lit_memmove_repeat_emit_encodeBlockAsm8B_memmove_move_8through16: + MOVQ (R10), R11 + MOVQ -8(R10)(R9*1), R10 + MOVQ R11, (AX) + MOVQ R10, -8(AX)(R9*1) + JMP memmove_end_copy_repeat_emit_encodeBlockAsm8B + +emit_lit_memmove_repeat_emit_encodeBlockAsm8B_memmove_move_17through32: + MOVOU (R10), X0 + MOVOU -16(R10)(R9*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(R9*1) + JMP memmove_end_copy_repeat_emit_encodeBlockAsm8B + +emit_lit_memmove_repeat_emit_encodeBlockAsm8B_memmove_move_33through64: + MOVOU (R10), X0 + MOVOU 16(R10), X1 + MOVOU -32(R10)(R9*1), X2 + MOVOU -16(R10)(R9*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R9*1) + MOVOU X3, -16(AX)(R9*1) + +memmove_end_copy_repeat_emit_encodeBlockAsm8B: + MOVQ SI, AX + JMP emit_literal_done_repeat_emit_encodeBlockAsm8B + +memmove_long_repeat_emit_encodeBlockAsm8B: + LEAQ (AX)(R9*1), SI + + // genMemMoveLong + MOVOU (R10), X0 + MOVOU 16(R10), X1 + MOVOU -32(R10)(R9*1), X2 + MOVOU -16(R10)(R9*1), X3 + MOVQ R9, R12 + SHRQ $0x05, R12 + MOVQ AX, R11 + ANDL $0x0000001f, R11 + MOVQ $0x00000040, R13 + SUBQ R11, R13 + DECQ R12 + JA emit_lit_memmove_long_repeat_emit_encodeBlockAsm8Blarge_forward_sse_loop_32 + LEAQ -32(R10)(R13*1), R11 + LEAQ -32(AX)(R13*1), R14 + +emit_lit_memmove_long_repeat_emit_encodeBlockAsm8Blarge_big_loop_back: + MOVOU (R11), X4 + MOVOU 16(R11), X5 + MOVOA X4, (R14) + MOVOA X5, 16(R14) + ADDQ $0x20, R14 + ADDQ $0x20, R11 + ADDQ $0x20, R13 + DECQ R12 + JNA emit_lit_memmove_long_repeat_emit_encodeBlockAsm8Blarge_big_loop_back + +emit_lit_memmove_long_repeat_emit_encodeBlockAsm8Blarge_forward_sse_loop_32: + MOVOU -32(R10)(R13*1), X4 + MOVOU -16(R10)(R13*1), X5 + MOVOA X4, -32(AX)(R13*1) + MOVOA X5, -16(AX)(R13*1) + ADDQ $0x20, R13 + CMPQ R9, R13 + JAE emit_lit_memmove_long_repeat_emit_encodeBlockAsm8Blarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R9*1) + MOVOU X3, -16(AX)(R9*1) + MOVQ SI, AX + +emit_literal_done_repeat_emit_encodeBlockAsm8B: + ADDL $0x05, CX + MOVL CX, SI + SUBL 16(SP), SI + MOVQ src_len+32(FP), R9 + SUBL CX, R9 + LEAQ (DX)(CX*1), R10 + LEAQ 
(DX)(SI*1), SI + + // matchLen + XORL R12, R12 + CMPL R9, $0x08 + JL matchlen_single_repeat_extend_encodeBlockAsm8B + +matchlen_loopback_repeat_extend_encodeBlockAsm8B: + MOVQ (R10)(R12*1), R11 + XORQ (SI)(R12*1), R11 + TESTQ R11, R11 + JZ matchlen_loop_repeat_extend_encodeBlockAsm8B + BSFQ R11, R11 + SARQ $0x03, R11 + LEAL (R12)(R11*1), R12 + JMP repeat_extend_forward_end_encodeBlockAsm8B + +matchlen_loop_repeat_extend_encodeBlockAsm8B: + LEAL -8(R9), R9 + LEAL 8(R12), R12 + CMPL R9, $0x08 + JGE matchlen_loopback_repeat_extend_encodeBlockAsm8B + +matchlen_single_repeat_extend_encodeBlockAsm8B: + TESTL R9, R9 + JZ repeat_extend_forward_end_encodeBlockAsm8B + +matchlen_single_loopback_repeat_extend_encodeBlockAsm8B: + MOVB (R10)(R12*1), R11 + CMPB (SI)(R12*1), R11 + JNE repeat_extend_forward_end_encodeBlockAsm8B + LEAL 1(R12), R12 + DECL R9 + JNZ matchlen_single_loopback_repeat_extend_encodeBlockAsm8B + +repeat_extend_forward_end_encodeBlockAsm8B: + ADDL R12, CX + MOVL CX, SI + SUBL DI, SI + MOVL 16(SP), DI + TESTL R8, R8 + JZ repeat_as_copy_encodeBlockAsm8B + + // emitRepeat + MOVL SI, DI + LEAL -4(SI), SI + CMPL DI, $0x08 + JLE repeat_two_match_repeat_encodeBlockAsm8B + CMPL DI, $0x0c + JGE cant_repeat_two_offset_match_repeat_encodeBlockAsm8B + +cant_repeat_two_offset_match_repeat_encodeBlockAsm8B: + CMPL SI, $0x00000104 + JLT repeat_three_match_repeat_encodeBlockAsm8B + LEAL -256(SI), SI + MOVW $0x0019, (AX) + MOVW SI, 2(AX) + ADDQ $0x04, AX + JMP repeat_end_emit_encodeBlockAsm8B + +repeat_three_match_repeat_encodeBlockAsm8B: + LEAL -4(SI), SI + MOVW $0x0015, (AX) + MOVB SI, 2(AX) + ADDQ $0x03, AX + JMP repeat_end_emit_encodeBlockAsm8B + +repeat_two_match_repeat_encodeBlockAsm8B: + SHLL $0x02, SI + ORL $0x01, SI + MOVW SI, (AX) + ADDQ $0x02, AX + JMP repeat_end_emit_encodeBlockAsm8B + XORQ R8, R8 + LEAL 1(R8)(SI*4), SI + MOVB DI, 1(AX) + SARL $0x08, DI + SHLL $0x05, DI + ORL DI, SI + MOVB SI, (AX) + ADDQ $0x02, AX + JMP repeat_end_emit_encodeBlockAsm8B + +repeat_as_copy_encodeBlockAsm8B: + // emitCopy +two_byte_offset_repeat_as_copy_encodeBlockAsm8B: + CMPL SI, $0x40 + JLE two_byte_offset_short_repeat_as_copy_encodeBlockAsm8B + MOVB $0xee, (AX) + MOVW DI, 1(AX) + LEAL -60(SI), SI + ADDQ $0x03, AX + + // emitRepeat + MOVL SI, DI + LEAL -4(SI), SI + CMPL DI, $0x08 + JLE repeat_two_repeat_as_copy_encodeBlockAsm8B_emit_copy_short + CMPL DI, $0x0c + JGE cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm8B_emit_copy_short + +cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm8B_emit_copy_short: + CMPL SI, $0x00000104 + JLT repeat_three_repeat_as_copy_encodeBlockAsm8B_emit_copy_short + LEAL -256(SI), SI + MOVW $0x0019, (AX) + MOVW SI, 2(AX) + ADDQ $0x04, AX + JMP repeat_end_emit_encodeBlockAsm8B + +repeat_three_repeat_as_copy_encodeBlockAsm8B_emit_copy_short: + LEAL -4(SI), SI + MOVW $0x0015, (AX) + MOVB SI, 2(AX) + ADDQ $0x03, AX + JMP repeat_end_emit_encodeBlockAsm8B + +repeat_two_repeat_as_copy_encodeBlockAsm8B_emit_copy_short: + SHLL $0x02, SI + ORL $0x01, SI + MOVW SI, (AX) + ADDQ $0x02, AX + JMP repeat_end_emit_encodeBlockAsm8B + XORQ R8, R8 + LEAL 1(R8)(SI*4), SI + MOVB DI, 1(AX) + SARL $0x08, DI + SHLL $0x05, DI + ORL DI, SI + MOVB SI, (AX) + ADDQ $0x02, AX + JMP repeat_end_emit_encodeBlockAsm8B + JMP two_byte_offset_repeat_as_copy_encodeBlockAsm8B + +two_byte_offset_short_repeat_as_copy_encodeBlockAsm8B: + CMPL SI, $0x0c + JGE emit_copy_three_repeat_as_copy_encodeBlockAsm8B + MOVB $0x01, BL + LEAL -16(BX)(SI*4), SI + MOVB DI, 1(AX) + SHRL $0x08, DI + SHLL $0x05, DI + ORL DI, SI + 
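+	// Tag byte is now ((offset>>8)<<5) | ((length-4)<<2) | 0b01: a copy with 1-byte offset.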
MOVB SI, (AX) + ADDQ $0x02, AX + JMP repeat_end_emit_encodeBlockAsm8B + +emit_copy_three_repeat_as_copy_encodeBlockAsm8B: + MOVB $0x02, BL + LEAL -4(BX)(SI*4), SI + MOVB SI, (AX) + MOVW DI, 1(AX) + ADDQ $0x03, AX + +repeat_end_emit_encodeBlockAsm8B: + MOVL CX, 12(SP) + JMP search_loop_encodeBlockAsm8B + +no_repeat_found_encodeBlockAsm8B: + CMPL (DX)(SI*1), DI + JEQ candidate_match_encodeBlockAsm8B + SHRQ $0x08, DI + MOVL 24(SP)(R10*4), SI + LEAL 2(CX), R9 + CMPL (DX)(R8*1), DI + JEQ candidate2_match_encodeBlockAsm8B + MOVL R9, 24(SP)(R10*4) + SHRQ $0x08, DI + CMPL (DX)(SI*1), DI + JEQ candidate3_match_encodeBlockAsm8B + MOVL 20(SP), CX + JMP search_loop_encodeBlockAsm8B + +candidate3_match_encodeBlockAsm8B: + ADDL $0x02, CX + JMP candidate_match_encodeBlockAsm8B + +candidate2_match_encodeBlockAsm8B: + MOVL R9, 24(SP)(R10*4) + INCL CX + MOVL R8, SI + +candidate_match_encodeBlockAsm8B: + MOVL 12(SP), DI + TESTL SI, SI + JZ match_extend_back_end_encodeBlockAsm8B + +match_extend_back_loop_encodeBlockAsm8B: + CMPL CX, DI + JLE match_extend_back_end_encodeBlockAsm8B + MOVB -1(DX)(SI*1), BL + MOVB -1(DX)(CX*1), R8 + CMPB BL, R8 + JNE match_extend_back_end_encodeBlockAsm8B + LEAL -1(CX), CX + DECL SI + JZ match_extend_back_end_encodeBlockAsm8B + JMP match_extend_back_loop_encodeBlockAsm8B + +match_extend_back_end_encodeBlockAsm8B: + MOVL CX, DI + SUBL 12(SP), DI + LEAQ 3(AX)(DI*1), DI + CMPQ DI, (SP) + JL match_dst_size_check_encodeBlockAsm8B + MOVQ $0x00000000, ret+48(FP) + RET + +match_dst_size_check_encodeBlockAsm8B: + MOVL CX, DI + MOVL 12(SP), R8 + CMPL R8, DI + JEQ emit_literal_done_match_emit_encodeBlockAsm8B + MOVL DI, R9 + MOVL DI, 12(SP) + LEAQ (DX)(R8*1), DI + SUBL R8, R9 + LEAL -1(R9), R8 + CMPL R8, $0x3c + JLT one_byte_match_emit_encodeBlockAsm8B + CMPL R8, $0x00000100 + JLT two_bytes_match_emit_encodeBlockAsm8B + MOVB $0xf4, (AX) + MOVW R8, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_match_emit_encodeBlockAsm8B + +two_bytes_match_emit_encodeBlockAsm8B: + MOVB $0xf0, (AX) + MOVB R8, 1(AX) + ADDQ $0x02, AX + CMPL R8, $0x40 + JL memmove_match_emit_encodeBlockAsm8B + JMP memmove_long_match_emit_encodeBlockAsm8B + +one_byte_match_emit_encodeBlockAsm8B: + SHLB $0x02, R8 + MOVB R8, (AX) + ADDQ $0x01, AX + +memmove_match_emit_encodeBlockAsm8B: + LEAQ (AX)(R9*1), R8 + + // genMemMoveShort + CMPQ R9, $0x08 + JLE emit_lit_memmove_match_emit_encodeBlockAsm8B_memmove_move_8 + CMPQ R9, $0x10 + JBE emit_lit_memmove_match_emit_encodeBlockAsm8B_memmove_move_8through16 + CMPQ R9, $0x20 + JBE emit_lit_memmove_match_emit_encodeBlockAsm8B_memmove_move_17through32 + JMP emit_lit_memmove_match_emit_encodeBlockAsm8B_memmove_move_33through64 + +emit_lit_memmove_match_emit_encodeBlockAsm8B_memmove_move_8: + MOVQ (DI), R10 + MOVQ R10, (AX) + JMP memmove_end_copy_match_emit_encodeBlockAsm8B + +emit_lit_memmove_match_emit_encodeBlockAsm8B_memmove_move_8through16: + MOVQ (DI), R10 + MOVQ -8(DI)(R9*1), DI + MOVQ R10, (AX) + MOVQ DI, -8(AX)(R9*1) + JMP memmove_end_copy_match_emit_encodeBlockAsm8B + +emit_lit_memmove_match_emit_encodeBlockAsm8B_memmove_move_17through32: + MOVOU (DI), X0 + MOVOU -16(DI)(R9*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(R9*1) + JMP memmove_end_copy_match_emit_encodeBlockAsm8B + +emit_lit_memmove_match_emit_encodeBlockAsm8B_memmove_move_33through64: + MOVOU (DI), X0 + MOVOU 16(DI), X1 + MOVOU -32(DI)(R9*1), X2 + MOVOU -16(DI)(R9*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R9*1) + MOVOU X3, -16(AX)(R9*1) + +memmove_end_copy_match_emit_encodeBlockAsm8B: + MOVQ R8, AX + JMP 
emit_literal_done_match_emit_encodeBlockAsm8B + +memmove_long_match_emit_encodeBlockAsm8B: + LEAQ (AX)(R9*1), R8 + + // genMemMoveLong + MOVOU (DI), X0 + MOVOU 16(DI), X1 + MOVOU -32(DI)(R9*1), X2 + MOVOU -16(DI)(R9*1), X3 + MOVQ R9, R11 + SHRQ $0x05, R11 + MOVQ AX, R10 + ANDL $0x0000001f, R10 + MOVQ $0x00000040, R12 + SUBQ R10, R12 + DECQ R11 + JA emit_lit_memmove_long_match_emit_encodeBlockAsm8Blarge_forward_sse_loop_32 + LEAQ -32(DI)(R12*1), R10 + LEAQ -32(AX)(R12*1), R13 + +emit_lit_memmove_long_match_emit_encodeBlockAsm8Blarge_big_loop_back: + MOVOU (R10), X4 + MOVOU 16(R10), X5 + MOVOA X4, (R13) + MOVOA X5, 16(R13) + ADDQ $0x20, R13 + ADDQ $0x20, R10 + ADDQ $0x20, R12 + DECQ R11 + JNA emit_lit_memmove_long_match_emit_encodeBlockAsm8Blarge_big_loop_back + +emit_lit_memmove_long_match_emit_encodeBlockAsm8Blarge_forward_sse_loop_32: + MOVOU -32(DI)(R12*1), X4 + MOVOU -16(DI)(R12*1), X5 + MOVOA X4, -32(AX)(R12*1) + MOVOA X5, -16(AX)(R12*1) + ADDQ $0x20, R12 + CMPQ R9, R12 + JAE emit_lit_memmove_long_match_emit_encodeBlockAsm8Blarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R9*1) + MOVOU X3, -16(AX)(R9*1) + MOVQ R8, AX + +emit_literal_done_match_emit_encodeBlockAsm8B: +match_nolit_loop_encodeBlockAsm8B: + MOVL CX, DI + SUBL SI, DI + MOVL DI, 16(SP) + ADDL $0x04, CX + ADDL $0x04, SI + MOVQ src_len+32(FP), DI + SUBL CX, DI + LEAQ (DX)(CX*1), R8 + LEAQ (DX)(SI*1), SI + + // matchLen + XORL R10, R10 + CMPL DI, $0x08 + JL matchlen_single_match_nolit_encodeBlockAsm8B + +matchlen_loopback_match_nolit_encodeBlockAsm8B: + MOVQ (R8)(R10*1), R9 + XORQ (SI)(R10*1), R9 + TESTQ R9, R9 + JZ matchlen_loop_match_nolit_encodeBlockAsm8B + BSFQ R9, R9 + SARQ $0x03, R9 + LEAL (R10)(R9*1), R10 + JMP match_nolit_end_encodeBlockAsm8B + +matchlen_loop_match_nolit_encodeBlockAsm8B: + LEAL -8(DI), DI + LEAL 8(R10), R10 + CMPL DI, $0x08 + JGE matchlen_loopback_match_nolit_encodeBlockAsm8B + +matchlen_single_match_nolit_encodeBlockAsm8B: + TESTL DI, DI + JZ match_nolit_end_encodeBlockAsm8B + +matchlen_single_loopback_match_nolit_encodeBlockAsm8B: + MOVB (R8)(R10*1), R9 + CMPB (SI)(R10*1), R9 + JNE match_nolit_end_encodeBlockAsm8B + LEAL 1(R10), R10 + DECL DI + JNZ matchlen_single_loopback_match_nolit_encodeBlockAsm8B + +match_nolit_end_encodeBlockAsm8B: + ADDL R10, CX + MOVL 16(SP), SI + ADDL $0x04, R10 + MOVL CX, 12(SP) + + // emitCopy +two_byte_offset_match_nolit_encodeBlockAsm8B: + CMPL R10, $0x40 + JLE two_byte_offset_short_match_nolit_encodeBlockAsm8B + MOVB $0xee, (AX) + MOVW SI, 1(AX) + LEAL -60(R10), R10 + ADDQ $0x03, AX + + // emitRepeat + MOVL R10, SI + LEAL -4(R10), R10 + CMPL SI, $0x08 + JLE repeat_two_match_nolit_encodeBlockAsm8B_emit_copy_short + CMPL SI, $0x0c + JGE cant_repeat_two_offset_match_nolit_encodeBlockAsm8B_emit_copy_short + +cant_repeat_two_offset_match_nolit_encodeBlockAsm8B_emit_copy_short: + CMPL R10, $0x00000104 + JLT repeat_three_match_nolit_encodeBlockAsm8B_emit_copy_short + LEAL -256(R10), R10 + MOVW $0x0019, (AX) + MOVW R10, 2(AX) + ADDQ $0x04, AX + JMP match_nolit_emitcopy_end_encodeBlockAsm8B + +repeat_three_match_nolit_encodeBlockAsm8B_emit_copy_short: + LEAL -4(R10), R10 + MOVW $0x0015, (AX) + MOVB R10, 2(AX) + ADDQ $0x03, AX + JMP match_nolit_emitcopy_end_encodeBlockAsm8B + +repeat_two_match_nolit_encodeBlockAsm8B_emit_copy_short: + SHLL $0x02, R10 + ORL $0x01, R10 + MOVW R10, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeBlockAsm8B + XORQ DI, DI + LEAL 1(DI)(R10*4), R10 + MOVB SI, 1(AX) + SARL $0x08, SI + SHLL $0x05, SI + ORL SI, 
R10 + MOVB R10, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeBlockAsm8B + JMP two_byte_offset_match_nolit_encodeBlockAsm8B + +two_byte_offset_short_match_nolit_encodeBlockAsm8B: + CMPL R10, $0x0c + JGE emit_copy_three_match_nolit_encodeBlockAsm8B + MOVB $0x01, BL + LEAL -16(BX)(R10*4), R10 + MOVB SI, 1(AX) + SHRL $0x08, SI + SHLL $0x05, SI + ORL SI, R10 + MOVB R10, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeBlockAsm8B + +emit_copy_three_match_nolit_encodeBlockAsm8B: + MOVB $0x02, BL + LEAL -4(BX)(R10*4), R10 + MOVB R10, (AX) + MOVW SI, 1(AX) + ADDQ $0x03, AX + +match_nolit_emitcopy_end_encodeBlockAsm8B: + CMPL CX, 8(SP) + JGE emit_remainder_encodeBlockAsm8B + MOVQ -2(DX)(CX*1), DI + CMPQ AX, (SP) + JL match_nolit_dst_ok_encodeBlockAsm8B + MOVQ $0x00000000, ret+48(FP) + RET + +match_nolit_dst_ok_encodeBlockAsm8B: + MOVQ $0x9e3779b1, R9 + MOVQ DI, R8 + SHRQ $0x10, DI + MOVQ DI, SI + SHLQ $0x20, R8 + IMULQ R9, R8 + SHRQ $0x38, R8 + SHLQ $0x20, SI + IMULQ R9, SI + SHRQ $0x38, SI + LEAL -2(CX), R9 + LEAQ 24(SP)(SI*4), R10 + MOVL (R10), SI + MOVL R9, 24(SP)(R8*4) + MOVL CX, (R10) + CMPL (DX)(SI*1), DI + JEQ match_nolit_loop_encodeBlockAsm8B + INCL CX + JMP search_loop_encodeBlockAsm8B + +emit_remainder_encodeBlockAsm8B: + MOVQ src_len+32(FP), CX + SUBL 12(SP), CX + LEAQ 3(AX)(CX*1), CX + CMPQ CX, (SP) + JL emit_remainder_ok_encodeBlockAsm8B + MOVQ $0x00000000, ret+48(FP) + RET + +emit_remainder_ok_encodeBlockAsm8B: + MOVQ src_len+32(FP), CX + MOVL 12(SP), BX + CMPL BX, CX + JEQ emit_literal_done_emit_remainder_encodeBlockAsm8B + MOVL CX, SI + MOVL CX, 12(SP) + LEAQ (DX)(BX*1), CX + SUBL BX, SI + LEAL -1(SI), DX + CMPL DX, $0x3c + JLT one_byte_emit_remainder_encodeBlockAsm8B + CMPL DX, $0x00000100 + JLT two_bytes_emit_remainder_encodeBlockAsm8B + MOVB $0xf4, (AX) + MOVW DX, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_emit_remainder_encodeBlockAsm8B + +two_bytes_emit_remainder_encodeBlockAsm8B: + MOVB $0xf0, (AX) + MOVB DL, 1(AX) + ADDQ $0x02, AX + CMPL DX, $0x40 + JL memmove_emit_remainder_encodeBlockAsm8B + JMP memmove_long_emit_remainder_encodeBlockAsm8B + +one_byte_emit_remainder_encodeBlockAsm8B: + SHLB $0x02, DL + MOVB DL, (AX) + ADDQ $0x01, AX + +memmove_emit_remainder_encodeBlockAsm8B: + LEAQ (AX)(SI*1), DX + MOVL SI, BX + + // genMemMoveShort + CMPQ BX, $0x08 + JLE emit_lit_memmove_emit_remainder_encodeBlockAsm8B_memmove_move_8 + CMPQ BX, $0x10 + JBE emit_lit_memmove_emit_remainder_encodeBlockAsm8B_memmove_move_8through16 + CMPQ BX, $0x20 + JBE emit_lit_memmove_emit_remainder_encodeBlockAsm8B_memmove_move_17through32 + JMP emit_lit_memmove_emit_remainder_encodeBlockAsm8B_memmove_move_33through64 + +emit_lit_memmove_emit_remainder_encodeBlockAsm8B_memmove_move_8: + MOVQ (CX), SI + MOVQ SI, (AX) + JMP memmove_end_copy_emit_remainder_encodeBlockAsm8B + +emit_lit_memmove_emit_remainder_encodeBlockAsm8B_memmove_move_8through16: + MOVQ (CX), SI + MOVQ -8(CX)(BX*1), CX + MOVQ SI, (AX) + MOVQ CX, -8(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeBlockAsm8B + +emit_lit_memmove_emit_remainder_encodeBlockAsm8B_memmove_move_17through32: + MOVOU (CX), X0 + MOVOU -16(CX)(BX*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeBlockAsm8B + +emit_lit_memmove_emit_remainder_encodeBlockAsm8B_memmove_move_33through64: + MOVOU (CX), X0 + MOVOU 16(CX), X1 + MOVOU -32(CX)(BX*1), X2 + MOVOU -16(CX)(BX*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(BX*1) + MOVOU X3, -16(AX)(BX*1) + 
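+	// Copy done: advance the output pointer AX past the emitted literals.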
+memmove_end_copy_emit_remainder_encodeBlockAsm8B: + MOVQ DX, AX + JMP emit_literal_done_emit_remainder_encodeBlockAsm8B + +memmove_long_emit_remainder_encodeBlockAsm8B: + LEAQ (AX)(SI*1), DX + MOVL SI, BX + + // genMemMoveLong + MOVOU (CX), X0 + MOVOU 16(CX), X1 + MOVOU -32(CX)(BX*1), X2 + MOVOU -16(CX)(BX*1), X3 + MOVQ BX, DI + SHRQ $0x05, DI + MOVQ AX, SI + ANDL $0x0000001f, SI + MOVQ $0x00000040, R8 + SUBQ SI, R8 + DECQ DI + JA emit_lit_memmove_long_emit_remainder_encodeBlockAsm8Blarge_forward_sse_loop_32 + LEAQ -32(CX)(R8*1), SI + LEAQ -32(AX)(R8*1), R9 + +emit_lit_memmove_long_emit_remainder_encodeBlockAsm8Blarge_big_loop_back: + MOVOU (SI), X4 + MOVOU 16(SI), X5 + MOVOA X4, (R9) + MOVOA X5, 16(R9) + ADDQ $0x20, R9 + ADDQ $0x20, SI + ADDQ $0x20, R8 + DECQ DI + JNA emit_lit_memmove_long_emit_remainder_encodeBlockAsm8Blarge_big_loop_back + +emit_lit_memmove_long_emit_remainder_encodeBlockAsm8Blarge_forward_sse_loop_32: + MOVOU -32(CX)(R8*1), X4 + MOVOU -16(CX)(R8*1), X5 + MOVOA X4, -32(AX)(R8*1) + MOVOA X5, -16(AX)(R8*1) + ADDQ $0x20, R8 + CMPQ BX, R8 + JAE emit_lit_memmove_long_emit_remainder_encodeBlockAsm8Blarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(BX*1) + MOVOU X3, -16(AX)(BX*1) + MOVQ DX, AX + +emit_literal_done_emit_remainder_encodeBlockAsm8B: + MOVQ dst_base+0(FP), CX + SUBQ CX, AX + MOVQ AX, ret+48(FP) + RET + +// func encodeBetterBlockAsm(dst []byte, src []byte) int +// Requires: SSE2 +TEXT ·encodeBetterBlockAsm(SB), $327704-56 + MOVQ dst_base+0(FP), AX + MOVQ $0x00000a00, CX + LEAQ 24(SP), DX + PXOR X0, X0 + +zero_loop_encodeBetterBlockAsm: + MOVOU X0, (DX) + MOVOU X0, 16(DX) + MOVOU X0, 32(DX) + MOVOU X0, 48(DX) + MOVOU X0, 64(DX) + MOVOU X0, 80(DX) + MOVOU X0, 96(DX) + MOVOU X0, 112(DX) + ADDQ $0x80, DX + DECQ CX + JNZ zero_loop_encodeBetterBlockAsm + MOVL $0x00000000, 12(SP) + MOVQ src_len+32(FP), CX + LEAQ -6(CX), DX + LEAQ -8(CX), SI + MOVL SI, 8(SP) + SHRQ $0x05, CX + SUBL CX, DX + LEAQ (AX)(DX*1), DX + MOVQ DX, (SP) + MOVL $0x00000001, CX + MOVL $0x00000000, 16(SP) + MOVQ src_base+24(FP), DX + +search_loop_encodeBetterBlockAsm: + MOVL CX, SI + SUBL 12(SP), SI + SHRL $0x07, SI + CMPL SI, $0x63 + JLE check_maxskip_ok_encodeBetterBlockAsm + LEAL 100(CX), SI + JMP check_maxskip_cont_encodeBetterBlockAsm + +check_maxskip_ok_encodeBetterBlockAsm: + LEAL 1(CX)(SI*1), SI + +check_maxskip_cont_encodeBetterBlockAsm: + CMPL SI, 8(SP) + JGE emit_remainder_encodeBetterBlockAsm + MOVQ (DX)(CX*1), DI + MOVL SI, 20(SP) + MOVQ $0x00cf1bbcdcbfa563, R9 + MOVQ $0x9e3779b1, SI + MOVQ DI, R10 + MOVQ DI, R11 + SHLQ $0x08, R10 + IMULQ R9, R10 + SHRQ $0x30, R10 + SHLQ $0x20, R11 + IMULQ SI, R11 + SHRQ $0x32, R11 + MOVL 24(SP)(R10*4), SI + MOVL 262168(SP)(R11*4), R8 + MOVL CX, 24(SP)(R10*4) + MOVL CX, 262168(SP)(R11*4) + CMPL (DX)(SI*1), DI + JEQ candidate_match_encodeBetterBlockAsm + CMPL (DX)(R8*1), DI + JEQ candidateS_match_encodeBetterBlockAsm + MOVL 20(SP), CX + JMP search_loop_encodeBetterBlockAsm + +candidateS_match_encodeBetterBlockAsm: + SHRQ $0x08, DI + MOVQ DI, R10 + SHLQ $0x08, R10 + IMULQ R9, R10 + SHRQ $0x30, R10 + MOVL 24(SP)(R10*4), SI + INCL CX + MOVL CX, 24(SP)(R10*4) + CMPL (DX)(SI*1), DI + JEQ candidate_match_encodeBetterBlockAsm + DECL CX + MOVL R8, SI + +candidate_match_encodeBetterBlockAsm: + MOVL 12(SP), DI + TESTL SI, SI + JZ match_extend_back_end_encodeBetterBlockAsm + +match_extend_back_loop_encodeBetterBlockAsm: + CMPL CX, DI + JLE match_extend_back_end_encodeBetterBlockAsm + MOVB -1(DX)(SI*1), BL + MOVB -1(DX)(CX*1), R8 + CMPB 
BL, R8 + JNE match_extend_back_end_encodeBetterBlockAsm + LEAL -1(CX), CX + DECL SI + JZ match_extend_back_end_encodeBetterBlockAsm + JMP match_extend_back_loop_encodeBetterBlockAsm + +match_extend_back_end_encodeBetterBlockAsm: + MOVL CX, DI + SUBL 12(SP), DI + LEAQ 5(AX)(DI*1), DI + CMPQ DI, (SP) + JL match_dst_size_check_encodeBetterBlockAsm + MOVQ $0x00000000, ret+48(FP) + RET + +match_dst_size_check_encodeBetterBlockAsm: + MOVL CX, DI + ADDL $0x04, CX + ADDL $0x04, SI + MOVQ src_len+32(FP), R8 + SUBL CX, R8 + LEAQ (DX)(CX*1), R9 + LEAQ (DX)(SI*1), R10 + + // matchLen + XORL R12, R12 + CMPL R8, $0x08 + JL matchlen_single_match_nolit_encodeBetterBlockAsm + +matchlen_loopback_match_nolit_encodeBetterBlockAsm: + MOVQ (R9)(R12*1), R11 + XORQ (R10)(R12*1), R11 + TESTQ R11, R11 + JZ matchlen_loop_match_nolit_encodeBetterBlockAsm + BSFQ R11, R11 + SARQ $0x03, R11 + LEAL (R12)(R11*1), R12 + JMP match_nolit_end_encodeBetterBlockAsm + +matchlen_loop_match_nolit_encodeBetterBlockAsm: + LEAL -8(R8), R8 + LEAL 8(R12), R12 + CMPL R8, $0x08 + JGE matchlen_loopback_match_nolit_encodeBetterBlockAsm + +matchlen_single_match_nolit_encodeBetterBlockAsm: + TESTL R8, R8 + JZ match_nolit_end_encodeBetterBlockAsm + +matchlen_single_loopback_match_nolit_encodeBetterBlockAsm: + MOVB (R9)(R12*1), R11 + CMPB (R10)(R12*1), R11 + JNE match_nolit_end_encodeBetterBlockAsm + LEAL 1(R12), R12 + DECL R8 + JNZ matchlen_single_loopback_match_nolit_encodeBetterBlockAsm + +match_nolit_end_encodeBetterBlockAsm: + MOVL CX, R8 + SUBL SI, R8 + + // Check if repeat + CMPL 16(SP), R8 + JEQ match_is_repeat_encodeBetterBlockAsm + CMPL R12, $0x01 + JG match_length_ok_encodeBetterBlockAsm + CMPL R8, $0x0000ffff + JLE match_length_ok_encodeBetterBlockAsm + MOVL 20(SP), CX + INCL CX + JMP search_loop_encodeBetterBlockAsm + +match_length_ok_encodeBetterBlockAsm: + MOVL R8, 16(SP) + MOVL 12(SP), SI + CMPL SI, DI + JEQ emit_literal_done_match_emit_encodeBetterBlockAsm + MOVL DI, R9 + MOVL DI, 12(SP) + LEAQ (DX)(SI*1), R10 + SUBL SI, R9 + LEAL -1(R9), SI + CMPL SI, $0x3c + JLT one_byte_match_emit_encodeBetterBlockAsm + CMPL SI, $0x00000100 + JLT two_bytes_match_emit_encodeBetterBlockAsm + CMPL SI, $0x00010000 + JLT three_bytes_match_emit_encodeBetterBlockAsm + CMPL SI, $0x01000000 + JLT four_bytes_match_emit_encodeBetterBlockAsm + MOVB $0xfc, (AX) + MOVL SI, 1(AX) + ADDQ $0x05, AX + JMP memmove_long_match_emit_encodeBetterBlockAsm + +four_bytes_match_emit_encodeBetterBlockAsm: + MOVL SI, R11 + SHRL $0x10, R11 + MOVB $0xf8, (AX) + MOVW SI, 1(AX) + MOVB R11, 3(AX) + ADDQ $0x04, AX + JMP memmove_long_match_emit_encodeBetterBlockAsm + +three_bytes_match_emit_encodeBetterBlockAsm: + MOVB $0xf4, (AX) + MOVW SI, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_match_emit_encodeBetterBlockAsm + +two_bytes_match_emit_encodeBetterBlockAsm: + MOVB $0xf0, (AX) + MOVB SI, 1(AX) + ADDQ $0x02, AX + CMPL SI, $0x40 + JL memmove_match_emit_encodeBetterBlockAsm + JMP memmove_long_match_emit_encodeBetterBlockAsm + +one_byte_match_emit_encodeBetterBlockAsm: + SHLB $0x02, SI + MOVB SI, (AX) + ADDQ $0x01, AX + +memmove_match_emit_encodeBetterBlockAsm: + LEAQ (AX)(R9*1), SI + + // genMemMoveShort + CMPQ R9, $0x04 + JLE emit_lit_memmove_match_emit_encodeBetterBlockAsm_memmove_move_4 + CMPQ R9, $0x08 + JB emit_lit_memmove_match_emit_encodeBetterBlockAsm_memmove_move_4through7 + CMPQ R9, $0x10 + JBE emit_lit_memmove_match_emit_encodeBetterBlockAsm_memmove_move_8through16 + CMPQ R9, $0x20 + JBE emit_lit_memmove_match_emit_encodeBetterBlockAsm_memmove_move_17through32 + JMP 
emit_lit_memmove_match_emit_encodeBetterBlockAsm_memmove_move_33through64 + +emit_lit_memmove_match_emit_encodeBetterBlockAsm_memmove_move_4: + MOVL (R10), R11 + MOVL R11, (AX) + JMP memmove_end_copy_match_emit_encodeBetterBlockAsm + +emit_lit_memmove_match_emit_encodeBetterBlockAsm_memmove_move_4through7: + MOVL (R10), R11 + MOVL -4(R10)(R9*1), R10 + MOVL R11, (AX) + MOVL R10, -4(AX)(R9*1) + JMP memmove_end_copy_match_emit_encodeBetterBlockAsm + +emit_lit_memmove_match_emit_encodeBetterBlockAsm_memmove_move_8through16: + MOVQ (R10), R11 + MOVQ -8(R10)(R9*1), R10 + MOVQ R11, (AX) + MOVQ R10, -8(AX)(R9*1) + JMP memmove_end_copy_match_emit_encodeBetterBlockAsm + +emit_lit_memmove_match_emit_encodeBetterBlockAsm_memmove_move_17through32: + MOVOU (R10), X0 + MOVOU -16(R10)(R9*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(R9*1) + JMP memmove_end_copy_match_emit_encodeBetterBlockAsm + +emit_lit_memmove_match_emit_encodeBetterBlockAsm_memmove_move_33through64: + MOVOU (R10), X0 + MOVOU 16(R10), X1 + MOVOU -32(R10)(R9*1), X2 + MOVOU -16(R10)(R9*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R9*1) + MOVOU X3, -16(AX)(R9*1) + +memmove_end_copy_match_emit_encodeBetterBlockAsm: + MOVQ SI, AX + JMP emit_literal_done_match_emit_encodeBetterBlockAsm + +memmove_long_match_emit_encodeBetterBlockAsm: + LEAQ (AX)(R9*1), SI + + // genMemMoveLong + MOVOU (R10), X0 + MOVOU 16(R10), X1 + MOVOU -32(R10)(R9*1), X2 + MOVOU -16(R10)(R9*1), X3 + MOVQ R9, R13 + SHRQ $0x05, R13 + MOVQ AX, R11 + ANDL $0x0000001f, R11 + MOVQ $0x00000040, R14 + SUBQ R11, R14 + DECQ R13 + JA emit_lit_memmove_long_match_emit_encodeBetterBlockAsmlarge_forward_sse_loop_32 + LEAQ -32(R10)(R14*1), R11 + LEAQ -32(AX)(R14*1), R15 + +emit_lit_memmove_long_match_emit_encodeBetterBlockAsmlarge_big_loop_back: + MOVOU (R11), X4 + MOVOU 16(R11), X5 + MOVOA X4, (R15) + MOVOA X5, 16(R15) + ADDQ $0x20, R15 + ADDQ $0x20, R11 + ADDQ $0x20, R14 + DECQ R13 + JNA emit_lit_memmove_long_match_emit_encodeBetterBlockAsmlarge_big_loop_back + +emit_lit_memmove_long_match_emit_encodeBetterBlockAsmlarge_forward_sse_loop_32: + MOVOU -32(R10)(R14*1), X4 + MOVOU -16(R10)(R14*1), X5 + MOVOA X4, -32(AX)(R14*1) + MOVOA X5, -16(AX)(R14*1) + ADDQ $0x20, R14 + CMPQ R9, R14 + JAE emit_lit_memmove_long_match_emit_encodeBetterBlockAsmlarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R9*1) + MOVOU X3, -16(AX)(R9*1) + MOVQ SI, AX + +emit_literal_done_match_emit_encodeBetterBlockAsm: + ADDL R12, CX + ADDL $0x04, R12 + MOVL CX, 12(SP) + + // emitCopy + CMPL R8, $0x00010000 + JL two_byte_offset_match_nolit_encodeBetterBlockAsm + +four_bytes_loop_back_match_nolit_encodeBetterBlockAsm: + CMPL R12, $0x40 + JLE four_bytes_remain_match_nolit_encodeBetterBlockAsm + MOVB $0xff, (AX) + MOVL R8, 1(AX) + LEAL -64(R12), R12 + ADDQ $0x05, AX + CMPL R12, $0x04 + JL four_bytes_remain_match_nolit_encodeBetterBlockAsm + + // emitRepeat +emit_repeat_again_match_nolit_encodeBetterBlockAsm_emit_copy: + MOVL R12, SI + LEAL -4(R12), R12 + CMPL SI, $0x08 + JLE repeat_two_match_nolit_encodeBetterBlockAsm_emit_copy + CMPL SI, $0x0c + JGE cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm_emit_copy + CMPL R8, $0x00000800 + JLT repeat_two_offset_match_nolit_encodeBetterBlockAsm_emit_copy + +cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm_emit_copy: + CMPL R12, $0x00000104 + JLT repeat_three_match_nolit_encodeBetterBlockAsm_emit_copy + CMPL R12, $0x00010100 + JLT repeat_four_match_nolit_encodeBetterBlockAsm_emit_copy + CMPL R12, $0x0100ffff + JLT 
repeat_five_match_nolit_encodeBetterBlockAsm_emit_copy + LEAL -16842747(R12), R12 + MOVW $0x001d, (AX) + MOVW $0xfffb, 2(AX) + MOVB $0xff, 4(AX) + ADDQ $0x05, AX + JMP emit_repeat_again_match_nolit_encodeBetterBlockAsm_emit_copy + +repeat_five_match_nolit_encodeBetterBlockAsm_emit_copy: + LEAL -65536(R12), R12 + MOVL R12, R8 + MOVW $0x001d, (AX) + MOVW R12, 2(AX) + SARL $0x10, R8 + MOVB R8, 4(AX) + ADDQ $0x05, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm + +repeat_four_match_nolit_encodeBetterBlockAsm_emit_copy: + LEAL -256(R12), R12 + MOVW $0x0019, (AX) + MOVW R12, 2(AX) + ADDQ $0x04, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm + +repeat_three_match_nolit_encodeBetterBlockAsm_emit_copy: + LEAL -4(R12), R12 + MOVW $0x0015, (AX) + MOVB R12, 2(AX) + ADDQ $0x03, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm + +repeat_two_match_nolit_encodeBetterBlockAsm_emit_copy: + SHLL $0x02, R12 + ORL $0x01, R12 + MOVW R12, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm + +repeat_two_offset_match_nolit_encodeBetterBlockAsm_emit_copy: + XORQ SI, SI + LEAL 1(SI)(R12*4), R12 + MOVB R8, 1(AX) + SARL $0x08, R8 + SHLL $0x05, R8 + ORL R8, R12 + MOVB R12, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm + JMP four_bytes_loop_back_match_nolit_encodeBetterBlockAsm + +four_bytes_remain_match_nolit_encodeBetterBlockAsm: + TESTL R12, R12 + JZ match_nolit_emitcopy_end_encodeBetterBlockAsm + MOVB $0x03, BL + LEAL -4(BX)(R12*4), R12 + MOVB R12, (AX) + MOVL R8, 1(AX) + ADDQ $0x05, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm + +two_byte_offset_match_nolit_encodeBetterBlockAsm: + CMPL R12, $0x40 + JLE two_byte_offset_short_match_nolit_encodeBetterBlockAsm + MOVB $0xee, (AX) + MOVW R8, 1(AX) + LEAL -60(R12), R12 + ADDQ $0x03, AX + + // emitRepeat +emit_repeat_again_match_nolit_encodeBetterBlockAsm_emit_copy_short: + MOVL R12, SI + LEAL -4(R12), R12 + CMPL SI, $0x08 + JLE repeat_two_match_nolit_encodeBetterBlockAsm_emit_copy_short + CMPL SI, $0x0c + JGE cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm_emit_copy_short + CMPL R8, $0x00000800 + JLT repeat_two_offset_match_nolit_encodeBetterBlockAsm_emit_copy_short + +cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm_emit_copy_short: + CMPL R12, $0x00000104 + JLT repeat_three_match_nolit_encodeBetterBlockAsm_emit_copy_short + CMPL R12, $0x00010100 + JLT repeat_four_match_nolit_encodeBetterBlockAsm_emit_copy_short + CMPL R12, $0x0100ffff + JLT repeat_five_match_nolit_encodeBetterBlockAsm_emit_copy_short + LEAL -16842747(R12), R12 + MOVW $0x001d, (AX) + MOVW $0xfffb, 2(AX) + MOVB $0xff, 4(AX) + ADDQ $0x05, AX + JMP emit_repeat_again_match_nolit_encodeBetterBlockAsm_emit_copy_short + +repeat_five_match_nolit_encodeBetterBlockAsm_emit_copy_short: + LEAL -65536(R12), R12 + MOVL R12, R8 + MOVW $0x001d, (AX) + MOVW R12, 2(AX) + SARL $0x10, R8 + MOVB R8, 4(AX) + ADDQ $0x05, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm + +repeat_four_match_nolit_encodeBetterBlockAsm_emit_copy_short: + LEAL -256(R12), R12 + MOVW $0x0019, (AX) + MOVW R12, 2(AX) + ADDQ $0x04, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm + +repeat_three_match_nolit_encodeBetterBlockAsm_emit_copy_short: + LEAL -4(R12), R12 + MOVW $0x0015, (AX) + MOVB R12, 2(AX) + ADDQ $0x03, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm + +repeat_two_match_nolit_encodeBetterBlockAsm_emit_copy_short: + SHLL $0x02, R12 + ORL $0x01, R12 + MOVW R12, (AX) + ADDQ $0x02, AX + JMP 
match_nolit_emitcopy_end_encodeBetterBlockAsm + +repeat_two_offset_match_nolit_encodeBetterBlockAsm_emit_copy_short: + XORQ SI, SI + LEAL 1(SI)(R12*4), R12 + MOVB R8, 1(AX) + SARL $0x08, R8 + SHLL $0x05, R8 + ORL R8, R12 + MOVB R12, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm + JMP two_byte_offset_match_nolit_encodeBetterBlockAsm + +two_byte_offset_short_match_nolit_encodeBetterBlockAsm: + CMPL R12, $0x0c + JGE emit_copy_three_match_nolit_encodeBetterBlockAsm + CMPL R8, $0x00000800 + JGE emit_copy_three_match_nolit_encodeBetterBlockAsm + MOVB $0x01, BL + LEAL -16(BX)(R12*4), R12 + MOVB R8, 1(AX) + SHRL $0x08, R8 + SHLL $0x05, R8 + ORL R8, R12 + MOVB R12, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm + +emit_copy_three_match_nolit_encodeBetterBlockAsm: + MOVB $0x02, BL + LEAL -4(BX)(R12*4), R12 + MOVB R12, (AX) + MOVW R8, 1(AX) + ADDQ $0x03, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm + +match_is_repeat_encodeBetterBlockAsm: + MOVL 12(SP), SI + CMPL SI, DI + JEQ emit_literal_done_match_emit_repeat_encodeBetterBlockAsm + MOVL DI, R9 + MOVL DI, 12(SP) + LEAQ (DX)(SI*1), R10 + SUBL SI, R9 + LEAL -1(R9), SI + CMPL SI, $0x3c + JLT one_byte_match_emit_repeat_encodeBetterBlockAsm + CMPL SI, $0x00000100 + JLT two_bytes_match_emit_repeat_encodeBetterBlockAsm + CMPL SI, $0x00010000 + JLT three_bytes_match_emit_repeat_encodeBetterBlockAsm + CMPL SI, $0x01000000 + JLT four_bytes_match_emit_repeat_encodeBetterBlockAsm + MOVB $0xfc, (AX) + MOVL SI, 1(AX) + ADDQ $0x05, AX + JMP memmove_long_match_emit_repeat_encodeBetterBlockAsm + +four_bytes_match_emit_repeat_encodeBetterBlockAsm: + MOVL SI, R11 + SHRL $0x10, R11 + MOVB $0xf8, (AX) + MOVW SI, 1(AX) + MOVB R11, 3(AX) + ADDQ $0x04, AX + JMP memmove_long_match_emit_repeat_encodeBetterBlockAsm + +three_bytes_match_emit_repeat_encodeBetterBlockAsm: + MOVB $0xf4, (AX) + MOVW SI, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_match_emit_repeat_encodeBetterBlockAsm + +two_bytes_match_emit_repeat_encodeBetterBlockAsm: + MOVB $0xf0, (AX) + MOVB SI, 1(AX) + ADDQ $0x02, AX + CMPL SI, $0x40 + JL memmove_match_emit_repeat_encodeBetterBlockAsm + JMP memmove_long_match_emit_repeat_encodeBetterBlockAsm + +one_byte_match_emit_repeat_encodeBetterBlockAsm: + SHLB $0x02, SI + MOVB SI, (AX) + ADDQ $0x01, AX + +memmove_match_emit_repeat_encodeBetterBlockAsm: + LEAQ (AX)(R9*1), SI + + // genMemMoveShort + CMPQ R9, $0x04 + JLE emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm_memmove_move_4 + CMPQ R9, $0x08 + JB emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm_memmove_move_4through7 + CMPQ R9, $0x10 + JBE emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm_memmove_move_8through16 + CMPQ R9, $0x20 + JBE emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm_memmove_move_17through32 + JMP emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm_memmove_move_33through64 + +emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm_memmove_move_4: + MOVL (R10), R11 + MOVL R11, (AX) + JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm + +emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm_memmove_move_4through7: + MOVL (R10), R11 + MOVL -4(R10)(R9*1), R10 + MOVL R11, (AX) + MOVL R10, -4(AX)(R9*1) + JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm + +emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm_memmove_move_8through16: + MOVQ (R10), R11 + MOVQ -8(R10)(R9*1), R10 + MOVQ R11, (AX) + MOVQ R10, -8(AX)(R9*1) + JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm + 
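+	// 17-32 byte literals: two overlapping 16-byte SSE loads/stores.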
+emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm_memmove_move_17through32: + MOVOU (R10), X0 + MOVOU -16(R10)(R9*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(R9*1) + JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm + +emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm_memmove_move_33through64: + MOVOU (R10), X0 + MOVOU 16(R10), X1 + MOVOU -32(R10)(R9*1), X2 + MOVOU -16(R10)(R9*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R9*1) + MOVOU X3, -16(AX)(R9*1) + +memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm: + MOVQ SI, AX + JMP emit_literal_done_match_emit_repeat_encodeBetterBlockAsm + +memmove_long_match_emit_repeat_encodeBetterBlockAsm: + LEAQ (AX)(R9*1), SI + + // genMemMoveLong + MOVOU (R10), X0 + MOVOU 16(R10), X1 + MOVOU -32(R10)(R9*1), X2 + MOVOU -16(R10)(R9*1), X3 + MOVQ R9, R13 + SHRQ $0x05, R13 + MOVQ AX, R11 + ANDL $0x0000001f, R11 + MOVQ $0x00000040, R14 + SUBQ R11, R14 + DECQ R13 + JA emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsmlarge_forward_sse_loop_32 + LEAQ -32(R10)(R14*1), R11 + LEAQ -32(AX)(R14*1), R15 + +emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsmlarge_big_loop_back: + MOVOU (R11), X4 + MOVOU 16(R11), X5 + MOVOA X4, (R15) + MOVOA X5, 16(R15) + ADDQ $0x20, R15 + ADDQ $0x20, R11 + ADDQ $0x20, R14 + DECQ R13 + JNA emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsmlarge_big_loop_back + +emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsmlarge_forward_sse_loop_32: + MOVOU -32(R10)(R14*1), X4 + MOVOU -16(R10)(R14*1), X5 + MOVOA X4, -32(AX)(R14*1) + MOVOA X5, -16(AX)(R14*1) + ADDQ $0x20, R14 + CMPQ R9, R14 + JAE emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsmlarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R9*1) + MOVOU X3, -16(AX)(R9*1) + MOVQ SI, AX + +emit_literal_done_match_emit_repeat_encodeBetterBlockAsm: + ADDL R12, CX + ADDL $0x04, R12 + MOVL CX, 12(SP) + + // emitRepeat +emit_repeat_again_match_nolit_repeat_encodeBetterBlockAsm: + MOVL R12, SI + LEAL -4(R12), R12 + CMPL SI, $0x08 + JLE repeat_two_match_nolit_repeat_encodeBetterBlockAsm + CMPL SI, $0x0c + JGE cant_repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm + CMPL R8, $0x00000800 + JLT repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm + +cant_repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm: + CMPL R12, $0x00000104 + JLT repeat_three_match_nolit_repeat_encodeBetterBlockAsm + CMPL R12, $0x00010100 + JLT repeat_four_match_nolit_repeat_encodeBetterBlockAsm + CMPL R12, $0x0100ffff + JLT repeat_five_match_nolit_repeat_encodeBetterBlockAsm + LEAL -16842747(R12), R12 + MOVW $0x001d, (AX) + MOVW $0xfffb, 2(AX) + MOVB $0xff, 4(AX) + ADDQ $0x05, AX + JMP emit_repeat_again_match_nolit_repeat_encodeBetterBlockAsm + +repeat_five_match_nolit_repeat_encodeBetterBlockAsm: + LEAL -65536(R12), R12 + MOVL R12, R8 + MOVW $0x001d, (AX) + MOVW R12, 2(AX) + SARL $0x10, R8 + MOVB R8, 4(AX) + ADDQ $0x05, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm + +repeat_four_match_nolit_repeat_encodeBetterBlockAsm: + LEAL -256(R12), R12 + MOVW $0x0019, (AX) + MOVW R12, 2(AX) + ADDQ $0x04, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm + +repeat_three_match_nolit_repeat_encodeBetterBlockAsm: + LEAL -4(R12), R12 + MOVW $0x0015, (AX) + MOVB R12, 2(AX) + ADDQ $0x03, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm + +repeat_two_match_nolit_repeat_encodeBetterBlockAsm: + SHLL $0x02, R12 + ORL $0x01, R12 + MOVW R12, (AX) + ADDQ $0x02, AX + JMP 
match_nolit_emitcopy_end_encodeBetterBlockAsm + +repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm: + XORQ SI, SI + LEAL 1(SI)(R12*4), R12 + MOVB R8, 1(AX) + SARL $0x08, R8 + SHLL $0x05, R8 + ORL R8, R12 + MOVB R12, (AX) + ADDQ $0x02, AX + +match_nolit_emitcopy_end_encodeBetterBlockAsm: + CMPL CX, 8(SP) + JGE emit_remainder_encodeBetterBlockAsm + CMPQ AX, (SP) + JL match_nolit_dst_ok_encodeBetterBlockAsm + MOVQ $0x00000000, ret+48(FP) + RET + +match_nolit_dst_ok_encodeBetterBlockAsm: + MOVQ $0x00cf1bbcdcbfa563, SI + MOVQ $0x9e3779b1, R8 + INCL DI + MOVQ (DX)(DI*1), R9 + MOVQ R9, R10 + MOVQ R9, R11 + MOVQ R9, R12 + SHRQ $0x08, R11 + MOVQ R11, R13 + SHRQ $0x10, R12 + LEAL 1(DI), R14 + LEAL 2(DI), R15 + MOVQ -2(DX)(CX*1), R9 + SHLQ $0x08, R10 + IMULQ SI, R10 + SHRQ $0x30, R10 + SHLQ $0x08, R13 + IMULQ SI, R13 + SHRQ $0x30, R13 + SHLQ $0x20, R11 + IMULQ R8, R11 + SHRQ $0x32, R11 + SHLQ $0x20, R12 + IMULQ R8, R12 + SHRQ $0x32, R12 + MOVL DI, 24(SP)(R10*4) + MOVL R14, 24(SP)(R13*4) + MOVL R14, 262168(SP)(R11*4) + MOVL R15, 262168(SP)(R12*4) + MOVQ R9, R10 + MOVQ R9, R11 + SHRQ $0x08, R11 + MOVQ R11, R13 + LEAL -2(CX), R9 + LEAL -1(CX), DI + SHLQ $0x08, R10 + IMULQ SI, R10 + SHRQ $0x30, R10 + SHLQ $0x20, R11 + IMULQ R8, R11 + SHRQ $0x32, R11 + SHLQ $0x08, R13 + IMULQ SI, R13 + SHRQ $0x30, R13 + MOVL R9, 24(SP)(R10*4) + MOVL DI, 262168(SP)(R11*4) + MOVL DI, 24(SP)(R13*4) + JMP search_loop_encodeBetterBlockAsm + +emit_remainder_encodeBetterBlockAsm: + MOVQ src_len+32(FP), CX + SUBL 12(SP), CX + LEAQ 5(AX)(CX*1), CX + CMPQ CX, (SP) + JL emit_remainder_ok_encodeBetterBlockAsm + MOVQ $0x00000000, ret+48(FP) + RET + +emit_remainder_ok_encodeBetterBlockAsm: + MOVQ src_len+32(FP), CX + MOVL 12(SP), BX + CMPL BX, CX + JEQ emit_literal_done_emit_remainder_encodeBetterBlockAsm + MOVL CX, SI + MOVL CX, 12(SP) + LEAQ (DX)(BX*1), CX + SUBL BX, SI + LEAL -1(SI), DX + CMPL DX, $0x3c + JLT one_byte_emit_remainder_encodeBetterBlockAsm + CMPL DX, $0x00000100 + JLT two_bytes_emit_remainder_encodeBetterBlockAsm + CMPL DX, $0x00010000 + JLT three_bytes_emit_remainder_encodeBetterBlockAsm + CMPL DX, $0x01000000 + JLT four_bytes_emit_remainder_encodeBetterBlockAsm + MOVB $0xfc, (AX) + MOVL DX, 1(AX) + ADDQ $0x05, AX + JMP memmove_long_emit_remainder_encodeBetterBlockAsm + +four_bytes_emit_remainder_encodeBetterBlockAsm: + MOVL DX, BX + SHRL $0x10, BX + MOVB $0xf8, (AX) + MOVW DX, 1(AX) + MOVB BL, 3(AX) + ADDQ $0x04, AX + JMP memmove_long_emit_remainder_encodeBetterBlockAsm + +three_bytes_emit_remainder_encodeBetterBlockAsm: + MOVB $0xf4, (AX) + MOVW DX, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_emit_remainder_encodeBetterBlockAsm + +two_bytes_emit_remainder_encodeBetterBlockAsm: + MOVB $0xf0, (AX) + MOVB DL, 1(AX) + ADDQ $0x02, AX + CMPL DX, $0x40 + JL memmove_emit_remainder_encodeBetterBlockAsm + JMP memmove_long_emit_remainder_encodeBetterBlockAsm + +one_byte_emit_remainder_encodeBetterBlockAsm: + SHLB $0x02, DL + MOVB DL, (AX) + ADDQ $0x01, AX + +memmove_emit_remainder_encodeBetterBlockAsm: + LEAQ (AX)(SI*1), DX + MOVL SI, BX + + // genMemMoveShort + CMPQ BX, $0x04 + JLE emit_lit_memmove_emit_remainder_encodeBetterBlockAsm_memmove_move_4 + CMPQ BX, $0x08 + JB emit_lit_memmove_emit_remainder_encodeBetterBlockAsm_memmove_move_4through7 + CMPQ BX, $0x10 + JBE emit_lit_memmove_emit_remainder_encodeBetterBlockAsm_memmove_move_8through16 + CMPQ BX, $0x20 + JBE emit_lit_memmove_emit_remainder_encodeBetterBlockAsm_memmove_move_17through32 + JMP 
emit_lit_memmove_emit_remainder_encodeBetterBlockAsm_memmove_move_33through64 + +emit_lit_memmove_emit_remainder_encodeBetterBlockAsm_memmove_move_4: + MOVL (CX), SI + MOVL SI, (AX) + JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm + +emit_lit_memmove_emit_remainder_encodeBetterBlockAsm_memmove_move_4through7: + MOVL (CX), SI + MOVL -4(CX)(BX*1), CX + MOVL SI, (AX) + MOVL CX, -4(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm + +emit_lit_memmove_emit_remainder_encodeBetterBlockAsm_memmove_move_8through16: + MOVQ (CX), SI + MOVQ -8(CX)(BX*1), CX + MOVQ SI, (AX) + MOVQ CX, -8(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm + +emit_lit_memmove_emit_remainder_encodeBetterBlockAsm_memmove_move_17through32: + MOVOU (CX), X0 + MOVOU -16(CX)(BX*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm + +emit_lit_memmove_emit_remainder_encodeBetterBlockAsm_memmove_move_33through64: + MOVOU (CX), X0 + MOVOU 16(CX), X1 + MOVOU -32(CX)(BX*1), X2 + MOVOU -16(CX)(BX*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(BX*1) + MOVOU X3, -16(AX)(BX*1) + +memmove_end_copy_emit_remainder_encodeBetterBlockAsm: + MOVQ DX, AX + JMP emit_literal_done_emit_remainder_encodeBetterBlockAsm + +memmove_long_emit_remainder_encodeBetterBlockAsm: + LEAQ (AX)(SI*1), DX + MOVL SI, BX + + // genMemMoveLong + MOVOU (CX), X0 + MOVOU 16(CX), X1 + MOVOU -32(CX)(BX*1), X2 + MOVOU -16(CX)(BX*1), X3 + MOVQ BX, DI + SHRQ $0x05, DI + MOVQ AX, SI + ANDL $0x0000001f, SI + MOVQ $0x00000040, R8 + SUBQ SI, R8 + DECQ DI + JA emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsmlarge_forward_sse_loop_32 + LEAQ -32(CX)(R8*1), SI + LEAQ -32(AX)(R8*1), R9 + +emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsmlarge_big_loop_back: + MOVOU (SI), X4 + MOVOU 16(SI), X5 + MOVOA X4, (R9) + MOVOA X5, 16(R9) + ADDQ $0x20, R9 + ADDQ $0x20, SI + ADDQ $0x20, R8 + DECQ DI + JNA emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsmlarge_big_loop_back + +emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsmlarge_forward_sse_loop_32: + MOVOU -32(CX)(R8*1), X4 + MOVOU -16(CX)(R8*1), X5 + MOVOA X4, -32(AX)(R8*1) + MOVOA X5, -16(AX)(R8*1) + ADDQ $0x20, R8 + CMPQ BX, R8 + JAE emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsmlarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(BX*1) + MOVOU X3, -16(AX)(BX*1) + MOVQ DX, AX + +emit_literal_done_emit_remainder_encodeBetterBlockAsm: + MOVQ dst_base+0(FP), CX + SUBQ CX, AX + MOVQ AX, ret+48(FP) + RET + +// func encodeBetterBlockAsm4MB(dst []byte, src []byte) int +// Requires: SSE2 +TEXT ·encodeBetterBlockAsm4MB(SB), $327704-56 + MOVQ dst_base+0(FP), AX + MOVQ $0x00000a00, CX + LEAQ 24(SP), DX + PXOR X0, X0 + +zero_loop_encodeBetterBlockAsm4MB: + MOVOU X0, (DX) + MOVOU X0, 16(DX) + MOVOU X0, 32(DX) + MOVOU X0, 48(DX) + MOVOU X0, 64(DX) + MOVOU X0, 80(DX) + MOVOU X0, 96(DX) + MOVOU X0, 112(DX) + ADDQ $0x80, DX + DECQ CX + JNZ zero_loop_encodeBetterBlockAsm4MB + MOVL $0x00000000, 12(SP) + MOVQ src_len+32(FP), CX + LEAQ -6(CX), DX + LEAQ -8(CX), SI + MOVL SI, 8(SP) + SHRQ $0x05, CX + SUBL CX, DX + LEAQ (AX)(DX*1), DX + MOVQ DX, (SP) + MOVL $0x00000001, CX + MOVL $0x00000000, 16(SP) + MOVQ src_base+24(FP), DX + +search_loop_encodeBetterBlockAsm4MB: + MOVL CX, SI + SUBL 12(SP), SI + SHRL $0x07, SI + CMPL SI, $0x63 + JLE check_maxskip_ok_encodeBetterBlockAsm4MB + LEAL 100(CX), SI + JMP check_maxskip_cont_encodeBetterBlockAsm4MB + 
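+	// Adaptive skip: step is 1 + (bytes since last emit >> 7), capped at 100 above.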
+check_maxskip_ok_encodeBetterBlockAsm4MB: + LEAL 1(CX)(SI*1), SI + +check_maxskip_cont_encodeBetterBlockAsm4MB: + CMPL SI, 8(SP) + JGE emit_remainder_encodeBetterBlockAsm4MB + MOVQ (DX)(CX*1), DI + MOVL SI, 20(SP) + MOVQ $0x00cf1bbcdcbfa563, R9 + MOVQ $0x9e3779b1, SI + MOVQ DI, R10 + MOVQ DI, R11 + SHLQ $0x08, R10 + IMULQ R9, R10 + SHRQ $0x30, R10 + SHLQ $0x20, R11 + IMULQ SI, R11 + SHRQ $0x32, R11 + MOVL 24(SP)(R10*4), SI + MOVL 262168(SP)(R11*4), R8 + MOVL CX, 24(SP)(R10*4) + MOVL CX, 262168(SP)(R11*4) + CMPL (DX)(SI*1), DI + JEQ candidate_match_encodeBetterBlockAsm4MB + CMPL (DX)(R8*1), DI + JEQ candidateS_match_encodeBetterBlockAsm4MB + MOVL 20(SP), CX + JMP search_loop_encodeBetterBlockAsm4MB + +candidateS_match_encodeBetterBlockAsm4MB: + SHRQ $0x08, DI + MOVQ DI, R10 + SHLQ $0x08, R10 + IMULQ R9, R10 + SHRQ $0x30, R10 + MOVL 24(SP)(R10*4), SI + INCL CX + MOVL CX, 24(SP)(R10*4) + CMPL (DX)(SI*1), DI + JEQ candidate_match_encodeBetterBlockAsm4MB + DECL CX + MOVL R8, SI + +candidate_match_encodeBetterBlockAsm4MB: + MOVL 12(SP), DI + TESTL SI, SI + JZ match_extend_back_end_encodeBetterBlockAsm4MB + +match_extend_back_loop_encodeBetterBlockAsm4MB: + CMPL CX, DI + JLE match_extend_back_end_encodeBetterBlockAsm4MB + MOVB -1(DX)(SI*1), BL + MOVB -1(DX)(CX*1), R8 + CMPB BL, R8 + JNE match_extend_back_end_encodeBetterBlockAsm4MB + LEAL -1(CX), CX + DECL SI + JZ match_extend_back_end_encodeBetterBlockAsm4MB + JMP match_extend_back_loop_encodeBetterBlockAsm4MB + +match_extend_back_end_encodeBetterBlockAsm4MB: + MOVL CX, DI + SUBL 12(SP), DI + LEAQ 4(AX)(DI*1), DI + CMPQ DI, (SP) + JL match_dst_size_check_encodeBetterBlockAsm4MB + MOVQ $0x00000000, ret+48(FP) + RET + +match_dst_size_check_encodeBetterBlockAsm4MB: + MOVL CX, DI + ADDL $0x04, CX + ADDL $0x04, SI + MOVQ src_len+32(FP), R8 + SUBL CX, R8 + LEAQ (DX)(CX*1), R9 + LEAQ (DX)(SI*1), R10 + + // matchLen + XORL R12, R12 + CMPL R8, $0x08 + JL matchlen_single_match_nolit_encodeBetterBlockAsm4MB + +matchlen_loopback_match_nolit_encodeBetterBlockAsm4MB: + MOVQ (R9)(R12*1), R11 + XORQ (R10)(R12*1), R11 + TESTQ R11, R11 + JZ matchlen_loop_match_nolit_encodeBetterBlockAsm4MB + BSFQ R11, R11 + SARQ $0x03, R11 + LEAL (R12)(R11*1), R12 + JMP match_nolit_end_encodeBetterBlockAsm4MB + +matchlen_loop_match_nolit_encodeBetterBlockAsm4MB: + LEAL -8(R8), R8 + LEAL 8(R12), R12 + CMPL R8, $0x08 + JGE matchlen_loopback_match_nolit_encodeBetterBlockAsm4MB + +matchlen_single_match_nolit_encodeBetterBlockAsm4MB: + TESTL R8, R8 + JZ match_nolit_end_encodeBetterBlockAsm4MB + +matchlen_single_loopback_match_nolit_encodeBetterBlockAsm4MB: + MOVB (R9)(R12*1), R11 + CMPB (R10)(R12*1), R11 + JNE match_nolit_end_encodeBetterBlockAsm4MB + LEAL 1(R12), R12 + DECL R8 + JNZ matchlen_single_loopback_match_nolit_encodeBetterBlockAsm4MB + +match_nolit_end_encodeBetterBlockAsm4MB: + MOVL CX, R8 + SUBL SI, R8 + + // Check if repeat + CMPL 16(SP), R8 + JEQ match_is_repeat_encodeBetterBlockAsm4MB + CMPL R12, $0x01 + JG match_length_ok_encodeBetterBlockAsm4MB + CMPL R8, $0x0000ffff + JLE match_length_ok_encodeBetterBlockAsm4MB + MOVL 20(SP), CX + INCL CX + JMP search_loop_encodeBetterBlockAsm4MB + +match_length_ok_encodeBetterBlockAsm4MB: + MOVL R8, 16(SP) + MOVL 12(SP), SI + CMPL SI, DI + JEQ emit_literal_done_match_emit_encodeBetterBlockAsm4MB + MOVL DI, R9 + MOVL DI, 12(SP) + LEAQ (DX)(SI*1), R10 + SUBL SI, R9 + LEAL -1(R9), SI + CMPL SI, $0x3c + JLT one_byte_match_emit_encodeBetterBlockAsm4MB + CMPL SI, $0x00000100 + JLT two_bytes_match_emit_encodeBetterBlockAsm4MB + CMPL SI, 
$0x00010000 + JLT three_bytes_match_emit_encodeBetterBlockAsm4MB + MOVL SI, R11 + SHRL $0x10, R11 + MOVB $0xf8, (AX) + MOVW SI, 1(AX) + MOVB R11, 3(AX) + ADDQ $0x04, AX + JMP memmove_long_match_emit_encodeBetterBlockAsm4MB + +three_bytes_match_emit_encodeBetterBlockAsm4MB: + MOVB $0xf4, (AX) + MOVW SI, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_match_emit_encodeBetterBlockAsm4MB + +two_bytes_match_emit_encodeBetterBlockAsm4MB: + MOVB $0xf0, (AX) + MOVB SI, 1(AX) + ADDQ $0x02, AX + CMPL SI, $0x40 + JL memmove_match_emit_encodeBetterBlockAsm4MB + JMP memmove_long_match_emit_encodeBetterBlockAsm4MB + +one_byte_match_emit_encodeBetterBlockAsm4MB: + SHLB $0x02, SI + MOVB SI, (AX) + ADDQ $0x01, AX + +memmove_match_emit_encodeBetterBlockAsm4MB: + LEAQ (AX)(R9*1), SI + + // genMemMoveShort + CMPQ R9, $0x04 + JLE emit_lit_memmove_match_emit_encodeBetterBlockAsm4MB_memmove_move_4 + CMPQ R9, $0x08 + JB emit_lit_memmove_match_emit_encodeBetterBlockAsm4MB_memmove_move_4through7 + CMPQ R9, $0x10 + JBE emit_lit_memmove_match_emit_encodeBetterBlockAsm4MB_memmove_move_8through16 + CMPQ R9, $0x20 + JBE emit_lit_memmove_match_emit_encodeBetterBlockAsm4MB_memmove_move_17through32 + JMP emit_lit_memmove_match_emit_encodeBetterBlockAsm4MB_memmove_move_33through64 + +emit_lit_memmove_match_emit_encodeBetterBlockAsm4MB_memmove_move_4: + MOVL (R10), R11 + MOVL R11, (AX) + JMP memmove_end_copy_match_emit_encodeBetterBlockAsm4MB + +emit_lit_memmove_match_emit_encodeBetterBlockAsm4MB_memmove_move_4through7: + MOVL (R10), R11 + MOVL -4(R10)(R9*1), R10 + MOVL R11, (AX) + MOVL R10, -4(AX)(R9*1) + JMP memmove_end_copy_match_emit_encodeBetterBlockAsm4MB + +emit_lit_memmove_match_emit_encodeBetterBlockAsm4MB_memmove_move_8through16: + MOVQ (R10), R11 + MOVQ -8(R10)(R9*1), R10 + MOVQ R11, (AX) + MOVQ R10, -8(AX)(R9*1) + JMP memmove_end_copy_match_emit_encodeBetterBlockAsm4MB + +emit_lit_memmove_match_emit_encodeBetterBlockAsm4MB_memmove_move_17through32: + MOVOU (R10), X0 + MOVOU -16(R10)(R9*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(R9*1) + JMP memmove_end_copy_match_emit_encodeBetterBlockAsm4MB + +emit_lit_memmove_match_emit_encodeBetterBlockAsm4MB_memmove_move_33through64: + MOVOU (R10), X0 + MOVOU 16(R10), X1 + MOVOU -32(R10)(R9*1), X2 + MOVOU -16(R10)(R9*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R9*1) + MOVOU X3, -16(AX)(R9*1) + +memmove_end_copy_match_emit_encodeBetterBlockAsm4MB: + MOVQ SI, AX + JMP emit_literal_done_match_emit_encodeBetterBlockAsm4MB + +memmove_long_match_emit_encodeBetterBlockAsm4MB: + LEAQ (AX)(R9*1), SI + + // genMemMoveLong + MOVOU (R10), X0 + MOVOU 16(R10), X1 + MOVOU -32(R10)(R9*1), X2 + MOVOU -16(R10)(R9*1), X3 + MOVQ R9, R13 + SHRQ $0x05, R13 + MOVQ AX, R11 + ANDL $0x0000001f, R11 + MOVQ $0x00000040, R14 + SUBQ R11, R14 + DECQ R13 + JA emit_lit_memmove_long_match_emit_encodeBetterBlockAsm4MBlarge_forward_sse_loop_32 + LEAQ -32(R10)(R14*1), R11 + LEAQ -32(AX)(R14*1), R15 + +emit_lit_memmove_long_match_emit_encodeBetterBlockAsm4MBlarge_big_loop_back: + MOVOU (R11), X4 + MOVOU 16(R11), X5 + MOVOA X4, (R15) + MOVOA X5, 16(R15) + ADDQ $0x20, R15 + ADDQ $0x20, R11 + ADDQ $0x20, R14 + DECQ R13 + JNA emit_lit_memmove_long_match_emit_encodeBetterBlockAsm4MBlarge_big_loop_back + +emit_lit_memmove_long_match_emit_encodeBetterBlockAsm4MBlarge_forward_sse_loop_32: + MOVOU -32(R10)(R14*1), X4 + MOVOU -16(R10)(R14*1), X5 + MOVOA X4, -32(AX)(R14*1) + MOVOA X5, -16(AX)(R14*1) + ADDQ $0x20, R14 + CMPQ R9, R14 + JAE 
emit_lit_memmove_long_match_emit_encodeBetterBlockAsm4MBlarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R9*1) + MOVOU X3, -16(AX)(R9*1) + MOVQ SI, AX + +emit_literal_done_match_emit_encodeBetterBlockAsm4MB: + ADDL R12, CX + ADDL $0x04, R12 + MOVL CX, 12(SP) + + // emitCopy + CMPL R8, $0x00010000 + JL two_byte_offset_match_nolit_encodeBetterBlockAsm4MB + +four_bytes_loop_back_match_nolit_encodeBetterBlockAsm4MB: + CMPL R12, $0x40 + JLE four_bytes_remain_match_nolit_encodeBetterBlockAsm4MB + MOVB $0xff, (AX) + MOVL R8, 1(AX) + LEAL -64(R12), R12 + ADDQ $0x05, AX + CMPL R12, $0x04 + JL four_bytes_remain_match_nolit_encodeBetterBlockAsm4MB + + // emitRepeat + MOVL R12, SI + LEAL -4(R12), R12 + CMPL SI, $0x08 + JLE repeat_two_match_nolit_encodeBetterBlockAsm4MB_emit_copy + CMPL SI, $0x0c + JGE cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm4MB_emit_copy + CMPL R8, $0x00000800 + JLT repeat_two_offset_match_nolit_encodeBetterBlockAsm4MB_emit_copy + +cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm4MB_emit_copy: + CMPL R12, $0x00000104 + JLT repeat_three_match_nolit_encodeBetterBlockAsm4MB_emit_copy + CMPL R12, $0x00010100 + JLT repeat_four_match_nolit_encodeBetterBlockAsm4MB_emit_copy + LEAL -65536(R12), R12 + MOVL R12, R8 + MOVW $0x001d, (AX) + MOVW R12, 2(AX) + SARL $0x10, R8 + MOVB R8, 4(AX) + ADDQ $0x05, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB + +repeat_four_match_nolit_encodeBetterBlockAsm4MB_emit_copy: + LEAL -256(R12), R12 + MOVW $0x0019, (AX) + MOVW R12, 2(AX) + ADDQ $0x04, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB + +repeat_three_match_nolit_encodeBetterBlockAsm4MB_emit_copy: + LEAL -4(R12), R12 + MOVW $0x0015, (AX) + MOVB R12, 2(AX) + ADDQ $0x03, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB + +repeat_two_match_nolit_encodeBetterBlockAsm4MB_emit_copy: + SHLL $0x02, R12 + ORL $0x01, R12 + MOVW R12, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB + +repeat_two_offset_match_nolit_encodeBetterBlockAsm4MB_emit_copy: + XORQ SI, SI + LEAL 1(SI)(R12*4), R12 + MOVB R8, 1(AX) + SARL $0x08, R8 + SHLL $0x05, R8 + ORL R8, R12 + MOVB R12, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB + JMP four_bytes_loop_back_match_nolit_encodeBetterBlockAsm4MB + +four_bytes_remain_match_nolit_encodeBetterBlockAsm4MB: + TESTL R12, R12 + JZ match_nolit_emitcopy_end_encodeBetterBlockAsm4MB + MOVB $0x03, BL + LEAL -4(BX)(R12*4), R12 + MOVB R12, (AX) + MOVL R8, 1(AX) + ADDQ $0x05, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB + +two_byte_offset_match_nolit_encodeBetterBlockAsm4MB: + CMPL R12, $0x40 + JLE two_byte_offset_short_match_nolit_encodeBetterBlockAsm4MB + MOVB $0xee, (AX) + MOVW R8, 1(AX) + LEAL -60(R12), R12 + ADDQ $0x03, AX + + // emitRepeat + MOVL R12, SI + LEAL -4(R12), R12 + CMPL SI, $0x08 + JLE repeat_two_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short + CMPL SI, $0x0c + JGE cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short + CMPL R8, $0x00000800 + JLT repeat_two_offset_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short + +cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short: + CMPL R12, $0x00000104 + JLT repeat_three_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short + CMPL R12, $0x00010100 + JLT repeat_four_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short + LEAL -65536(R12), R12 + MOVL R12, R8 + MOVW $0x001d, (AX) + MOVW R12, 2(AX) + SARL $0x10, R8 + MOVB R8, 4(AX) + ADDQ $0x05, AX + 
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB + +repeat_four_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short: + LEAL -256(R12), R12 + MOVW $0x0019, (AX) + MOVW R12, 2(AX) + ADDQ $0x04, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB + +repeat_three_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short: + LEAL -4(R12), R12 + MOVW $0x0015, (AX) + MOVB R12, 2(AX) + ADDQ $0x03, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB + +repeat_two_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short: + SHLL $0x02, R12 + ORL $0x01, R12 + MOVW R12, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB + +repeat_two_offset_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short: + XORQ SI, SI + LEAL 1(SI)(R12*4), R12 + MOVB R8, 1(AX) + SARL $0x08, R8 + SHLL $0x05, R8 + ORL R8, R12 + MOVB R12, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB + JMP two_byte_offset_match_nolit_encodeBetterBlockAsm4MB + +two_byte_offset_short_match_nolit_encodeBetterBlockAsm4MB: + CMPL R12, $0x0c + JGE emit_copy_three_match_nolit_encodeBetterBlockAsm4MB + CMPL R8, $0x00000800 + JGE emit_copy_three_match_nolit_encodeBetterBlockAsm4MB + MOVB $0x01, BL + LEAL -16(BX)(R12*4), R12 + MOVB R8, 1(AX) + SHRL $0x08, R8 + SHLL $0x05, R8 + ORL R8, R12 + MOVB R12, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB + +emit_copy_three_match_nolit_encodeBetterBlockAsm4MB: + MOVB $0x02, BL + LEAL -4(BX)(R12*4), R12 + MOVB R12, (AX) + MOVW R8, 1(AX) + ADDQ $0x03, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB + +match_is_repeat_encodeBetterBlockAsm4MB: + MOVL 12(SP), SI + CMPL SI, DI + JEQ emit_literal_done_match_emit_repeat_encodeBetterBlockAsm4MB + MOVL DI, R9 + MOVL DI, 12(SP) + LEAQ (DX)(SI*1), R10 + SUBL SI, R9 + LEAL -1(R9), SI + CMPL SI, $0x3c + JLT one_byte_match_emit_repeat_encodeBetterBlockAsm4MB + CMPL SI, $0x00000100 + JLT two_bytes_match_emit_repeat_encodeBetterBlockAsm4MB + CMPL SI, $0x00010000 + JLT three_bytes_match_emit_repeat_encodeBetterBlockAsm4MB + MOVL SI, R11 + SHRL $0x10, R11 + MOVB $0xf8, (AX) + MOVW SI, 1(AX) + MOVB R11, 3(AX) + ADDQ $0x04, AX + JMP memmove_long_match_emit_repeat_encodeBetterBlockAsm4MB + +three_bytes_match_emit_repeat_encodeBetterBlockAsm4MB: + MOVB $0xf4, (AX) + MOVW SI, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_match_emit_repeat_encodeBetterBlockAsm4MB + +two_bytes_match_emit_repeat_encodeBetterBlockAsm4MB: + MOVB $0xf0, (AX) + MOVB SI, 1(AX) + ADDQ $0x02, AX + CMPL SI, $0x40 + JL memmove_match_emit_repeat_encodeBetterBlockAsm4MB + JMP memmove_long_match_emit_repeat_encodeBetterBlockAsm4MB + +one_byte_match_emit_repeat_encodeBetterBlockAsm4MB: + SHLB $0x02, SI + MOVB SI, (AX) + ADDQ $0x01, AX + +memmove_match_emit_repeat_encodeBetterBlockAsm4MB: + LEAQ (AX)(R9*1), SI + + // genMemMoveShort + CMPQ R9, $0x04 + JLE emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm4MB_memmove_move_4 + CMPQ R9, $0x08 + JB emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm4MB_memmove_move_4through7 + CMPQ R9, $0x10 + JBE emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm4MB_memmove_move_8through16 + CMPQ R9, $0x20 + JBE emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm4MB_memmove_move_17through32 + JMP emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm4MB_memmove_move_33through64 + +emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm4MB_memmove_move_4: + MOVL (R10), R11 + MOVL R11, (AX) + JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm4MB + 
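+	// 4-7 byte literals: two overlapping 4-byte loads/stores.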
+emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm4MB_memmove_move_4through7: + MOVL (R10), R11 + MOVL -4(R10)(R9*1), R10 + MOVL R11, (AX) + MOVL R10, -4(AX)(R9*1) + JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm4MB + +emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm4MB_memmove_move_8through16: + MOVQ (R10), R11 + MOVQ -8(R10)(R9*1), R10 + MOVQ R11, (AX) + MOVQ R10, -8(AX)(R9*1) + JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm4MB + +emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm4MB_memmove_move_17through32: + MOVOU (R10), X0 + MOVOU -16(R10)(R9*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(R9*1) + JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm4MB + +emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm4MB_memmove_move_33through64: + MOVOU (R10), X0 + MOVOU 16(R10), X1 + MOVOU -32(R10)(R9*1), X2 + MOVOU -16(R10)(R9*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R9*1) + MOVOU X3, -16(AX)(R9*1) + +memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm4MB: + MOVQ SI, AX + JMP emit_literal_done_match_emit_repeat_encodeBetterBlockAsm4MB + +memmove_long_match_emit_repeat_encodeBetterBlockAsm4MB: + LEAQ (AX)(R9*1), SI + + // genMemMoveLong + MOVOU (R10), X0 + MOVOU 16(R10), X1 + MOVOU -32(R10)(R9*1), X2 + MOVOU -16(R10)(R9*1), X3 + MOVQ R9, R13 + SHRQ $0x05, R13 + MOVQ AX, R11 + ANDL $0x0000001f, R11 + MOVQ $0x00000040, R14 + SUBQ R11, R14 + DECQ R13 + JA emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm4MBlarge_forward_sse_loop_32 + LEAQ -32(R10)(R14*1), R11 + LEAQ -32(AX)(R14*1), R15 + +emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm4MBlarge_big_loop_back: + MOVOU (R11), X4 + MOVOU 16(R11), X5 + MOVOA X4, (R15) + MOVOA X5, 16(R15) + ADDQ $0x20, R15 + ADDQ $0x20, R11 + ADDQ $0x20, R14 + DECQ R13 + JNA emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm4MBlarge_big_loop_back + +emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm4MBlarge_forward_sse_loop_32: + MOVOU -32(R10)(R14*1), X4 + MOVOU -16(R10)(R14*1), X5 + MOVOA X4, -32(AX)(R14*1) + MOVOA X5, -16(AX)(R14*1) + ADDQ $0x20, R14 + CMPQ R9, R14 + JAE emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm4MBlarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R9*1) + MOVOU X3, -16(AX)(R9*1) + MOVQ SI, AX + +emit_literal_done_match_emit_repeat_encodeBetterBlockAsm4MB: + ADDL R12, CX + ADDL $0x04, R12 + MOVL CX, 12(SP) + + // emitRepeat + MOVL R12, SI + LEAL -4(R12), R12 + CMPL SI, $0x08 + JLE repeat_two_match_nolit_repeat_encodeBetterBlockAsm4MB + CMPL SI, $0x0c + JGE cant_repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm4MB + CMPL R8, $0x00000800 + JLT repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm4MB + +cant_repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm4MB: + CMPL R12, $0x00000104 + JLT repeat_three_match_nolit_repeat_encodeBetterBlockAsm4MB + CMPL R12, $0x00010100 + JLT repeat_four_match_nolit_repeat_encodeBetterBlockAsm4MB + LEAL -65536(R12), R12 + MOVL R12, R8 + MOVW $0x001d, (AX) + MOVW R12, 2(AX) + SARL $0x10, R8 + MOVB R8, 4(AX) + ADDQ $0x05, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB + +repeat_four_match_nolit_repeat_encodeBetterBlockAsm4MB: + LEAL -256(R12), R12 + MOVW $0x0019, (AX) + MOVW R12, 2(AX) + ADDQ $0x04, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB + +repeat_three_match_nolit_repeat_encodeBetterBlockAsm4MB: + LEAL -4(R12), R12 + MOVW $0x0015, (AX) + MOVB R12, 2(AX) + ADDQ $0x03, AX + JMP 
match_nolit_emitcopy_end_encodeBetterBlockAsm4MB + +repeat_two_match_nolit_repeat_encodeBetterBlockAsm4MB: + SHLL $0x02, R12 + ORL $0x01, R12 + MOVW R12, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB + +repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm4MB: + XORQ SI, SI + LEAL 1(SI)(R12*4), R12 + MOVB R8, 1(AX) + SARL $0x08, R8 + SHLL $0x05, R8 + ORL R8, R12 + MOVB R12, (AX) + ADDQ $0x02, AX + +match_nolit_emitcopy_end_encodeBetterBlockAsm4MB: + CMPL CX, 8(SP) + JGE emit_remainder_encodeBetterBlockAsm4MB + CMPQ AX, (SP) + JL match_nolit_dst_ok_encodeBetterBlockAsm4MB + MOVQ $0x00000000, ret+48(FP) + RET + +match_nolit_dst_ok_encodeBetterBlockAsm4MB: + MOVQ $0x00cf1bbcdcbfa563, SI + MOVQ $0x9e3779b1, R8 + INCL DI + MOVQ (DX)(DI*1), R9 + MOVQ R9, R10 + MOVQ R9, R11 + MOVQ R9, R12 + SHRQ $0x08, R11 + MOVQ R11, R13 + SHRQ $0x10, R12 + LEAL 1(DI), R14 + LEAL 2(DI), R15 + MOVQ -2(DX)(CX*1), R9 + SHLQ $0x08, R10 + IMULQ SI, R10 + SHRQ $0x30, R10 + SHLQ $0x08, R13 + IMULQ SI, R13 + SHRQ $0x30, R13 + SHLQ $0x20, R11 + IMULQ R8, R11 + SHRQ $0x32, R11 + SHLQ $0x20, R12 + IMULQ R8, R12 + SHRQ $0x32, R12 + MOVL DI, 24(SP)(R10*4) + MOVL R14, 24(SP)(R13*4) + MOVL R14, 262168(SP)(R11*4) + MOVL R15, 262168(SP)(R12*4) + MOVQ R9, R10 + MOVQ R9, R11 + SHRQ $0x08, R11 + MOVQ R11, R13 + LEAL -2(CX), R9 + LEAL -1(CX), DI + SHLQ $0x08, R10 + IMULQ SI, R10 + SHRQ $0x30, R10 + SHLQ $0x20, R11 + IMULQ R8, R11 + SHRQ $0x32, R11 + SHLQ $0x08, R13 + IMULQ SI, R13 + SHRQ $0x30, R13 + MOVL R9, 24(SP)(R10*4) + MOVL DI, 262168(SP)(R11*4) + MOVL DI, 24(SP)(R13*4) + JMP search_loop_encodeBetterBlockAsm4MB + +emit_remainder_encodeBetterBlockAsm4MB: + MOVQ src_len+32(FP), CX + SUBL 12(SP), CX + LEAQ 4(AX)(CX*1), CX + CMPQ CX, (SP) + JL emit_remainder_ok_encodeBetterBlockAsm4MB + MOVQ $0x00000000, ret+48(FP) + RET + +emit_remainder_ok_encodeBetterBlockAsm4MB: + MOVQ src_len+32(FP), CX + MOVL 12(SP), BX + CMPL BX, CX + JEQ emit_literal_done_emit_remainder_encodeBetterBlockAsm4MB + MOVL CX, SI + MOVL CX, 12(SP) + LEAQ (DX)(BX*1), CX + SUBL BX, SI + LEAL -1(SI), DX + CMPL DX, $0x3c + JLT one_byte_emit_remainder_encodeBetterBlockAsm4MB + CMPL DX, $0x00000100 + JLT two_bytes_emit_remainder_encodeBetterBlockAsm4MB + CMPL DX, $0x00010000 + JLT three_bytes_emit_remainder_encodeBetterBlockAsm4MB + MOVL DX, BX + SHRL $0x10, BX + MOVB $0xf8, (AX) + MOVW DX, 1(AX) + MOVB BL, 3(AX) + ADDQ $0x04, AX + JMP memmove_long_emit_remainder_encodeBetterBlockAsm4MB + +three_bytes_emit_remainder_encodeBetterBlockAsm4MB: + MOVB $0xf4, (AX) + MOVW DX, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_emit_remainder_encodeBetterBlockAsm4MB + +two_bytes_emit_remainder_encodeBetterBlockAsm4MB: + MOVB $0xf0, (AX) + MOVB DL, 1(AX) + ADDQ $0x02, AX + CMPL DX, $0x40 + JL memmove_emit_remainder_encodeBetterBlockAsm4MB + JMP memmove_long_emit_remainder_encodeBetterBlockAsm4MB + +one_byte_emit_remainder_encodeBetterBlockAsm4MB: + SHLB $0x02, DL + MOVB DL, (AX) + ADDQ $0x01, AX + +memmove_emit_remainder_encodeBetterBlockAsm4MB: + LEAQ (AX)(SI*1), DX + MOVL SI, BX + + // genMemMoveShort + CMPQ BX, $0x04 + JLE emit_lit_memmove_emit_remainder_encodeBetterBlockAsm4MB_memmove_move_4 + CMPQ BX, $0x08 + JB emit_lit_memmove_emit_remainder_encodeBetterBlockAsm4MB_memmove_move_4through7 + CMPQ BX, $0x10 + JBE emit_lit_memmove_emit_remainder_encodeBetterBlockAsm4MB_memmove_move_8through16 + CMPQ BX, $0x20 + JBE emit_lit_memmove_emit_remainder_encodeBetterBlockAsm4MB_memmove_move_17through32 + JMP 
emit_lit_memmove_emit_remainder_encodeBetterBlockAsm4MB_memmove_move_33through64 + +emit_lit_memmove_emit_remainder_encodeBetterBlockAsm4MB_memmove_move_4: + MOVL (CX), SI + MOVL SI, (AX) + JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm4MB + +emit_lit_memmove_emit_remainder_encodeBetterBlockAsm4MB_memmove_move_4through7: + MOVL (CX), SI + MOVL -4(CX)(BX*1), CX + MOVL SI, (AX) + MOVL CX, -4(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm4MB + +emit_lit_memmove_emit_remainder_encodeBetterBlockAsm4MB_memmove_move_8through16: + MOVQ (CX), SI + MOVQ -8(CX)(BX*1), CX + MOVQ SI, (AX) + MOVQ CX, -8(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm4MB + +emit_lit_memmove_emit_remainder_encodeBetterBlockAsm4MB_memmove_move_17through32: + MOVOU (CX), X0 + MOVOU -16(CX)(BX*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm4MB + +emit_lit_memmove_emit_remainder_encodeBetterBlockAsm4MB_memmove_move_33through64: + MOVOU (CX), X0 + MOVOU 16(CX), X1 + MOVOU -32(CX)(BX*1), X2 + MOVOU -16(CX)(BX*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(BX*1) + MOVOU X3, -16(AX)(BX*1) + +memmove_end_copy_emit_remainder_encodeBetterBlockAsm4MB: + MOVQ DX, AX + JMP emit_literal_done_emit_remainder_encodeBetterBlockAsm4MB + +memmove_long_emit_remainder_encodeBetterBlockAsm4MB: + LEAQ (AX)(SI*1), DX + MOVL SI, BX + + // genMemMoveLong + MOVOU (CX), X0 + MOVOU 16(CX), X1 + MOVOU -32(CX)(BX*1), X2 + MOVOU -16(CX)(BX*1), X3 + MOVQ BX, DI + SHRQ $0x05, DI + MOVQ AX, SI + ANDL $0x0000001f, SI + MOVQ $0x00000040, R8 + SUBQ SI, R8 + DECQ DI + JA emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm4MBlarge_forward_sse_loop_32 + LEAQ -32(CX)(R8*1), SI + LEAQ -32(AX)(R8*1), R9 + +emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm4MBlarge_big_loop_back: + MOVOU (SI), X4 + MOVOU 16(SI), X5 + MOVOA X4, (R9) + MOVOA X5, 16(R9) + ADDQ $0x20, R9 + ADDQ $0x20, SI + ADDQ $0x20, R8 + DECQ DI + JNA emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm4MBlarge_big_loop_back + +emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm4MBlarge_forward_sse_loop_32: + MOVOU -32(CX)(R8*1), X4 + MOVOU -16(CX)(R8*1), X5 + MOVOA X4, -32(AX)(R8*1) + MOVOA X5, -16(AX)(R8*1) + ADDQ $0x20, R8 + CMPQ BX, R8 + JAE emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm4MBlarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(BX*1) + MOVOU X3, -16(AX)(BX*1) + MOVQ DX, AX + +emit_literal_done_emit_remainder_encodeBetterBlockAsm4MB: + MOVQ dst_base+0(FP), CX + SUBQ CX, AX + MOVQ AX, ret+48(FP) + RET + +// func encodeBetterBlockAsm12B(dst []byte, src []byte) int +// Requires: SSE2 +TEXT ·encodeBetterBlockAsm12B(SB), $81944-56 + MOVQ dst_base+0(FP), AX + MOVQ $0x00000280, CX + LEAQ 24(SP), DX + PXOR X0, X0 + +zero_loop_encodeBetterBlockAsm12B: + MOVOU X0, (DX) + MOVOU X0, 16(DX) + MOVOU X0, 32(DX) + MOVOU X0, 48(DX) + MOVOU X0, 64(DX) + MOVOU X0, 80(DX) + MOVOU X0, 96(DX) + MOVOU X0, 112(DX) + ADDQ $0x80, DX + DECQ CX + JNZ zero_loop_encodeBetterBlockAsm12B + MOVL $0x00000000, 12(SP) + MOVQ src_len+32(FP), CX + LEAQ -6(CX), DX + LEAQ -8(CX), SI + MOVL SI, 8(SP) + SHRQ $0x05, CX + SUBL CX, DX + LEAQ (AX)(DX*1), DX + MOVQ DX, (SP) + MOVL $0x00000001, CX + MOVL $0x00000000, 16(SP) + MOVQ src_base+24(FP), DX + +search_loop_encodeBetterBlockAsm12B: + MOVL CX, SI + SUBL 12(SP), SI + SHRL $0x06, SI + LEAL 1(CX)(SI*1), SI + CMPL SI, 8(SP) + JGE emit_remainder_encodeBetterBlockAsm12B + MOVQ (DX)(CX*1), DI 
+ MOVL SI, 20(SP) + MOVQ $0x0000cf1bbcdcbf9b, R9 + MOVQ $0x9e3779b1, SI + MOVQ DI, R10 + MOVQ DI, R11 + SHLQ $0x10, R10 + IMULQ R9, R10 + SHRQ $0x32, R10 + SHLQ $0x20, R11 + IMULQ SI, R11 + SHRQ $0x34, R11 + MOVL 24(SP)(R10*4), SI + MOVL 65560(SP)(R11*4), R8 + MOVL CX, 24(SP)(R10*4) + MOVL CX, 65560(SP)(R11*4) + CMPL (DX)(SI*1), DI + JEQ candidate_match_encodeBetterBlockAsm12B + CMPL (DX)(R8*1), DI + JEQ candidateS_match_encodeBetterBlockAsm12B + MOVL 20(SP), CX + JMP search_loop_encodeBetterBlockAsm12B + +candidateS_match_encodeBetterBlockAsm12B: + SHRQ $0x08, DI + MOVQ DI, R10 + SHLQ $0x10, R10 + IMULQ R9, R10 + SHRQ $0x32, R10 + MOVL 24(SP)(R10*4), SI + INCL CX + MOVL CX, 24(SP)(R10*4) + CMPL (DX)(SI*1), DI + JEQ candidate_match_encodeBetterBlockAsm12B + DECL CX + MOVL R8, SI + +candidate_match_encodeBetterBlockAsm12B: + MOVL 12(SP), DI + TESTL SI, SI + JZ match_extend_back_end_encodeBetterBlockAsm12B + +match_extend_back_loop_encodeBetterBlockAsm12B: + CMPL CX, DI + JLE match_extend_back_end_encodeBetterBlockAsm12B + MOVB -1(DX)(SI*1), BL + MOVB -1(DX)(CX*1), R8 + CMPB BL, R8 + JNE match_extend_back_end_encodeBetterBlockAsm12B + LEAL -1(CX), CX + DECL SI + JZ match_extend_back_end_encodeBetterBlockAsm12B + JMP match_extend_back_loop_encodeBetterBlockAsm12B + +match_extend_back_end_encodeBetterBlockAsm12B: + MOVL CX, DI + SUBL 12(SP), DI + LEAQ 3(AX)(DI*1), DI + CMPQ DI, (SP) + JL match_dst_size_check_encodeBetterBlockAsm12B + MOVQ $0x00000000, ret+48(FP) + RET + +match_dst_size_check_encodeBetterBlockAsm12B: + MOVL CX, DI + ADDL $0x04, CX + ADDL $0x04, SI + MOVQ src_len+32(FP), R8 + SUBL CX, R8 + LEAQ (DX)(CX*1), R9 + LEAQ (DX)(SI*1), R10 + + // matchLen + XORL R12, R12 + CMPL R8, $0x08 + JL matchlen_single_match_nolit_encodeBetterBlockAsm12B + +matchlen_loopback_match_nolit_encodeBetterBlockAsm12B: + MOVQ (R9)(R12*1), R11 + XORQ (R10)(R12*1), R11 + TESTQ R11, R11 + JZ matchlen_loop_match_nolit_encodeBetterBlockAsm12B + BSFQ R11, R11 + SARQ $0x03, R11 + LEAL (R12)(R11*1), R12 + JMP match_nolit_end_encodeBetterBlockAsm12B + +matchlen_loop_match_nolit_encodeBetterBlockAsm12B: + LEAL -8(R8), R8 + LEAL 8(R12), R12 + CMPL R8, $0x08 + JGE matchlen_loopback_match_nolit_encodeBetterBlockAsm12B + +matchlen_single_match_nolit_encodeBetterBlockAsm12B: + TESTL R8, R8 + JZ match_nolit_end_encodeBetterBlockAsm12B + +matchlen_single_loopback_match_nolit_encodeBetterBlockAsm12B: + MOVB (R9)(R12*1), R11 + CMPB (R10)(R12*1), R11 + JNE match_nolit_end_encodeBetterBlockAsm12B + LEAL 1(R12), R12 + DECL R8 + JNZ matchlen_single_loopback_match_nolit_encodeBetterBlockAsm12B + +match_nolit_end_encodeBetterBlockAsm12B: + MOVL CX, R8 + SUBL SI, R8 + + // Check if repeat + CMPL 16(SP), R8 + JEQ match_is_repeat_encodeBetterBlockAsm12B + MOVL R8, 16(SP) + MOVL 12(SP), SI + CMPL SI, DI + JEQ emit_literal_done_match_emit_encodeBetterBlockAsm12B + MOVL DI, R9 + MOVL DI, 12(SP) + LEAQ (DX)(SI*1), R10 + SUBL SI, R9 + LEAL -1(R9), SI + CMPL SI, $0x3c + JLT one_byte_match_emit_encodeBetterBlockAsm12B + CMPL SI, $0x00000100 + JLT two_bytes_match_emit_encodeBetterBlockAsm12B + MOVB $0xf4, (AX) + MOVW SI, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_match_emit_encodeBetterBlockAsm12B + +two_bytes_match_emit_encodeBetterBlockAsm12B: + MOVB $0xf0, (AX) + MOVB SI, 1(AX) + ADDQ $0x02, AX + CMPL SI, $0x40 + JL memmove_match_emit_encodeBetterBlockAsm12B + JMP memmove_long_match_emit_encodeBetterBlockAsm12B + +one_byte_match_emit_encodeBetterBlockAsm12B: + SHLB $0x02, SI + MOVB SI, (AX) + ADDQ $0x01, AX + 
+memmove_match_emit_encodeBetterBlockAsm12B: + LEAQ (AX)(R9*1), SI + + // genMemMoveShort + CMPQ R9, $0x04 + JLE emit_lit_memmove_match_emit_encodeBetterBlockAsm12B_memmove_move_4 + CMPQ R9, $0x08 + JB emit_lit_memmove_match_emit_encodeBetterBlockAsm12B_memmove_move_4through7 + CMPQ R9, $0x10 + JBE emit_lit_memmove_match_emit_encodeBetterBlockAsm12B_memmove_move_8through16 + CMPQ R9, $0x20 + JBE emit_lit_memmove_match_emit_encodeBetterBlockAsm12B_memmove_move_17through32 + JMP emit_lit_memmove_match_emit_encodeBetterBlockAsm12B_memmove_move_33through64 + +emit_lit_memmove_match_emit_encodeBetterBlockAsm12B_memmove_move_4: + MOVL (R10), R11 + MOVL R11, (AX) + JMP memmove_end_copy_match_emit_encodeBetterBlockAsm12B + +emit_lit_memmove_match_emit_encodeBetterBlockAsm12B_memmove_move_4through7: + MOVL (R10), R11 + MOVL -4(R10)(R9*1), R10 + MOVL R11, (AX) + MOVL R10, -4(AX)(R9*1) + JMP memmove_end_copy_match_emit_encodeBetterBlockAsm12B + +emit_lit_memmove_match_emit_encodeBetterBlockAsm12B_memmove_move_8through16: + MOVQ (R10), R11 + MOVQ -8(R10)(R9*1), R10 + MOVQ R11, (AX) + MOVQ R10, -8(AX)(R9*1) + JMP memmove_end_copy_match_emit_encodeBetterBlockAsm12B + +emit_lit_memmove_match_emit_encodeBetterBlockAsm12B_memmove_move_17through32: + MOVOU (R10), X0 + MOVOU -16(R10)(R9*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(R9*1) + JMP memmove_end_copy_match_emit_encodeBetterBlockAsm12B + +emit_lit_memmove_match_emit_encodeBetterBlockAsm12B_memmove_move_33through64: + MOVOU (R10), X0 + MOVOU 16(R10), X1 + MOVOU -32(R10)(R9*1), X2 + MOVOU -16(R10)(R9*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R9*1) + MOVOU X3, -16(AX)(R9*1) + +memmove_end_copy_match_emit_encodeBetterBlockAsm12B: + MOVQ SI, AX + JMP emit_literal_done_match_emit_encodeBetterBlockAsm12B + +memmove_long_match_emit_encodeBetterBlockAsm12B: + LEAQ (AX)(R9*1), SI + + // genMemMoveLong + MOVOU (R10), X0 + MOVOU 16(R10), X1 + MOVOU -32(R10)(R9*1), X2 + MOVOU -16(R10)(R9*1), X3 + MOVQ R9, R13 + SHRQ $0x05, R13 + MOVQ AX, R11 + ANDL $0x0000001f, R11 + MOVQ $0x00000040, R14 + SUBQ R11, R14 + DECQ R13 + JA emit_lit_memmove_long_match_emit_encodeBetterBlockAsm12Blarge_forward_sse_loop_32 + LEAQ -32(R10)(R14*1), R11 + LEAQ -32(AX)(R14*1), R15 + +emit_lit_memmove_long_match_emit_encodeBetterBlockAsm12Blarge_big_loop_back: + MOVOU (R11), X4 + MOVOU 16(R11), X5 + MOVOA X4, (R15) + MOVOA X5, 16(R15) + ADDQ $0x20, R15 + ADDQ $0x20, R11 + ADDQ $0x20, R14 + DECQ R13 + JNA emit_lit_memmove_long_match_emit_encodeBetterBlockAsm12Blarge_big_loop_back + +emit_lit_memmove_long_match_emit_encodeBetterBlockAsm12Blarge_forward_sse_loop_32: + MOVOU -32(R10)(R14*1), X4 + MOVOU -16(R10)(R14*1), X5 + MOVOA X4, -32(AX)(R14*1) + MOVOA X5, -16(AX)(R14*1) + ADDQ $0x20, R14 + CMPQ R9, R14 + JAE emit_lit_memmove_long_match_emit_encodeBetterBlockAsm12Blarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R9*1) + MOVOU X3, -16(AX)(R9*1) + MOVQ SI, AX + +emit_literal_done_match_emit_encodeBetterBlockAsm12B: + ADDL R12, CX + ADDL $0x04, R12 + MOVL CX, 12(SP) + + // emitCopy +two_byte_offset_match_nolit_encodeBetterBlockAsm12B: + CMPL R12, $0x40 + JLE two_byte_offset_short_match_nolit_encodeBetterBlockAsm12B + MOVB $0xee, (AX) + MOVW R8, 1(AX) + LEAL -60(R12), R12 + ADDQ $0x03, AX + + // emitRepeat + MOVL R12, SI + LEAL -4(R12), R12 + CMPL SI, $0x08 + JLE repeat_two_match_nolit_encodeBetterBlockAsm12B_emit_copy_short + CMPL SI, $0x0c + JGE cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm12B_emit_copy_short + CMPL R8, 
$0x00000800 + JLT repeat_two_offset_match_nolit_encodeBetterBlockAsm12B_emit_copy_short + +cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm12B_emit_copy_short: + CMPL R12, $0x00000104 + JLT repeat_three_match_nolit_encodeBetterBlockAsm12B_emit_copy_short + LEAL -256(R12), R12 + MOVW $0x0019, (AX) + MOVW R12, 2(AX) + ADDQ $0x04, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm12B + +repeat_three_match_nolit_encodeBetterBlockAsm12B_emit_copy_short: + LEAL -4(R12), R12 + MOVW $0x0015, (AX) + MOVB R12, 2(AX) + ADDQ $0x03, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm12B + +repeat_two_match_nolit_encodeBetterBlockAsm12B_emit_copy_short: + SHLL $0x02, R12 + ORL $0x01, R12 + MOVW R12, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm12B + +repeat_two_offset_match_nolit_encodeBetterBlockAsm12B_emit_copy_short: + XORQ SI, SI + LEAL 1(SI)(R12*4), R12 + MOVB R8, 1(AX) + SARL $0x08, R8 + SHLL $0x05, R8 + ORL R8, R12 + MOVB R12, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm12B + JMP two_byte_offset_match_nolit_encodeBetterBlockAsm12B + +two_byte_offset_short_match_nolit_encodeBetterBlockAsm12B: + CMPL R12, $0x0c + JGE emit_copy_three_match_nolit_encodeBetterBlockAsm12B + CMPL R8, $0x00000800 + JGE emit_copy_three_match_nolit_encodeBetterBlockAsm12B + MOVB $0x01, BL + LEAL -16(BX)(R12*4), R12 + MOVB R8, 1(AX) + SHRL $0x08, R8 + SHLL $0x05, R8 + ORL R8, R12 + MOVB R12, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm12B + +emit_copy_three_match_nolit_encodeBetterBlockAsm12B: + MOVB $0x02, BL + LEAL -4(BX)(R12*4), R12 + MOVB R12, (AX) + MOVW R8, 1(AX) + ADDQ $0x03, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm12B + +match_is_repeat_encodeBetterBlockAsm12B: + MOVL 12(SP), SI + CMPL SI, DI + JEQ emit_literal_done_match_emit_repeat_encodeBetterBlockAsm12B + MOVL DI, R9 + MOVL DI, 12(SP) + LEAQ (DX)(SI*1), R10 + SUBL SI, R9 + LEAL -1(R9), SI + CMPL SI, $0x3c + JLT one_byte_match_emit_repeat_encodeBetterBlockAsm12B + CMPL SI, $0x00000100 + JLT two_bytes_match_emit_repeat_encodeBetterBlockAsm12B + MOVB $0xf4, (AX) + MOVW SI, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_match_emit_repeat_encodeBetterBlockAsm12B + +two_bytes_match_emit_repeat_encodeBetterBlockAsm12B: + MOVB $0xf0, (AX) + MOVB SI, 1(AX) + ADDQ $0x02, AX + CMPL SI, $0x40 + JL memmove_match_emit_repeat_encodeBetterBlockAsm12B + JMP memmove_long_match_emit_repeat_encodeBetterBlockAsm12B + +one_byte_match_emit_repeat_encodeBetterBlockAsm12B: + SHLB $0x02, SI + MOVB SI, (AX) + ADDQ $0x01, AX + +memmove_match_emit_repeat_encodeBetterBlockAsm12B: + LEAQ (AX)(R9*1), SI + + // genMemMoveShort + CMPQ R9, $0x04 + JLE emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm12B_memmove_move_4 + CMPQ R9, $0x08 + JB emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm12B_memmove_move_4through7 + CMPQ R9, $0x10 + JBE emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm12B_memmove_move_8through16 + CMPQ R9, $0x20 + JBE emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm12B_memmove_move_17through32 + JMP emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm12B_memmove_move_33through64 + +emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm12B_memmove_move_4: + MOVL (R10), R11 + MOVL R11, (AX) + JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm12B + +emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm12B_memmove_move_4through7: + MOVL (R10), R11 + MOVL -4(R10)(R9*1), R10 + MOVL R11, (AX) + MOVL R10, -4(AX)(R9*1) + JMP 
memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm12B + +emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm12B_memmove_move_8through16: + MOVQ (R10), R11 + MOVQ -8(R10)(R9*1), R10 + MOVQ R11, (AX) + MOVQ R10, -8(AX)(R9*1) + JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm12B + +emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm12B_memmove_move_17through32: + MOVOU (R10), X0 + MOVOU -16(R10)(R9*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(R9*1) + JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm12B + +emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm12B_memmove_move_33through64: + MOVOU (R10), X0 + MOVOU 16(R10), X1 + MOVOU -32(R10)(R9*1), X2 + MOVOU -16(R10)(R9*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R9*1) + MOVOU X3, -16(AX)(R9*1) + +memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm12B: + MOVQ SI, AX + JMP emit_literal_done_match_emit_repeat_encodeBetterBlockAsm12B + +memmove_long_match_emit_repeat_encodeBetterBlockAsm12B: + LEAQ (AX)(R9*1), SI + + // genMemMoveLong + MOVOU (R10), X0 + MOVOU 16(R10), X1 + MOVOU -32(R10)(R9*1), X2 + MOVOU -16(R10)(R9*1), X3 + MOVQ R9, R13 + SHRQ $0x05, R13 + MOVQ AX, R11 + ANDL $0x0000001f, R11 + MOVQ $0x00000040, R14 + SUBQ R11, R14 + DECQ R13 + JA emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm12Blarge_forward_sse_loop_32 + LEAQ -32(R10)(R14*1), R11 + LEAQ -32(AX)(R14*1), R15 + +emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm12Blarge_big_loop_back: + MOVOU (R11), X4 + MOVOU 16(R11), X5 + MOVOA X4, (R15) + MOVOA X5, 16(R15) + ADDQ $0x20, R15 + ADDQ $0x20, R11 + ADDQ $0x20, R14 + DECQ R13 + JNA emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm12Blarge_big_loop_back + +emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm12Blarge_forward_sse_loop_32: + MOVOU -32(R10)(R14*1), X4 + MOVOU -16(R10)(R14*1), X5 + MOVOA X4, -32(AX)(R14*1) + MOVOA X5, -16(AX)(R14*1) + ADDQ $0x20, R14 + CMPQ R9, R14 + JAE emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm12Blarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R9*1) + MOVOU X3, -16(AX)(R9*1) + MOVQ SI, AX + +emit_literal_done_match_emit_repeat_encodeBetterBlockAsm12B: + ADDL R12, CX + ADDL $0x04, R12 + MOVL CX, 12(SP) + + // emitRepeat + MOVL R12, SI + LEAL -4(R12), R12 + CMPL SI, $0x08 + JLE repeat_two_match_nolit_repeat_encodeBetterBlockAsm12B + CMPL SI, $0x0c + JGE cant_repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm12B + CMPL R8, $0x00000800 + JLT repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm12B + +cant_repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm12B: + CMPL R12, $0x00000104 + JLT repeat_three_match_nolit_repeat_encodeBetterBlockAsm12B + LEAL -256(R12), R12 + MOVW $0x0019, (AX) + MOVW R12, 2(AX) + ADDQ $0x04, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm12B + +repeat_three_match_nolit_repeat_encodeBetterBlockAsm12B: + LEAL -4(R12), R12 + MOVW $0x0015, (AX) + MOVB R12, 2(AX) + ADDQ $0x03, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm12B + +repeat_two_match_nolit_repeat_encodeBetterBlockAsm12B: + SHLL $0x02, R12 + ORL $0x01, R12 + MOVW R12, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm12B + +repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm12B: + XORQ SI, SI + LEAL 1(SI)(R12*4), R12 + MOVB R8, 1(AX) + SARL $0x08, R8 + SHLL $0x05, R8 + ORL R8, R12 + MOVB R12, (AX) + ADDQ $0x02, AX + +match_nolit_emitcopy_end_encodeBetterBlockAsm12B: + CMPL CX, 8(SP) + JGE 
emit_remainder_encodeBetterBlockAsm12B + CMPQ AX, (SP) + JL match_nolit_dst_ok_encodeBetterBlockAsm12B + MOVQ $0x00000000, ret+48(FP) + RET + +match_nolit_dst_ok_encodeBetterBlockAsm12B: + MOVQ $0x0000cf1bbcdcbf9b, SI + MOVQ $0x9e3779b1, R8 + INCL DI + MOVQ (DX)(DI*1), R9 + MOVQ R9, R10 + MOVQ R9, R11 + MOVQ R9, R12 + SHRQ $0x08, R11 + MOVQ R11, R13 + SHRQ $0x10, R12 + LEAL 1(DI), R14 + LEAL 2(DI), R15 + MOVQ -2(DX)(CX*1), R9 + SHLQ $0x10, R10 + IMULQ SI, R10 + SHRQ $0x32, R10 + SHLQ $0x10, R13 + IMULQ SI, R13 + SHRQ $0x32, R13 + SHLQ $0x20, R11 + IMULQ R8, R11 + SHRQ $0x34, R11 + SHLQ $0x20, R12 + IMULQ R8, R12 + SHRQ $0x34, R12 + MOVL DI, 24(SP)(R10*4) + MOVL R14, 24(SP)(R13*4) + MOVL R14, 65560(SP)(R11*4) + MOVL R15, 65560(SP)(R12*4) + MOVQ R9, R10 + MOVQ R9, R11 + SHRQ $0x08, R11 + MOVQ R11, R13 + LEAL -2(CX), R9 + LEAL -1(CX), DI + SHLQ $0x10, R10 + IMULQ SI, R10 + SHRQ $0x32, R10 + SHLQ $0x20, R11 + IMULQ R8, R11 + SHRQ $0x34, R11 + SHLQ $0x10, R13 + IMULQ SI, R13 + SHRQ $0x32, R13 + MOVL R9, 24(SP)(R10*4) + MOVL DI, 65560(SP)(R11*4) + MOVL DI, 24(SP)(R13*4) + JMP search_loop_encodeBetterBlockAsm12B + +emit_remainder_encodeBetterBlockAsm12B: + MOVQ src_len+32(FP), CX + SUBL 12(SP), CX + LEAQ 3(AX)(CX*1), CX + CMPQ CX, (SP) + JL emit_remainder_ok_encodeBetterBlockAsm12B + MOVQ $0x00000000, ret+48(FP) + RET + +emit_remainder_ok_encodeBetterBlockAsm12B: + MOVQ src_len+32(FP), CX + MOVL 12(SP), BX + CMPL BX, CX + JEQ emit_literal_done_emit_remainder_encodeBetterBlockAsm12B + MOVL CX, SI + MOVL CX, 12(SP) + LEAQ (DX)(BX*1), CX + SUBL BX, SI + LEAL -1(SI), DX + CMPL DX, $0x3c + JLT one_byte_emit_remainder_encodeBetterBlockAsm12B + CMPL DX, $0x00000100 + JLT two_bytes_emit_remainder_encodeBetterBlockAsm12B + MOVB $0xf4, (AX) + MOVW DX, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_emit_remainder_encodeBetterBlockAsm12B + +two_bytes_emit_remainder_encodeBetterBlockAsm12B: + MOVB $0xf0, (AX) + MOVB DL, 1(AX) + ADDQ $0x02, AX + CMPL DX, $0x40 + JL memmove_emit_remainder_encodeBetterBlockAsm12B + JMP memmove_long_emit_remainder_encodeBetterBlockAsm12B + +one_byte_emit_remainder_encodeBetterBlockAsm12B: + SHLB $0x02, DL + MOVB DL, (AX) + ADDQ $0x01, AX + +memmove_emit_remainder_encodeBetterBlockAsm12B: + LEAQ (AX)(SI*1), DX + MOVL SI, BX + + // genMemMoveShort + CMPQ BX, $0x04 + JLE emit_lit_memmove_emit_remainder_encodeBetterBlockAsm12B_memmove_move_4 + CMPQ BX, $0x08 + JB emit_lit_memmove_emit_remainder_encodeBetterBlockAsm12B_memmove_move_4through7 + CMPQ BX, $0x10 + JBE emit_lit_memmove_emit_remainder_encodeBetterBlockAsm12B_memmove_move_8through16 + CMPQ BX, $0x20 + JBE emit_lit_memmove_emit_remainder_encodeBetterBlockAsm12B_memmove_move_17through32 + JMP emit_lit_memmove_emit_remainder_encodeBetterBlockAsm12B_memmove_move_33through64 + +emit_lit_memmove_emit_remainder_encodeBetterBlockAsm12B_memmove_move_4: + MOVL (CX), SI + MOVL SI, (AX) + JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm12B + +emit_lit_memmove_emit_remainder_encodeBetterBlockAsm12B_memmove_move_4through7: + MOVL (CX), SI + MOVL -4(CX)(BX*1), CX + MOVL SI, (AX) + MOVL CX, -4(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm12B + +emit_lit_memmove_emit_remainder_encodeBetterBlockAsm12B_memmove_move_8through16: + MOVQ (CX), SI + MOVQ -8(CX)(BX*1), CX + MOVQ SI, (AX) + MOVQ CX, -8(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm12B + +emit_lit_memmove_emit_remainder_encodeBetterBlockAsm12B_memmove_move_17through32: + MOVOU (CX), X0 + MOVOU -16(CX)(BX*1), X1 + MOVOU X0, (AX) + MOVOU 
X1, -16(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm12B + +emit_lit_memmove_emit_remainder_encodeBetterBlockAsm12B_memmove_move_33through64: + MOVOU (CX), X0 + MOVOU 16(CX), X1 + MOVOU -32(CX)(BX*1), X2 + MOVOU -16(CX)(BX*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(BX*1) + MOVOU X3, -16(AX)(BX*1) + +memmove_end_copy_emit_remainder_encodeBetterBlockAsm12B: + MOVQ DX, AX + JMP emit_literal_done_emit_remainder_encodeBetterBlockAsm12B + +memmove_long_emit_remainder_encodeBetterBlockAsm12B: + LEAQ (AX)(SI*1), DX + MOVL SI, BX + + // genMemMoveLong + MOVOU (CX), X0 + MOVOU 16(CX), X1 + MOVOU -32(CX)(BX*1), X2 + MOVOU -16(CX)(BX*1), X3 + MOVQ BX, DI + SHRQ $0x05, DI + MOVQ AX, SI + ANDL $0x0000001f, SI + MOVQ $0x00000040, R8 + SUBQ SI, R8 + DECQ DI + JA emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm12Blarge_forward_sse_loop_32 + LEAQ -32(CX)(R8*1), SI + LEAQ -32(AX)(R8*1), R9 + +emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm12Blarge_big_loop_back: + MOVOU (SI), X4 + MOVOU 16(SI), X5 + MOVOA X4, (R9) + MOVOA X5, 16(R9) + ADDQ $0x20, R9 + ADDQ $0x20, SI + ADDQ $0x20, R8 + DECQ DI + JNA emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm12Blarge_big_loop_back + +emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm12Blarge_forward_sse_loop_32: + MOVOU -32(CX)(R8*1), X4 + MOVOU -16(CX)(R8*1), X5 + MOVOA X4, -32(AX)(R8*1) + MOVOA X5, -16(AX)(R8*1) + ADDQ $0x20, R8 + CMPQ BX, R8 + JAE emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm12Blarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(BX*1) + MOVOU X3, -16(AX)(BX*1) + MOVQ DX, AX + +emit_literal_done_emit_remainder_encodeBetterBlockAsm12B: + MOVQ dst_base+0(FP), CX + SUBQ CX, AX + MOVQ AX, ret+48(FP) + RET + +// func encodeBetterBlockAsm10B(dst []byte, src []byte) int +// Requires: SSE2 +TEXT ·encodeBetterBlockAsm10B(SB), $20504-56 + MOVQ dst_base+0(FP), AX + MOVQ $0x000000a0, CX + LEAQ 24(SP), DX + PXOR X0, X0 + +zero_loop_encodeBetterBlockAsm10B: + MOVOU X0, (DX) + MOVOU X0, 16(DX) + MOVOU X0, 32(DX) + MOVOU X0, 48(DX) + MOVOU X0, 64(DX) + MOVOU X0, 80(DX) + MOVOU X0, 96(DX) + MOVOU X0, 112(DX) + ADDQ $0x80, DX + DECQ CX + JNZ zero_loop_encodeBetterBlockAsm10B + MOVL $0x00000000, 12(SP) + MOVQ src_len+32(FP), CX + LEAQ -6(CX), DX + LEAQ -8(CX), SI + MOVL SI, 8(SP) + SHRQ $0x05, CX + SUBL CX, DX + LEAQ (AX)(DX*1), DX + MOVQ DX, (SP) + MOVL $0x00000001, CX + MOVL $0x00000000, 16(SP) + MOVQ src_base+24(FP), DX + +search_loop_encodeBetterBlockAsm10B: + MOVL CX, SI + SUBL 12(SP), SI + SHRL $0x05, SI + LEAL 1(CX)(SI*1), SI + CMPL SI, 8(SP) + JGE emit_remainder_encodeBetterBlockAsm10B + MOVQ (DX)(CX*1), DI + MOVL SI, 20(SP) + MOVQ $0x0000cf1bbcdcbf9b, R9 + MOVQ $0x9e3779b1, SI + MOVQ DI, R10 + MOVQ DI, R11 + SHLQ $0x10, R10 + IMULQ R9, R10 + SHRQ $0x34, R10 + SHLQ $0x20, R11 + IMULQ SI, R11 + SHRQ $0x36, R11 + MOVL 24(SP)(R10*4), SI + MOVL 16408(SP)(R11*4), R8 + MOVL CX, 24(SP)(R10*4) + MOVL CX, 16408(SP)(R11*4) + CMPL (DX)(SI*1), DI + JEQ candidate_match_encodeBetterBlockAsm10B + CMPL (DX)(R8*1), DI + JEQ candidateS_match_encodeBetterBlockAsm10B + MOVL 20(SP), CX + JMP search_loop_encodeBetterBlockAsm10B + +candidateS_match_encodeBetterBlockAsm10B: + SHRQ $0x08, DI + MOVQ DI, R10 + SHLQ $0x10, R10 + IMULQ R9, R10 + SHRQ $0x34, R10 + MOVL 24(SP)(R10*4), SI + INCL CX + MOVL CX, 24(SP)(R10*4) + CMPL (DX)(SI*1), DI + JEQ candidate_match_encodeBetterBlockAsm10B + DECL CX + MOVL R8, SI + +candidate_match_encodeBetterBlockAsm10B: + MOVL 12(SP), DI + 
TESTL SI, SI + JZ match_extend_back_end_encodeBetterBlockAsm10B + +match_extend_back_loop_encodeBetterBlockAsm10B: + CMPL CX, DI + JLE match_extend_back_end_encodeBetterBlockAsm10B + MOVB -1(DX)(SI*1), BL + MOVB -1(DX)(CX*1), R8 + CMPB BL, R8 + JNE match_extend_back_end_encodeBetterBlockAsm10B + LEAL -1(CX), CX + DECL SI + JZ match_extend_back_end_encodeBetterBlockAsm10B + JMP match_extend_back_loop_encodeBetterBlockAsm10B + +match_extend_back_end_encodeBetterBlockAsm10B: + MOVL CX, DI + SUBL 12(SP), DI + LEAQ 3(AX)(DI*1), DI + CMPQ DI, (SP) + JL match_dst_size_check_encodeBetterBlockAsm10B + MOVQ $0x00000000, ret+48(FP) + RET + +match_dst_size_check_encodeBetterBlockAsm10B: + MOVL CX, DI + ADDL $0x04, CX + ADDL $0x04, SI + MOVQ src_len+32(FP), R8 + SUBL CX, R8 + LEAQ (DX)(CX*1), R9 + LEAQ (DX)(SI*1), R10 + + // matchLen + XORL R12, R12 + CMPL R8, $0x08 + JL matchlen_single_match_nolit_encodeBetterBlockAsm10B + +matchlen_loopback_match_nolit_encodeBetterBlockAsm10B: + MOVQ (R9)(R12*1), R11 + XORQ (R10)(R12*1), R11 + TESTQ R11, R11 + JZ matchlen_loop_match_nolit_encodeBetterBlockAsm10B + BSFQ R11, R11 + SARQ $0x03, R11 + LEAL (R12)(R11*1), R12 + JMP match_nolit_end_encodeBetterBlockAsm10B + +matchlen_loop_match_nolit_encodeBetterBlockAsm10B: + LEAL -8(R8), R8 + LEAL 8(R12), R12 + CMPL R8, $0x08 + JGE matchlen_loopback_match_nolit_encodeBetterBlockAsm10B + +matchlen_single_match_nolit_encodeBetterBlockAsm10B: + TESTL R8, R8 + JZ match_nolit_end_encodeBetterBlockAsm10B + +matchlen_single_loopback_match_nolit_encodeBetterBlockAsm10B: + MOVB (R9)(R12*1), R11 + CMPB (R10)(R12*1), R11 + JNE match_nolit_end_encodeBetterBlockAsm10B + LEAL 1(R12), R12 + DECL R8 + JNZ matchlen_single_loopback_match_nolit_encodeBetterBlockAsm10B + +match_nolit_end_encodeBetterBlockAsm10B: + MOVL CX, R8 + SUBL SI, R8 + + // Check if repeat + CMPL 16(SP), R8 + JEQ match_is_repeat_encodeBetterBlockAsm10B + MOVL R8, 16(SP) + MOVL 12(SP), SI + CMPL SI, DI + JEQ emit_literal_done_match_emit_encodeBetterBlockAsm10B + MOVL DI, R9 + MOVL DI, 12(SP) + LEAQ (DX)(SI*1), R10 + SUBL SI, R9 + LEAL -1(R9), SI + CMPL SI, $0x3c + JLT one_byte_match_emit_encodeBetterBlockAsm10B + CMPL SI, $0x00000100 + JLT two_bytes_match_emit_encodeBetterBlockAsm10B + MOVB $0xf4, (AX) + MOVW SI, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_match_emit_encodeBetterBlockAsm10B + +two_bytes_match_emit_encodeBetterBlockAsm10B: + MOVB $0xf0, (AX) + MOVB SI, 1(AX) + ADDQ $0x02, AX + CMPL SI, $0x40 + JL memmove_match_emit_encodeBetterBlockAsm10B + JMP memmove_long_match_emit_encodeBetterBlockAsm10B + +one_byte_match_emit_encodeBetterBlockAsm10B: + SHLB $0x02, SI + MOVB SI, (AX) + ADDQ $0x01, AX + +memmove_match_emit_encodeBetterBlockAsm10B: + LEAQ (AX)(R9*1), SI + + // genMemMoveShort + CMPQ R9, $0x04 + JLE emit_lit_memmove_match_emit_encodeBetterBlockAsm10B_memmove_move_4 + CMPQ R9, $0x08 + JB emit_lit_memmove_match_emit_encodeBetterBlockAsm10B_memmove_move_4through7 + CMPQ R9, $0x10 + JBE emit_lit_memmove_match_emit_encodeBetterBlockAsm10B_memmove_move_8through16 + CMPQ R9, $0x20 + JBE emit_lit_memmove_match_emit_encodeBetterBlockAsm10B_memmove_move_17through32 + JMP emit_lit_memmove_match_emit_encodeBetterBlockAsm10B_memmove_move_33through64 + +emit_lit_memmove_match_emit_encodeBetterBlockAsm10B_memmove_move_4: + MOVL (R10), R11 + MOVL R11, (AX) + JMP memmove_end_copy_match_emit_encodeBetterBlockAsm10B + +emit_lit_memmove_match_emit_encodeBetterBlockAsm10B_memmove_move_4through7: + MOVL (R10), R11 + MOVL -4(R10)(R9*1), R10 + MOVL R11, (AX) + MOVL R10, 
-4(AX)(R9*1) + JMP memmove_end_copy_match_emit_encodeBetterBlockAsm10B + +emit_lit_memmove_match_emit_encodeBetterBlockAsm10B_memmove_move_8through16: + MOVQ (R10), R11 + MOVQ -8(R10)(R9*1), R10 + MOVQ R11, (AX) + MOVQ R10, -8(AX)(R9*1) + JMP memmove_end_copy_match_emit_encodeBetterBlockAsm10B + +emit_lit_memmove_match_emit_encodeBetterBlockAsm10B_memmove_move_17through32: + MOVOU (R10), X0 + MOVOU -16(R10)(R9*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(R9*1) + JMP memmove_end_copy_match_emit_encodeBetterBlockAsm10B + +emit_lit_memmove_match_emit_encodeBetterBlockAsm10B_memmove_move_33through64: + MOVOU (R10), X0 + MOVOU 16(R10), X1 + MOVOU -32(R10)(R9*1), X2 + MOVOU -16(R10)(R9*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R9*1) + MOVOU X3, -16(AX)(R9*1) + +memmove_end_copy_match_emit_encodeBetterBlockAsm10B: + MOVQ SI, AX + JMP emit_literal_done_match_emit_encodeBetterBlockAsm10B + +memmove_long_match_emit_encodeBetterBlockAsm10B: + LEAQ (AX)(R9*1), SI + + // genMemMoveLong + MOVOU (R10), X0 + MOVOU 16(R10), X1 + MOVOU -32(R10)(R9*1), X2 + MOVOU -16(R10)(R9*1), X3 + MOVQ R9, R13 + SHRQ $0x05, R13 + MOVQ AX, R11 + ANDL $0x0000001f, R11 + MOVQ $0x00000040, R14 + SUBQ R11, R14 + DECQ R13 + JA emit_lit_memmove_long_match_emit_encodeBetterBlockAsm10Blarge_forward_sse_loop_32 + LEAQ -32(R10)(R14*1), R11 + LEAQ -32(AX)(R14*1), R15 + +emit_lit_memmove_long_match_emit_encodeBetterBlockAsm10Blarge_big_loop_back: + MOVOU (R11), X4 + MOVOU 16(R11), X5 + MOVOA X4, (R15) + MOVOA X5, 16(R15) + ADDQ $0x20, R15 + ADDQ $0x20, R11 + ADDQ $0x20, R14 + DECQ R13 + JNA emit_lit_memmove_long_match_emit_encodeBetterBlockAsm10Blarge_big_loop_back + +emit_lit_memmove_long_match_emit_encodeBetterBlockAsm10Blarge_forward_sse_loop_32: + MOVOU -32(R10)(R14*1), X4 + MOVOU -16(R10)(R14*1), X5 + MOVOA X4, -32(AX)(R14*1) + MOVOA X5, -16(AX)(R14*1) + ADDQ $0x20, R14 + CMPQ R9, R14 + JAE emit_lit_memmove_long_match_emit_encodeBetterBlockAsm10Blarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R9*1) + MOVOU X3, -16(AX)(R9*1) + MOVQ SI, AX + +emit_literal_done_match_emit_encodeBetterBlockAsm10B: + ADDL R12, CX + ADDL $0x04, R12 + MOVL CX, 12(SP) + + // emitCopy +two_byte_offset_match_nolit_encodeBetterBlockAsm10B: + CMPL R12, $0x40 + JLE two_byte_offset_short_match_nolit_encodeBetterBlockAsm10B + MOVB $0xee, (AX) + MOVW R8, 1(AX) + LEAL -60(R12), R12 + ADDQ $0x03, AX + + // emitRepeat + MOVL R12, SI + LEAL -4(R12), R12 + CMPL SI, $0x08 + JLE repeat_two_match_nolit_encodeBetterBlockAsm10B_emit_copy_short + CMPL SI, $0x0c + JGE cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm10B_emit_copy_short + CMPL R8, $0x00000800 + JLT repeat_two_offset_match_nolit_encodeBetterBlockAsm10B_emit_copy_short + +cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm10B_emit_copy_short: + CMPL R12, $0x00000104 + JLT repeat_three_match_nolit_encodeBetterBlockAsm10B_emit_copy_short + LEAL -256(R12), R12 + MOVW $0x0019, (AX) + MOVW R12, 2(AX) + ADDQ $0x04, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm10B + +repeat_three_match_nolit_encodeBetterBlockAsm10B_emit_copy_short: + LEAL -4(R12), R12 + MOVW $0x0015, (AX) + MOVB R12, 2(AX) + ADDQ $0x03, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm10B + +repeat_two_match_nolit_encodeBetterBlockAsm10B_emit_copy_short: + SHLL $0x02, R12 + ORL $0x01, R12 + MOVW R12, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm10B + +repeat_two_offset_match_nolit_encodeBetterBlockAsm10B_emit_copy_short: + XORQ SI, SI + LEAL 
1(SI)(R12*4), R12 + MOVB R8, 1(AX) + SARL $0x08, R8 + SHLL $0x05, R8 + ORL R8, R12 + MOVB R12, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm10B + JMP two_byte_offset_match_nolit_encodeBetterBlockAsm10B + +two_byte_offset_short_match_nolit_encodeBetterBlockAsm10B: + CMPL R12, $0x0c + JGE emit_copy_three_match_nolit_encodeBetterBlockAsm10B + CMPL R8, $0x00000800 + JGE emit_copy_three_match_nolit_encodeBetterBlockAsm10B + MOVB $0x01, BL + LEAL -16(BX)(R12*4), R12 + MOVB R8, 1(AX) + SHRL $0x08, R8 + SHLL $0x05, R8 + ORL R8, R12 + MOVB R12, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm10B + +emit_copy_three_match_nolit_encodeBetterBlockAsm10B: + MOVB $0x02, BL + LEAL -4(BX)(R12*4), R12 + MOVB R12, (AX) + MOVW R8, 1(AX) + ADDQ $0x03, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm10B + +match_is_repeat_encodeBetterBlockAsm10B: + MOVL 12(SP), SI + CMPL SI, DI + JEQ emit_literal_done_match_emit_repeat_encodeBetterBlockAsm10B + MOVL DI, R9 + MOVL DI, 12(SP) + LEAQ (DX)(SI*1), R10 + SUBL SI, R9 + LEAL -1(R9), SI + CMPL SI, $0x3c + JLT one_byte_match_emit_repeat_encodeBetterBlockAsm10B + CMPL SI, $0x00000100 + JLT two_bytes_match_emit_repeat_encodeBetterBlockAsm10B + MOVB $0xf4, (AX) + MOVW SI, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_match_emit_repeat_encodeBetterBlockAsm10B + +two_bytes_match_emit_repeat_encodeBetterBlockAsm10B: + MOVB $0xf0, (AX) + MOVB SI, 1(AX) + ADDQ $0x02, AX + CMPL SI, $0x40 + JL memmove_match_emit_repeat_encodeBetterBlockAsm10B + JMP memmove_long_match_emit_repeat_encodeBetterBlockAsm10B + +one_byte_match_emit_repeat_encodeBetterBlockAsm10B: + SHLB $0x02, SI + MOVB SI, (AX) + ADDQ $0x01, AX + +memmove_match_emit_repeat_encodeBetterBlockAsm10B: + LEAQ (AX)(R9*1), SI + + // genMemMoveShort + CMPQ R9, $0x04 + JLE emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm10B_memmove_move_4 + CMPQ R9, $0x08 + JB emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm10B_memmove_move_4through7 + CMPQ R9, $0x10 + JBE emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm10B_memmove_move_8through16 + CMPQ R9, $0x20 + JBE emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm10B_memmove_move_17through32 + JMP emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm10B_memmove_move_33through64 + +emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm10B_memmove_move_4: + MOVL (R10), R11 + MOVL R11, (AX) + JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm10B + +emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm10B_memmove_move_4through7: + MOVL (R10), R11 + MOVL -4(R10)(R9*1), R10 + MOVL R11, (AX) + MOVL R10, -4(AX)(R9*1) + JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm10B + +emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm10B_memmove_move_8through16: + MOVQ (R10), R11 + MOVQ -8(R10)(R9*1), R10 + MOVQ R11, (AX) + MOVQ R10, -8(AX)(R9*1) + JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm10B + +emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm10B_memmove_move_17through32: + MOVOU (R10), X0 + MOVOU -16(R10)(R9*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(R9*1) + JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm10B + +emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm10B_memmove_move_33through64: + MOVOU (R10), X0 + MOVOU 16(R10), X1 + MOVOU -32(R10)(R9*1), X2 + MOVOU -16(R10)(R9*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R9*1) + MOVOU X3, -16(AX)(R9*1) + +memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm10B: + MOVQ SI, AX + JMP 
emit_literal_done_match_emit_repeat_encodeBetterBlockAsm10B + +memmove_long_match_emit_repeat_encodeBetterBlockAsm10B: + LEAQ (AX)(R9*1), SI + + // genMemMoveLong + MOVOU (R10), X0 + MOVOU 16(R10), X1 + MOVOU -32(R10)(R9*1), X2 + MOVOU -16(R10)(R9*1), X3 + MOVQ R9, R13 + SHRQ $0x05, R13 + MOVQ AX, R11 + ANDL $0x0000001f, R11 + MOVQ $0x00000040, R14 + SUBQ R11, R14 + DECQ R13 + JA emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm10Blarge_forward_sse_loop_32 + LEAQ -32(R10)(R14*1), R11 + LEAQ -32(AX)(R14*1), R15 + +emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm10Blarge_big_loop_back: + MOVOU (R11), X4 + MOVOU 16(R11), X5 + MOVOA X4, (R15) + MOVOA X5, 16(R15) + ADDQ $0x20, R15 + ADDQ $0x20, R11 + ADDQ $0x20, R14 + DECQ R13 + JNA emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm10Blarge_big_loop_back + +emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm10Blarge_forward_sse_loop_32: + MOVOU -32(R10)(R14*1), X4 + MOVOU -16(R10)(R14*1), X5 + MOVOA X4, -32(AX)(R14*1) + MOVOA X5, -16(AX)(R14*1) + ADDQ $0x20, R14 + CMPQ R9, R14 + JAE emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm10Blarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R9*1) + MOVOU X3, -16(AX)(R9*1) + MOVQ SI, AX + +emit_literal_done_match_emit_repeat_encodeBetterBlockAsm10B: + ADDL R12, CX + ADDL $0x04, R12 + MOVL CX, 12(SP) + + // emitRepeat + MOVL R12, SI + LEAL -4(R12), R12 + CMPL SI, $0x08 + JLE repeat_two_match_nolit_repeat_encodeBetterBlockAsm10B + CMPL SI, $0x0c + JGE cant_repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm10B + CMPL R8, $0x00000800 + JLT repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm10B + +cant_repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm10B: + CMPL R12, $0x00000104 + JLT repeat_three_match_nolit_repeat_encodeBetterBlockAsm10B + LEAL -256(R12), R12 + MOVW $0x0019, (AX) + MOVW R12, 2(AX) + ADDQ $0x04, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm10B + +repeat_three_match_nolit_repeat_encodeBetterBlockAsm10B: + LEAL -4(R12), R12 + MOVW $0x0015, (AX) + MOVB R12, 2(AX) + ADDQ $0x03, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm10B + +repeat_two_match_nolit_repeat_encodeBetterBlockAsm10B: + SHLL $0x02, R12 + ORL $0x01, R12 + MOVW R12, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm10B + +repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm10B: + XORQ SI, SI + LEAL 1(SI)(R12*4), R12 + MOVB R8, 1(AX) + SARL $0x08, R8 + SHLL $0x05, R8 + ORL R8, R12 + MOVB R12, (AX) + ADDQ $0x02, AX + +match_nolit_emitcopy_end_encodeBetterBlockAsm10B: + CMPL CX, 8(SP) + JGE emit_remainder_encodeBetterBlockAsm10B + CMPQ AX, (SP) + JL match_nolit_dst_ok_encodeBetterBlockAsm10B + MOVQ $0x00000000, ret+48(FP) + RET + +match_nolit_dst_ok_encodeBetterBlockAsm10B: + MOVQ $0x0000cf1bbcdcbf9b, SI + MOVQ $0x9e3779b1, R8 + INCL DI + MOVQ (DX)(DI*1), R9 + MOVQ R9, R10 + MOVQ R9, R11 + MOVQ R9, R12 + SHRQ $0x08, R11 + MOVQ R11, R13 + SHRQ $0x10, R12 + LEAL 1(DI), R14 + LEAL 2(DI), R15 + MOVQ -2(DX)(CX*1), R9 + SHLQ $0x10, R10 + IMULQ SI, R10 + SHRQ $0x34, R10 + SHLQ $0x10, R13 + IMULQ SI, R13 + SHRQ $0x34, R13 + SHLQ $0x20, R11 + IMULQ R8, R11 + SHRQ $0x36, R11 + SHLQ $0x20, R12 + IMULQ R8, R12 + SHRQ $0x36, R12 + MOVL DI, 24(SP)(R10*4) + MOVL R14, 24(SP)(R13*4) + MOVL R14, 16408(SP)(R11*4) + MOVL R15, 16408(SP)(R12*4) + MOVQ R9, R10 + MOVQ R9, R11 + SHRQ $0x08, R11 + MOVQ R11, R13 + LEAL -2(CX), R9 + LEAL -1(CX), DI + SHLQ $0x10, R10 + IMULQ SI, R10 + SHRQ $0x34, R10 + SHLQ $0x20, 
R11 + IMULQ R8, R11 + SHRQ $0x36, R11 + SHLQ $0x10, R13 + IMULQ SI, R13 + SHRQ $0x34, R13 + MOVL R9, 24(SP)(R10*4) + MOVL DI, 16408(SP)(R11*4) + MOVL DI, 24(SP)(R13*4) + JMP search_loop_encodeBetterBlockAsm10B + +emit_remainder_encodeBetterBlockAsm10B: + MOVQ src_len+32(FP), CX + SUBL 12(SP), CX + LEAQ 3(AX)(CX*1), CX + CMPQ CX, (SP) + JL emit_remainder_ok_encodeBetterBlockAsm10B + MOVQ $0x00000000, ret+48(FP) + RET + +emit_remainder_ok_encodeBetterBlockAsm10B: + MOVQ src_len+32(FP), CX + MOVL 12(SP), BX + CMPL BX, CX + JEQ emit_literal_done_emit_remainder_encodeBetterBlockAsm10B + MOVL CX, SI + MOVL CX, 12(SP) + LEAQ (DX)(BX*1), CX + SUBL BX, SI + LEAL -1(SI), DX + CMPL DX, $0x3c + JLT one_byte_emit_remainder_encodeBetterBlockAsm10B + CMPL DX, $0x00000100 + JLT two_bytes_emit_remainder_encodeBetterBlockAsm10B + MOVB $0xf4, (AX) + MOVW DX, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_emit_remainder_encodeBetterBlockAsm10B + +two_bytes_emit_remainder_encodeBetterBlockAsm10B: + MOVB $0xf0, (AX) + MOVB DL, 1(AX) + ADDQ $0x02, AX + CMPL DX, $0x40 + JL memmove_emit_remainder_encodeBetterBlockAsm10B + JMP memmove_long_emit_remainder_encodeBetterBlockAsm10B + +one_byte_emit_remainder_encodeBetterBlockAsm10B: + SHLB $0x02, DL + MOVB DL, (AX) + ADDQ $0x01, AX + +memmove_emit_remainder_encodeBetterBlockAsm10B: + LEAQ (AX)(SI*1), DX + MOVL SI, BX + + // genMemMoveShort + CMPQ BX, $0x04 + JLE emit_lit_memmove_emit_remainder_encodeBetterBlockAsm10B_memmove_move_4 + CMPQ BX, $0x08 + JB emit_lit_memmove_emit_remainder_encodeBetterBlockAsm10B_memmove_move_4through7 + CMPQ BX, $0x10 + JBE emit_lit_memmove_emit_remainder_encodeBetterBlockAsm10B_memmove_move_8through16 + CMPQ BX, $0x20 + JBE emit_lit_memmove_emit_remainder_encodeBetterBlockAsm10B_memmove_move_17through32 + JMP emit_lit_memmove_emit_remainder_encodeBetterBlockAsm10B_memmove_move_33through64 + +emit_lit_memmove_emit_remainder_encodeBetterBlockAsm10B_memmove_move_4: + MOVL (CX), SI + MOVL SI, (AX) + JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm10B + +emit_lit_memmove_emit_remainder_encodeBetterBlockAsm10B_memmove_move_4through7: + MOVL (CX), SI + MOVL -4(CX)(BX*1), CX + MOVL SI, (AX) + MOVL CX, -4(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm10B + +emit_lit_memmove_emit_remainder_encodeBetterBlockAsm10B_memmove_move_8through16: + MOVQ (CX), SI + MOVQ -8(CX)(BX*1), CX + MOVQ SI, (AX) + MOVQ CX, -8(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm10B + +emit_lit_memmove_emit_remainder_encodeBetterBlockAsm10B_memmove_move_17through32: + MOVOU (CX), X0 + MOVOU -16(CX)(BX*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm10B + +emit_lit_memmove_emit_remainder_encodeBetterBlockAsm10B_memmove_move_33through64: + MOVOU (CX), X0 + MOVOU 16(CX), X1 + MOVOU -32(CX)(BX*1), X2 + MOVOU -16(CX)(BX*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(BX*1) + MOVOU X3, -16(AX)(BX*1) + +memmove_end_copy_emit_remainder_encodeBetterBlockAsm10B: + MOVQ DX, AX + JMP emit_literal_done_emit_remainder_encodeBetterBlockAsm10B + +memmove_long_emit_remainder_encodeBetterBlockAsm10B: + LEAQ (AX)(SI*1), DX + MOVL SI, BX + + // genMemMoveLong + MOVOU (CX), X0 + MOVOU 16(CX), X1 + MOVOU -32(CX)(BX*1), X2 + MOVOU -16(CX)(BX*1), X3 + MOVQ BX, DI + SHRQ $0x05, DI + MOVQ AX, SI + ANDL $0x0000001f, SI + MOVQ $0x00000040, R8 + SUBQ SI, R8 + DECQ DI + JA emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm10Blarge_forward_sse_loop_32 + LEAQ -32(CX)(R8*1), SI + 
LEAQ -32(AX)(R8*1), R9 + +emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm10Blarge_big_loop_back: + MOVOU (SI), X4 + MOVOU 16(SI), X5 + MOVOA X4, (R9) + MOVOA X5, 16(R9) + ADDQ $0x20, R9 + ADDQ $0x20, SI + ADDQ $0x20, R8 + DECQ DI + JNA emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm10Blarge_big_loop_back + +emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm10Blarge_forward_sse_loop_32: + MOVOU -32(CX)(R8*1), X4 + MOVOU -16(CX)(R8*1), X5 + MOVOA X4, -32(AX)(R8*1) + MOVOA X5, -16(AX)(R8*1) + ADDQ $0x20, R8 + CMPQ BX, R8 + JAE emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm10Blarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(BX*1) + MOVOU X3, -16(AX)(BX*1) + MOVQ DX, AX + +emit_literal_done_emit_remainder_encodeBetterBlockAsm10B: + MOVQ dst_base+0(FP), CX + SUBQ CX, AX + MOVQ AX, ret+48(FP) + RET + +// func encodeBetterBlockAsm8B(dst []byte, src []byte) int +// Requires: SSE2 +TEXT ·encodeBetterBlockAsm8B(SB), $5144-56 + MOVQ dst_base+0(FP), AX + MOVQ $0x00000028, CX + LEAQ 24(SP), DX + PXOR X0, X0 + +zero_loop_encodeBetterBlockAsm8B: + MOVOU X0, (DX) + MOVOU X0, 16(DX) + MOVOU X0, 32(DX) + MOVOU X0, 48(DX) + MOVOU X0, 64(DX) + MOVOU X0, 80(DX) + MOVOU X0, 96(DX) + MOVOU X0, 112(DX) + ADDQ $0x80, DX + DECQ CX + JNZ zero_loop_encodeBetterBlockAsm8B + MOVL $0x00000000, 12(SP) + MOVQ src_len+32(FP), CX + LEAQ -6(CX), DX + LEAQ -8(CX), SI + MOVL SI, 8(SP) + SHRQ $0x05, CX + SUBL CX, DX + LEAQ (AX)(DX*1), DX + MOVQ DX, (SP) + MOVL $0x00000001, CX + MOVL $0x00000000, 16(SP) + MOVQ src_base+24(FP), DX + +search_loop_encodeBetterBlockAsm8B: + MOVL CX, SI + SUBL 12(SP), SI + SHRL $0x04, SI + LEAL 1(CX)(SI*1), SI + CMPL SI, 8(SP) + JGE emit_remainder_encodeBetterBlockAsm8B + MOVQ (DX)(CX*1), DI + MOVL SI, 20(SP) + MOVQ $0x0000cf1bbcdcbf9b, R9 + MOVQ $0x9e3779b1, SI + MOVQ DI, R10 + MOVQ DI, R11 + SHLQ $0x10, R10 + IMULQ R9, R10 + SHRQ $0x36, R10 + SHLQ $0x20, R11 + IMULQ SI, R11 + SHRQ $0x38, R11 + MOVL 24(SP)(R10*4), SI + MOVL 4120(SP)(R11*4), R8 + MOVL CX, 24(SP)(R10*4) + MOVL CX, 4120(SP)(R11*4) + CMPL (DX)(SI*1), DI + JEQ candidate_match_encodeBetterBlockAsm8B + CMPL (DX)(R8*1), DI + JEQ candidateS_match_encodeBetterBlockAsm8B + MOVL 20(SP), CX + JMP search_loop_encodeBetterBlockAsm8B + +candidateS_match_encodeBetterBlockAsm8B: + SHRQ $0x08, DI + MOVQ DI, R10 + SHLQ $0x10, R10 + IMULQ R9, R10 + SHRQ $0x36, R10 + MOVL 24(SP)(R10*4), SI + INCL CX + MOVL CX, 24(SP)(R10*4) + CMPL (DX)(SI*1), DI + JEQ candidate_match_encodeBetterBlockAsm8B + DECL CX + MOVL R8, SI + +candidate_match_encodeBetterBlockAsm8B: + MOVL 12(SP), DI + TESTL SI, SI + JZ match_extend_back_end_encodeBetterBlockAsm8B + +match_extend_back_loop_encodeBetterBlockAsm8B: + CMPL CX, DI + JLE match_extend_back_end_encodeBetterBlockAsm8B + MOVB -1(DX)(SI*1), BL + MOVB -1(DX)(CX*1), R8 + CMPB BL, R8 + JNE match_extend_back_end_encodeBetterBlockAsm8B + LEAL -1(CX), CX + DECL SI + JZ match_extend_back_end_encodeBetterBlockAsm8B + JMP match_extend_back_loop_encodeBetterBlockAsm8B + +match_extend_back_end_encodeBetterBlockAsm8B: + MOVL CX, DI + SUBL 12(SP), DI + LEAQ 3(AX)(DI*1), DI + CMPQ DI, (SP) + JL match_dst_size_check_encodeBetterBlockAsm8B + MOVQ $0x00000000, ret+48(FP) + RET + +match_dst_size_check_encodeBetterBlockAsm8B: + MOVL CX, DI + ADDL $0x04, CX + ADDL $0x04, SI + MOVQ src_len+32(FP), R8 + SUBL CX, R8 + LEAQ (DX)(CX*1), R9 + LEAQ (DX)(SI*1), R10 + + // matchLen + XORL R12, R12 + CMPL R8, $0x08 + JL matchlen_single_match_nolit_encodeBetterBlockAsm8B + 
+matchlen_loopback_match_nolit_encodeBetterBlockAsm8B: + MOVQ (R9)(R12*1), R11 + XORQ (R10)(R12*1), R11 + TESTQ R11, R11 + JZ matchlen_loop_match_nolit_encodeBetterBlockAsm8B + BSFQ R11, R11 + SARQ $0x03, R11 + LEAL (R12)(R11*1), R12 + JMP match_nolit_end_encodeBetterBlockAsm8B + +matchlen_loop_match_nolit_encodeBetterBlockAsm8B: + LEAL -8(R8), R8 + LEAL 8(R12), R12 + CMPL R8, $0x08 + JGE matchlen_loopback_match_nolit_encodeBetterBlockAsm8B + +matchlen_single_match_nolit_encodeBetterBlockAsm8B: + TESTL R8, R8 + JZ match_nolit_end_encodeBetterBlockAsm8B + +matchlen_single_loopback_match_nolit_encodeBetterBlockAsm8B: + MOVB (R9)(R12*1), R11 + CMPB (R10)(R12*1), R11 + JNE match_nolit_end_encodeBetterBlockAsm8B + LEAL 1(R12), R12 + DECL R8 + JNZ matchlen_single_loopback_match_nolit_encodeBetterBlockAsm8B + +match_nolit_end_encodeBetterBlockAsm8B: + MOVL CX, R8 + SUBL SI, R8 + + // Check if repeat + CMPL 16(SP), R8 + JEQ match_is_repeat_encodeBetterBlockAsm8B + MOVL R8, 16(SP) + MOVL 12(SP), SI + CMPL SI, DI + JEQ emit_literal_done_match_emit_encodeBetterBlockAsm8B + MOVL DI, R9 + MOVL DI, 12(SP) + LEAQ (DX)(SI*1), R10 + SUBL SI, R9 + LEAL -1(R9), SI + CMPL SI, $0x3c + JLT one_byte_match_emit_encodeBetterBlockAsm8B + CMPL SI, $0x00000100 + JLT two_bytes_match_emit_encodeBetterBlockAsm8B + MOVB $0xf4, (AX) + MOVW SI, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_match_emit_encodeBetterBlockAsm8B + +two_bytes_match_emit_encodeBetterBlockAsm8B: + MOVB $0xf0, (AX) + MOVB SI, 1(AX) + ADDQ $0x02, AX + CMPL SI, $0x40 + JL memmove_match_emit_encodeBetterBlockAsm8B + JMP memmove_long_match_emit_encodeBetterBlockAsm8B + +one_byte_match_emit_encodeBetterBlockAsm8B: + SHLB $0x02, SI + MOVB SI, (AX) + ADDQ $0x01, AX + +memmove_match_emit_encodeBetterBlockAsm8B: + LEAQ (AX)(R9*1), SI + + // genMemMoveShort + CMPQ R9, $0x04 + JLE emit_lit_memmove_match_emit_encodeBetterBlockAsm8B_memmove_move_4 + CMPQ R9, $0x08 + JB emit_lit_memmove_match_emit_encodeBetterBlockAsm8B_memmove_move_4through7 + CMPQ R9, $0x10 + JBE emit_lit_memmove_match_emit_encodeBetterBlockAsm8B_memmove_move_8through16 + CMPQ R9, $0x20 + JBE emit_lit_memmove_match_emit_encodeBetterBlockAsm8B_memmove_move_17through32 + JMP emit_lit_memmove_match_emit_encodeBetterBlockAsm8B_memmove_move_33through64 + +emit_lit_memmove_match_emit_encodeBetterBlockAsm8B_memmove_move_4: + MOVL (R10), R11 + MOVL R11, (AX) + JMP memmove_end_copy_match_emit_encodeBetterBlockAsm8B + +emit_lit_memmove_match_emit_encodeBetterBlockAsm8B_memmove_move_4through7: + MOVL (R10), R11 + MOVL -4(R10)(R9*1), R10 + MOVL R11, (AX) + MOVL R10, -4(AX)(R9*1) + JMP memmove_end_copy_match_emit_encodeBetterBlockAsm8B + +emit_lit_memmove_match_emit_encodeBetterBlockAsm8B_memmove_move_8through16: + MOVQ (R10), R11 + MOVQ -8(R10)(R9*1), R10 + MOVQ R11, (AX) + MOVQ R10, -8(AX)(R9*1) + JMP memmove_end_copy_match_emit_encodeBetterBlockAsm8B + +emit_lit_memmove_match_emit_encodeBetterBlockAsm8B_memmove_move_17through32: + MOVOU (R10), X0 + MOVOU -16(R10)(R9*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(R9*1) + JMP memmove_end_copy_match_emit_encodeBetterBlockAsm8B + +emit_lit_memmove_match_emit_encodeBetterBlockAsm8B_memmove_move_33through64: + MOVOU (R10), X0 + MOVOU 16(R10), X1 + MOVOU -32(R10)(R9*1), X2 + MOVOU -16(R10)(R9*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R9*1) + MOVOU X3, -16(AX)(R9*1) + +memmove_end_copy_match_emit_encodeBetterBlockAsm8B: + MOVQ SI, AX + JMP emit_literal_done_match_emit_encodeBetterBlockAsm8B + +memmove_long_match_emit_encodeBetterBlockAsm8B: + LEAQ 
(AX)(R9*1), SI + + // genMemMoveLong + MOVOU (R10), X0 + MOVOU 16(R10), X1 + MOVOU -32(R10)(R9*1), X2 + MOVOU -16(R10)(R9*1), X3 + MOVQ R9, R13 + SHRQ $0x05, R13 + MOVQ AX, R11 + ANDL $0x0000001f, R11 + MOVQ $0x00000040, R14 + SUBQ R11, R14 + DECQ R13 + JA emit_lit_memmove_long_match_emit_encodeBetterBlockAsm8Blarge_forward_sse_loop_32 + LEAQ -32(R10)(R14*1), R11 + LEAQ -32(AX)(R14*1), R15 + +emit_lit_memmove_long_match_emit_encodeBetterBlockAsm8Blarge_big_loop_back: + MOVOU (R11), X4 + MOVOU 16(R11), X5 + MOVOA X4, (R15) + MOVOA X5, 16(R15) + ADDQ $0x20, R15 + ADDQ $0x20, R11 + ADDQ $0x20, R14 + DECQ R13 + JNA emit_lit_memmove_long_match_emit_encodeBetterBlockAsm8Blarge_big_loop_back + +emit_lit_memmove_long_match_emit_encodeBetterBlockAsm8Blarge_forward_sse_loop_32: + MOVOU -32(R10)(R14*1), X4 + MOVOU -16(R10)(R14*1), X5 + MOVOA X4, -32(AX)(R14*1) + MOVOA X5, -16(AX)(R14*1) + ADDQ $0x20, R14 + CMPQ R9, R14 + JAE emit_lit_memmove_long_match_emit_encodeBetterBlockAsm8Blarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R9*1) + MOVOU X3, -16(AX)(R9*1) + MOVQ SI, AX + +emit_literal_done_match_emit_encodeBetterBlockAsm8B: + ADDL R12, CX + ADDL $0x04, R12 + MOVL CX, 12(SP) + + // emitCopy +two_byte_offset_match_nolit_encodeBetterBlockAsm8B: + CMPL R12, $0x40 + JLE two_byte_offset_short_match_nolit_encodeBetterBlockAsm8B + MOVB $0xee, (AX) + MOVW R8, 1(AX) + LEAL -60(R12), R12 + ADDQ $0x03, AX + + // emitRepeat + MOVL R12, SI + LEAL -4(R12), R12 + CMPL SI, $0x08 + JLE repeat_two_match_nolit_encodeBetterBlockAsm8B_emit_copy_short + CMPL SI, $0x0c + JGE cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm8B_emit_copy_short + +cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm8B_emit_copy_short: + CMPL R12, $0x00000104 + JLT repeat_three_match_nolit_encodeBetterBlockAsm8B_emit_copy_short + LEAL -256(R12), R12 + MOVW $0x0019, (AX) + MOVW R12, 2(AX) + ADDQ $0x04, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm8B + +repeat_three_match_nolit_encodeBetterBlockAsm8B_emit_copy_short: + LEAL -4(R12), R12 + MOVW $0x0015, (AX) + MOVB R12, 2(AX) + ADDQ $0x03, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm8B + +repeat_two_match_nolit_encodeBetterBlockAsm8B_emit_copy_short: + SHLL $0x02, R12 + ORL $0x01, R12 + MOVW R12, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm8B + XORQ SI, SI + LEAL 1(SI)(R12*4), R12 + MOVB R8, 1(AX) + SARL $0x08, R8 + SHLL $0x05, R8 + ORL R8, R12 + MOVB R12, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm8B + JMP two_byte_offset_match_nolit_encodeBetterBlockAsm8B + +two_byte_offset_short_match_nolit_encodeBetterBlockAsm8B: + CMPL R12, $0x0c + JGE emit_copy_three_match_nolit_encodeBetterBlockAsm8B + MOVB $0x01, BL + LEAL -16(BX)(R12*4), R12 + MOVB R8, 1(AX) + SHRL $0x08, R8 + SHLL $0x05, R8 + ORL R8, R12 + MOVB R12, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm8B + +emit_copy_three_match_nolit_encodeBetterBlockAsm8B: + MOVB $0x02, BL + LEAL -4(BX)(R12*4), R12 + MOVB R12, (AX) + MOVW R8, 1(AX) + ADDQ $0x03, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm8B + +match_is_repeat_encodeBetterBlockAsm8B: + MOVL 12(SP), SI + CMPL SI, DI + JEQ emit_literal_done_match_emit_repeat_encodeBetterBlockAsm8B + MOVL DI, R8 + MOVL DI, 12(SP) + LEAQ (DX)(SI*1), R9 + SUBL SI, R8 + LEAL -1(R8), SI + CMPL SI, $0x3c + JLT one_byte_match_emit_repeat_encodeBetterBlockAsm8B + CMPL SI, $0x00000100 + JLT two_bytes_match_emit_repeat_encodeBetterBlockAsm8B + MOVB $0xf4, (AX) + 
MOVW SI, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_match_emit_repeat_encodeBetterBlockAsm8B + +two_bytes_match_emit_repeat_encodeBetterBlockAsm8B: + MOVB $0xf0, (AX) + MOVB SI, 1(AX) + ADDQ $0x02, AX + CMPL SI, $0x40 + JL memmove_match_emit_repeat_encodeBetterBlockAsm8B + JMP memmove_long_match_emit_repeat_encodeBetterBlockAsm8B + +one_byte_match_emit_repeat_encodeBetterBlockAsm8B: + SHLB $0x02, SI + MOVB SI, (AX) + ADDQ $0x01, AX + +memmove_match_emit_repeat_encodeBetterBlockAsm8B: + LEAQ (AX)(R8*1), SI + + // genMemMoveShort + CMPQ R8, $0x04 + JLE emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm8B_memmove_move_4 + CMPQ R8, $0x08 + JB emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm8B_memmove_move_4through7 + CMPQ R8, $0x10 + JBE emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm8B_memmove_move_8through16 + CMPQ R8, $0x20 + JBE emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm8B_memmove_move_17through32 + JMP emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm8B_memmove_move_33through64 + +emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm8B_memmove_move_4: + MOVL (R9), R10 + MOVL R10, (AX) + JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm8B + +emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm8B_memmove_move_4through7: + MOVL (R9), R10 + MOVL -4(R9)(R8*1), R9 + MOVL R10, (AX) + MOVL R9, -4(AX)(R8*1) + JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm8B + +emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm8B_memmove_move_8through16: + MOVQ (R9), R10 + MOVQ -8(R9)(R8*1), R9 + MOVQ R10, (AX) + MOVQ R9, -8(AX)(R8*1) + JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm8B + +emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm8B_memmove_move_17through32: + MOVOU (R9), X0 + MOVOU -16(R9)(R8*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(R8*1) + JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm8B + +emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm8B_memmove_move_33through64: + MOVOU (R9), X0 + MOVOU 16(R9), X1 + MOVOU -32(R9)(R8*1), X2 + MOVOU -16(R9)(R8*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R8*1) + MOVOU X3, -16(AX)(R8*1) + +memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm8B: + MOVQ SI, AX + JMP emit_literal_done_match_emit_repeat_encodeBetterBlockAsm8B + +memmove_long_match_emit_repeat_encodeBetterBlockAsm8B: + LEAQ (AX)(R8*1), SI + + // genMemMoveLong + MOVOU (R9), X0 + MOVOU 16(R9), X1 + MOVOU -32(R9)(R8*1), X2 + MOVOU -16(R9)(R8*1), X3 + MOVQ R8, R11 + SHRQ $0x05, R11 + MOVQ AX, R10 + ANDL $0x0000001f, R10 + MOVQ $0x00000040, R13 + SUBQ R10, R13 + DECQ R11 + JA emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm8Blarge_forward_sse_loop_32 + LEAQ -32(R9)(R13*1), R10 + LEAQ -32(AX)(R13*1), R14 + +emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm8Blarge_big_loop_back: + MOVOU (R10), X4 + MOVOU 16(R10), X5 + MOVOA X4, (R14) + MOVOA X5, 16(R14) + ADDQ $0x20, R14 + ADDQ $0x20, R10 + ADDQ $0x20, R13 + DECQ R11 + JNA emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm8Blarge_big_loop_back + +emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm8Blarge_forward_sse_loop_32: + MOVOU -32(R9)(R13*1), X4 + MOVOU -16(R9)(R13*1), X5 + MOVOA X4, -32(AX)(R13*1) + MOVOA X5, -16(AX)(R13*1) + ADDQ $0x20, R13 + CMPQ R8, R13 + JAE emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm8Blarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R8*1) + MOVOU X3, -16(AX)(R8*1) + MOVQ SI, AX + 
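+	// Literal run copied; the label below adds the match length to CX
+	// and encodes the match itself as a repeat.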
+emit_literal_done_match_emit_repeat_encodeBetterBlockAsm8B: + ADDL R12, CX + ADDL $0x04, R12 + MOVL CX, 12(SP) + + // emitRepeat + MOVL R12, SI + LEAL -4(R12), R12 + CMPL SI, $0x08 + JLE repeat_two_match_nolit_repeat_encodeBetterBlockAsm8B + CMPL SI, $0x0c + JGE cant_repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm8B + +cant_repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm8B: + CMPL R12, $0x00000104 + JLT repeat_three_match_nolit_repeat_encodeBetterBlockAsm8B + LEAL -256(R12), R12 + MOVW $0x0019, (AX) + MOVW R12, 2(AX) + ADDQ $0x04, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm8B + +repeat_three_match_nolit_repeat_encodeBetterBlockAsm8B: + LEAL -4(R12), R12 + MOVW $0x0015, (AX) + MOVB R12, 2(AX) + ADDQ $0x03, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm8B + +repeat_two_match_nolit_repeat_encodeBetterBlockAsm8B: + SHLL $0x02, R12 + ORL $0x01, R12 + MOVW R12, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm8B + XORQ SI, SI + LEAL 1(SI)(R12*4), R12 + MOVB R8, 1(AX) + SARL $0x08, R8 + SHLL $0x05, R8 + ORL R8, R12 + MOVB R12, (AX) + ADDQ $0x02, AX + +match_nolit_emitcopy_end_encodeBetterBlockAsm8B: + CMPL CX, 8(SP) + JGE emit_remainder_encodeBetterBlockAsm8B + CMPQ AX, (SP) + JL match_nolit_dst_ok_encodeBetterBlockAsm8B + MOVQ $0x00000000, ret+48(FP) + RET + +match_nolit_dst_ok_encodeBetterBlockAsm8B: + MOVQ $0x0000cf1bbcdcbf9b, SI + MOVQ $0x9e3779b1, R8 + INCL DI + MOVQ (DX)(DI*1), R9 + MOVQ R9, R10 + MOVQ R9, R11 + MOVQ R9, R12 + SHRQ $0x08, R11 + MOVQ R11, R13 + SHRQ $0x10, R12 + LEAL 1(DI), R14 + LEAL 2(DI), R15 + MOVQ -2(DX)(CX*1), R9 + SHLQ $0x10, R10 + IMULQ SI, R10 + SHRQ $0x36, R10 + SHLQ $0x10, R13 + IMULQ SI, R13 + SHRQ $0x36, R13 + SHLQ $0x20, R11 + IMULQ R8, R11 + SHRQ $0x38, R11 + SHLQ $0x20, R12 + IMULQ R8, R12 + SHRQ $0x38, R12 + MOVL DI, 24(SP)(R10*4) + MOVL R14, 24(SP)(R13*4) + MOVL R14, 4120(SP)(R11*4) + MOVL R15, 4120(SP)(R12*4) + MOVQ R9, R10 + MOVQ R9, R11 + SHRQ $0x08, R11 + MOVQ R11, R13 + LEAL -2(CX), R9 + LEAL -1(CX), DI + SHLQ $0x10, R10 + IMULQ SI, R10 + SHRQ $0x36, R10 + SHLQ $0x20, R11 + IMULQ R8, R11 + SHRQ $0x38, R11 + SHLQ $0x10, R13 + IMULQ SI, R13 + SHRQ $0x36, R13 + MOVL R9, 24(SP)(R10*4) + MOVL DI, 4120(SP)(R11*4) + MOVL DI, 24(SP)(R13*4) + JMP search_loop_encodeBetterBlockAsm8B + +emit_remainder_encodeBetterBlockAsm8B: + MOVQ src_len+32(FP), CX + SUBL 12(SP), CX + LEAQ 3(AX)(CX*1), CX + CMPQ CX, (SP) + JL emit_remainder_ok_encodeBetterBlockAsm8B + MOVQ $0x00000000, ret+48(FP) + RET + +emit_remainder_ok_encodeBetterBlockAsm8B: + MOVQ src_len+32(FP), CX + MOVL 12(SP), BX + CMPL BX, CX + JEQ emit_literal_done_emit_remainder_encodeBetterBlockAsm8B + MOVL CX, SI + MOVL CX, 12(SP) + LEAQ (DX)(BX*1), CX + SUBL BX, SI + LEAL -1(SI), DX + CMPL DX, $0x3c + JLT one_byte_emit_remainder_encodeBetterBlockAsm8B + CMPL DX, $0x00000100 + JLT two_bytes_emit_remainder_encodeBetterBlockAsm8B + MOVB $0xf4, (AX) + MOVW DX, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_emit_remainder_encodeBetterBlockAsm8B + +two_bytes_emit_remainder_encodeBetterBlockAsm8B: + MOVB $0xf0, (AX) + MOVB DL, 1(AX) + ADDQ $0x02, AX + CMPL DX, $0x40 + JL memmove_emit_remainder_encodeBetterBlockAsm8B + JMP memmove_long_emit_remainder_encodeBetterBlockAsm8B + +one_byte_emit_remainder_encodeBetterBlockAsm8B: + SHLB $0x02, DL + MOVB DL, (AX) + ADDQ $0x01, AX + +memmove_emit_remainder_encodeBetterBlockAsm8B: + LEAQ (AX)(SI*1), DX + MOVL SI, BX + + // genMemMoveShort + CMPQ BX, $0x04 + JLE 
emit_lit_memmove_emit_remainder_encodeBetterBlockAsm8B_memmove_move_4 + CMPQ BX, $0x08 + JB emit_lit_memmove_emit_remainder_encodeBetterBlockAsm8B_memmove_move_4through7 + CMPQ BX, $0x10 + JBE emit_lit_memmove_emit_remainder_encodeBetterBlockAsm8B_memmove_move_8through16 + CMPQ BX, $0x20 + JBE emit_lit_memmove_emit_remainder_encodeBetterBlockAsm8B_memmove_move_17through32 + JMP emit_lit_memmove_emit_remainder_encodeBetterBlockAsm8B_memmove_move_33through64 + +emit_lit_memmove_emit_remainder_encodeBetterBlockAsm8B_memmove_move_4: + MOVL (CX), SI + MOVL SI, (AX) + JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm8B + +emit_lit_memmove_emit_remainder_encodeBetterBlockAsm8B_memmove_move_4through7: + MOVL (CX), SI + MOVL -4(CX)(BX*1), CX + MOVL SI, (AX) + MOVL CX, -4(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm8B + +emit_lit_memmove_emit_remainder_encodeBetterBlockAsm8B_memmove_move_8through16: + MOVQ (CX), SI + MOVQ -8(CX)(BX*1), CX + MOVQ SI, (AX) + MOVQ CX, -8(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm8B + +emit_lit_memmove_emit_remainder_encodeBetterBlockAsm8B_memmove_move_17through32: + MOVOU (CX), X0 + MOVOU -16(CX)(BX*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm8B + +emit_lit_memmove_emit_remainder_encodeBetterBlockAsm8B_memmove_move_33through64: + MOVOU (CX), X0 + MOVOU 16(CX), X1 + MOVOU -32(CX)(BX*1), X2 + MOVOU -16(CX)(BX*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(BX*1) + MOVOU X3, -16(AX)(BX*1) + +memmove_end_copy_emit_remainder_encodeBetterBlockAsm8B: + MOVQ DX, AX + JMP emit_literal_done_emit_remainder_encodeBetterBlockAsm8B + +memmove_long_emit_remainder_encodeBetterBlockAsm8B: + LEAQ (AX)(SI*1), DX + MOVL SI, BX + + // genMemMoveLong + MOVOU (CX), X0 + MOVOU 16(CX), X1 + MOVOU -32(CX)(BX*1), X2 + MOVOU -16(CX)(BX*1), X3 + MOVQ BX, DI + SHRQ $0x05, DI + MOVQ AX, SI + ANDL $0x0000001f, SI + MOVQ $0x00000040, R8 + SUBQ SI, R8 + DECQ DI + JA emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm8Blarge_forward_sse_loop_32 + LEAQ -32(CX)(R8*1), SI + LEAQ -32(AX)(R8*1), R9 + +emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm8Blarge_big_loop_back: + MOVOU (SI), X4 + MOVOU 16(SI), X5 + MOVOA X4, (R9) + MOVOA X5, 16(R9) + ADDQ $0x20, R9 + ADDQ $0x20, SI + ADDQ $0x20, R8 + DECQ DI + JNA emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm8Blarge_big_loop_back + +emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm8Blarge_forward_sse_loop_32: + MOVOU -32(CX)(R8*1), X4 + MOVOU -16(CX)(R8*1), X5 + MOVOA X4, -32(AX)(R8*1) + MOVOA X5, -16(AX)(R8*1) + ADDQ $0x20, R8 + CMPQ BX, R8 + JAE emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm8Blarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(BX*1) + MOVOU X3, -16(AX)(BX*1) + MOVQ DX, AX + +emit_literal_done_emit_remainder_encodeBetterBlockAsm8B: + MOVQ dst_base+0(FP), CX + SUBQ CX, AX + MOVQ AX, ret+48(FP) + RET + +// func encodeSnappyBlockAsm(dst []byte, src []byte) int +// Requires: SSE2 +TEXT ·encodeSnappyBlockAsm(SB), $65560-56 + MOVQ dst_base+0(FP), AX + MOVQ $0x00000200, CX + LEAQ 24(SP), DX + PXOR X0, X0 + +zero_loop_encodeSnappyBlockAsm: + MOVOU X0, (DX) + MOVOU X0, 16(DX) + MOVOU X0, 32(DX) + MOVOU X0, 48(DX) + MOVOU X0, 64(DX) + MOVOU X0, 80(DX) + MOVOU X0, 96(DX) + MOVOU X0, 112(DX) + ADDQ $0x80, DX + DECQ CX + JNZ zero_loop_encodeSnappyBlockAsm + MOVL $0x00000000, 12(SP) + MOVQ src_len+32(FP), CX + LEAQ -9(CX), DX + LEAQ -8(CX), SI + MOVL 
SI, 8(SP) + SHRQ $0x05, CX + SUBL CX, DX + LEAQ (AX)(DX*1), DX + MOVQ DX, (SP) + MOVL $0x00000001, CX + MOVL CX, 16(SP) + MOVQ src_base+24(FP), DX + +search_loop_encodeSnappyBlockAsm: + MOVL CX, SI + SUBL 12(SP), SI + SHRL $0x06, SI + LEAL 4(CX)(SI*1), SI + CMPL SI, 8(SP) + JGE emit_remainder_encodeSnappyBlockAsm + MOVQ (DX)(CX*1), DI + MOVL SI, 20(SP) + MOVQ $0x0000cf1bbcdcbf9b, R9 + MOVQ DI, R10 + MOVQ DI, R11 + SHRQ $0x08, R11 + SHLQ $0x10, R10 + IMULQ R9, R10 + SHRQ $0x32, R10 + SHLQ $0x10, R11 + IMULQ R9, R11 + SHRQ $0x32, R11 + MOVL 24(SP)(R10*4), SI + MOVL 24(SP)(R11*4), R8 + MOVL CX, 24(SP)(R10*4) + LEAL 1(CX), R10 + MOVL R10, 24(SP)(R11*4) + MOVQ DI, R10 + SHRQ $0x10, R10 + SHLQ $0x10, R10 + IMULQ R9, R10 + SHRQ $0x32, R10 + MOVL CX, R9 + SUBL 16(SP), R9 + MOVL 1(DX)(R9*1), R11 + MOVQ DI, R9 + SHRQ $0x08, R9 + CMPL R9, R11 + JNE no_repeat_found_encodeSnappyBlockAsm + LEAL 1(CX), DI + MOVL 12(SP), SI + MOVL DI, R8 + SUBL 16(SP), R8 + JZ repeat_extend_back_end_encodeSnappyBlockAsm + +repeat_extend_back_loop_encodeSnappyBlockAsm: + CMPL DI, SI + JLE repeat_extend_back_end_encodeSnappyBlockAsm + MOVB -1(DX)(R8*1), BL + MOVB -1(DX)(DI*1), R9 + CMPB BL, R9 + JNE repeat_extend_back_end_encodeSnappyBlockAsm + LEAL -1(DI), DI + DECL R8 + JNZ repeat_extend_back_loop_encodeSnappyBlockAsm + +repeat_extend_back_end_encodeSnappyBlockAsm: + MOVL 12(SP), SI + CMPL SI, DI + JEQ emit_literal_done_repeat_emit_encodeSnappyBlockAsm + MOVL DI, R8 + MOVL DI, 12(SP) + LEAQ (DX)(SI*1), R9 + SUBL SI, R8 + LEAL -1(R8), SI + CMPL SI, $0x3c + JLT one_byte_repeat_emit_encodeSnappyBlockAsm + CMPL SI, $0x00000100 + JLT two_bytes_repeat_emit_encodeSnappyBlockAsm + CMPL SI, $0x00010000 + JLT three_bytes_repeat_emit_encodeSnappyBlockAsm + CMPL SI, $0x01000000 + JLT four_bytes_repeat_emit_encodeSnappyBlockAsm + MOVB $0xfc, (AX) + MOVL SI, 1(AX) + ADDQ $0x05, AX + JMP memmove_long_repeat_emit_encodeSnappyBlockAsm + +four_bytes_repeat_emit_encodeSnappyBlockAsm: + MOVL SI, R10 + SHRL $0x10, R10 + MOVB $0xf8, (AX) + MOVW SI, 1(AX) + MOVB R10, 3(AX) + ADDQ $0x04, AX + JMP memmove_long_repeat_emit_encodeSnappyBlockAsm + +three_bytes_repeat_emit_encodeSnappyBlockAsm: + MOVB $0xf4, (AX) + MOVW SI, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_repeat_emit_encodeSnappyBlockAsm + +two_bytes_repeat_emit_encodeSnappyBlockAsm: + MOVB $0xf0, (AX) + MOVB SI, 1(AX) + ADDQ $0x02, AX + CMPL SI, $0x40 + JL memmove_repeat_emit_encodeSnappyBlockAsm + JMP memmove_long_repeat_emit_encodeSnappyBlockAsm + +one_byte_repeat_emit_encodeSnappyBlockAsm: + SHLB $0x02, SI + MOVB SI, (AX) + ADDQ $0x01, AX + +memmove_repeat_emit_encodeSnappyBlockAsm: + LEAQ (AX)(R8*1), SI + + // genMemMoveShort + CMPQ R8, $0x08 + JLE emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm_memmove_move_8 + CMPQ R8, $0x10 + JBE emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm_memmove_move_8through16 + CMPQ R8, $0x20 + JBE emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm_memmove_move_17through32 + JMP emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm_memmove_move_33through64 + +emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm_memmove_move_8: + MOVQ (R9), R10 + MOVQ R10, (AX) + JMP memmove_end_copy_repeat_emit_encodeSnappyBlockAsm + +emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm_memmove_move_8through16: + MOVQ (R9), R10 + MOVQ -8(R9)(R8*1), R9 + MOVQ R10, (AX) + MOVQ R9, -8(AX)(R8*1) + JMP memmove_end_copy_repeat_emit_encodeSnappyBlockAsm + +emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm_memmove_move_17through32: + MOVOU (R9), X0 + MOVOU -16(R9)(R8*1), X1 + MOVOU X0, 
(AX) + MOVOU X1, -16(AX)(R8*1) + JMP memmove_end_copy_repeat_emit_encodeSnappyBlockAsm + +emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm_memmove_move_33through64: + MOVOU (R9), X0 + MOVOU 16(R9), X1 + MOVOU -32(R9)(R8*1), X2 + MOVOU -16(R9)(R8*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R8*1) + MOVOU X3, -16(AX)(R8*1) + +memmove_end_copy_repeat_emit_encodeSnappyBlockAsm: + MOVQ SI, AX + JMP emit_literal_done_repeat_emit_encodeSnappyBlockAsm + +memmove_long_repeat_emit_encodeSnappyBlockAsm: + LEAQ (AX)(R8*1), SI + + // genMemMoveLong + MOVOU (R9), X0 + MOVOU 16(R9), X1 + MOVOU -32(R9)(R8*1), X2 + MOVOU -16(R9)(R8*1), X3 + MOVQ R8, R11 + SHRQ $0x05, R11 + MOVQ AX, R10 + ANDL $0x0000001f, R10 + MOVQ $0x00000040, R12 + SUBQ R10, R12 + DECQ R11 + JA emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsmlarge_forward_sse_loop_32 + LEAQ -32(R9)(R12*1), R10 + LEAQ -32(AX)(R12*1), R13 + +emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsmlarge_big_loop_back: + MOVOU (R10), X4 + MOVOU 16(R10), X5 + MOVOA X4, (R13) + MOVOA X5, 16(R13) + ADDQ $0x20, R13 + ADDQ $0x20, R10 + ADDQ $0x20, R12 + DECQ R11 + JNA emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsmlarge_big_loop_back + +emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsmlarge_forward_sse_loop_32: + MOVOU -32(R9)(R12*1), X4 + MOVOU -16(R9)(R12*1), X5 + MOVOA X4, -32(AX)(R12*1) + MOVOA X5, -16(AX)(R12*1) + ADDQ $0x20, R12 + CMPQ R8, R12 + JAE emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsmlarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R8*1) + MOVOU X3, -16(AX)(R8*1) + MOVQ SI, AX + +emit_literal_done_repeat_emit_encodeSnappyBlockAsm: + ADDL $0x05, CX + MOVL CX, SI + SUBL 16(SP), SI + MOVQ src_len+32(FP), R8 + SUBL CX, R8 + LEAQ (DX)(CX*1), R9 + LEAQ (DX)(SI*1), SI + + // matchLen + XORL R11, R11 + CMPL R8, $0x08 + JL matchlen_single_repeat_extend_encodeSnappyBlockAsm + +matchlen_loopback_repeat_extend_encodeSnappyBlockAsm: + MOVQ (R9)(R11*1), R10 + XORQ (SI)(R11*1), R10 + TESTQ R10, R10 + JZ matchlen_loop_repeat_extend_encodeSnappyBlockAsm + BSFQ R10, R10 + SARQ $0x03, R10 + LEAL (R11)(R10*1), R11 + JMP repeat_extend_forward_end_encodeSnappyBlockAsm + +matchlen_loop_repeat_extend_encodeSnappyBlockAsm: + LEAL -8(R8), R8 + LEAL 8(R11), R11 + CMPL R8, $0x08 + JGE matchlen_loopback_repeat_extend_encodeSnappyBlockAsm + +matchlen_single_repeat_extend_encodeSnappyBlockAsm: + TESTL R8, R8 + JZ repeat_extend_forward_end_encodeSnappyBlockAsm + +matchlen_single_loopback_repeat_extend_encodeSnappyBlockAsm: + MOVB (R9)(R11*1), R10 + CMPB (SI)(R11*1), R10 + JNE repeat_extend_forward_end_encodeSnappyBlockAsm + LEAL 1(R11), R11 + DECL R8 + JNZ matchlen_single_loopback_repeat_extend_encodeSnappyBlockAsm + +repeat_extend_forward_end_encodeSnappyBlockAsm: + ADDL R11, CX + MOVL CX, SI + SUBL DI, SI + MOVL 16(SP), DI + + // emitCopy + CMPL DI, $0x00010000 + JL two_byte_offset_repeat_as_copy_encodeSnappyBlockAsm + +four_bytes_loop_back_repeat_as_copy_encodeSnappyBlockAsm: + CMPL SI, $0x40 + JLE four_bytes_remain_repeat_as_copy_encodeSnappyBlockAsm + MOVB $0xff, (AX) + MOVL DI, 1(AX) + LEAL -64(SI), SI + ADDQ $0x05, AX + CMPL SI, $0x04 + JL four_bytes_remain_repeat_as_copy_encodeSnappyBlockAsm + JMP four_bytes_loop_back_repeat_as_copy_encodeSnappyBlockAsm + +four_bytes_remain_repeat_as_copy_encodeSnappyBlockAsm: + TESTL SI, SI + JZ repeat_end_emit_encodeSnappyBlockAsm + MOVB $0x03, BL + LEAL -4(BX)(SI*4), SI + MOVB SI, (AX) + MOVL DI, 1(AX) + ADDQ $0x05, AX + JMP 
repeat_end_emit_encodeSnappyBlockAsm + +two_byte_offset_repeat_as_copy_encodeSnappyBlockAsm: + CMPL SI, $0x40 + JLE two_byte_offset_short_repeat_as_copy_encodeSnappyBlockAsm + MOVB $0xee, (AX) + MOVW DI, 1(AX) + LEAL -60(SI), SI + ADDQ $0x03, AX + JMP two_byte_offset_repeat_as_copy_encodeSnappyBlockAsm + +two_byte_offset_short_repeat_as_copy_encodeSnappyBlockAsm: + CMPL SI, $0x0c + JGE emit_copy_three_repeat_as_copy_encodeSnappyBlockAsm + CMPL DI, $0x00000800 + JGE emit_copy_three_repeat_as_copy_encodeSnappyBlockAsm + MOVB $0x01, BL + LEAL -16(BX)(SI*4), SI + MOVB DI, 1(AX) + SHRL $0x08, DI + SHLL $0x05, DI + ORL DI, SI + MOVB SI, (AX) + ADDQ $0x02, AX + JMP repeat_end_emit_encodeSnappyBlockAsm + +emit_copy_three_repeat_as_copy_encodeSnappyBlockAsm: + MOVB $0x02, BL + LEAL -4(BX)(SI*4), SI + MOVB SI, (AX) + MOVW DI, 1(AX) + ADDQ $0x03, AX + +repeat_end_emit_encodeSnappyBlockAsm: + MOVL CX, 12(SP) + JMP search_loop_encodeSnappyBlockAsm + +no_repeat_found_encodeSnappyBlockAsm: + CMPL (DX)(SI*1), DI + JEQ candidate_match_encodeSnappyBlockAsm + SHRQ $0x08, DI + MOVL 24(SP)(R10*4), SI + LEAL 2(CX), R9 + CMPL (DX)(R8*1), DI + JEQ candidate2_match_encodeSnappyBlockAsm + MOVL R9, 24(SP)(R10*4) + SHRQ $0x08, DI + CMPL (DX)(SI*1), DI + JEQ candidate3_match_encodeSnappyBlockAsm + MOVL 20(SP), CX + JMP search_loop_encodeSnappyBlockAsm + +candidate3_match_encodeSnappyBlockAsm: + ADDL $0x02, CX + JMP candidate_match_encodeSnappyBlockAsm + +candidate2_match_encodeSnappyBlockAsm: + MOVL R9, 24(SP)(R10*4) + INCL CX + MOVL R8, SI + +candidate_match_encodeSnappyBlockAsm: + MOVL 12(SP), DI + TESTL SI, SI + JZ match_extend_back_end_encodeSnappyBlockAsm + +match_extend_back_loop_encodeSnappyBlockAsm: + CMPL CX, DI + JLE match_extend_back_end_encodeSnappyBlockAsm + MOVB -1(DX)(SI*1), BL + MOVB -1(DX)(CX*1), R8 + CMPB BL, R8 + JNE match_extend_back_end_encodeSnappyBlockAsm + LEAL -1(CX), CX + DECL SI + JZ match_extend_back_end_encodeSnappyBlockAsm + JMP match_extend_back_loop_encodeSnappyBlockAsm + +match_extend_back_end_encodeSnappyBlockAsm: + MOVL CX, DI + SUBL 12(SP), DI + LEAQ 5(AX)(DI*1), DI + CMPQ DI, (SP) + JL match_dst_size_check_encodeSnappyBlockAsm + MOVQ $0x00000000, ret+48(FP) + RET + +match_dst_size_check_encodeSnappyBlockAsm: + MOVL CX, DI + MOVL 12(SP), R8 + CMPL R8, DI + JEQ emit_literal_done_match_emit_encodeSnappyBlockAsm + MOVL DI, R9 + MOVL DI, 12(SP) + LEAQ (DX)(R8*1), DI + SUBL R8, R9 + LEAL -1(R9), R8 + CMPL R8, $0x3c + JLT one_byte_match_emit_encodeSnappyBlockAsm + CMPL R8, $0x00000100 + JLT two_bytes_match_emit_encodeSnappyBlockAsm + CMPL R8, $0x00010000 + JLT three_bytes_match_emit_encodeSnappyBlockAsm + CMPL R8, $0x01000000 + JLT four_bytes_match_emit_encodeSnappyBlockAsm + MOVB $0xfc, (AX) + MOVL R8, 1(AX) + ADDQ $0x05, AX + JMP memmove_long_match_emit_encodeSnappyBlockAsm + +four_bytes_match_emit_encodeSnappyBlockAsm: + MOVL R8, R10 + SHRL $0x10, R10 + MOVB $0xf8, (AX) + MOVW R8, 1(AX) + MOVB R10, 3(AX) + ADDQ $0x04, AX + JMP memmove_long_match_emit_encodeSnappyBlockAsm + +three_bytes_match_emit_encodeSnappyBlockAsm: + MOVB $0xf4, (AX) + MOVW R8, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_match_emit_encodeSnappyBlockAsm + +two_bytes_match_emit_encodeSnappyBlockAsm: + MOVB $0xf0, (AX) + MOVB R8, 1(AX) + ADDQ $0x02, AX + CMPL R8, $0x40 + JL memmove_match_emit_encodeSnappyBlockAsm + JMP memmove_long_match_emit_encodeSnappyBlockAsm + +one_byte_match_emit_encodeSnappyBlockAsm: + SHLB $0x02, R8 + MOVB R8, (AX) + ADDQ $0x01, AX + +memmove_match_emit_encodeSnappyBlockAsm: + LEAQ (AX)(R9*1), R8 
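+	// R8 marks the end of the literal copy; AX is advanced to it
+	// once the move below completes.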
+ + // genMemMoveShort + CMPQ R9, $0x08 + JLE emit_lit_memmove_match_emit_encodeSnappyBlockAsm_memmove_move_8 + CMPQ R9, $0x10 + JBE emit_lit_memmove_match_emit_encodeSnappyBlockAsm_memmove_move_8through16 + CMPQ R9, $0x20 + JBE emit_lit_memmove_match_emit_encodeSnappyBlockAsm_memmove_move_17through32 + JMP emit_lit_memmove_match_emit_encodeSnappyBlockAsm_memmove_move_33through64 + +emit_lit_memmove_match_emit_encodeSnappyBlockAsm_memmove_move_8: + MOVQ (DI), R10 + MOVQ R10, (AX) + JMP memmove_end_copy_match_emit_encodeSnappyBlockAsm + +emit_lit_memmove_match_emit_encodeSnappyBlockAsm_memmove_move_8through16: + MOVQ (DI), R10 + MOVQ -8(DI)(R9*1), DI + MOVQ R10, (AX) + MOVQ DI, -8(AX)(R9*1) + JMP memmove_end_copy_match_emit_encodeSnappyBlockAsm + +emit_lit_memmove_match_emit_encodeSnappyBlockAsm_memmove_move_17through32: + MOVOU (DI), X0 + MOVOU -16(DI)(R9*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(R9*1) + JMP memmove_end_copy_match_emit_encodeSnappyBlockAsm + +emit_lit_memmove_match_emit_encodeSnappyBlockAsm_memmove_move_33through64: + MOVOU (DI), X0 + MOVOU 16(DI), X1 + MOVOU -32(DI)(R9*1), X2 + MOVOU -16(DI)(R9*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R9*1) + MOVOU X3, -16(AX)(R9*1) + +memmove_end_copy_match_emit_encodeSnappyBlockAsm: + MOVQ R8, AX + JMP emit_literal_done_match_emit_encodeSnappyBlockAsm + +memmove_long_match_emit_encodeSnappyBlockAsm: + LEAQ (AX)(R9*1), R8 + + // genMemMoveLong + MOVOU (DI), X0 + MOVOU 16(DI), X1 + MOVOU -32(DI)(R9*1), X2 + MOVOU -16(DI)(R9*1), X3 + MOVQ R9, R11 + SHRQ $0x05, R11 + MOVQ AX, R10 + ANDL $0x0000001f, R10 + MOVQ $0x00000040, R12 + SUBQ R10, R12 + DECQ R11 + JA emit_lit_memmove_long_match_emit_encodeSnappyBlockAsmlarge_forward_sse_loop_32 + LEAQ -32(DI)(R12*1), R10 + LEAQ -32(AX)(R12*1), R13 + +emit_lit_memmove_long_match_emit_encodeSnappyBlockAsmlarge_big_loop_back: + MOVOU (R10), X4 + MOVOU 16(R10), X5 + MOVOA X4, (R13) + MOVOA X5, 16(R13) + ADDQ $0x20, R13 + ADDQ $0x20, R10 + ADDQ $0x20, R12 + DECQ R11 + JNA emit_lit_memmove_long_match_emit_encodeSnappyBlockAsmlarge_big_loop_back + +emit_lit_memmove_long_match_emit_encodeSnappyBlockAsmlarge_forward_sse_loop_32: + MOVOU -32(DI)(R12*1), X4 + MOVOU -16(DI)(R12*1), X5 + MOVOA X4, -32(AX)(R12*1) + MOVOA X5, -16(AX)(R12*1) + ADDQ $0x20, R12 + CMPQ R9, R12 + JAE emit_lit_memmove_long_match_emit_encodeSnappyBlockAsmlarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R9*1) + MOVOU X3, -16(AX)(R9*1) + MOVQ R8, AX + +emit_literal_done_match_emit_encodeSnappyBlockAsm: +match_nolit_loop_encodeSnappyBlockAsm: + MOVL CX, DI + SUBL SI, DI + MOVL DI, 16(SP) + ADDL $0x04, CX + ADDL $0x04, SI + MOVQ src_len+32(FP), DI + SUBL CX, DI + LEAQ (DX)(CX*1), R8 + LEAQ (DX)(SI*1), SI + + // matchLen + XORL R10, R10 + CMPL DI, $0x08 + JL matchlen_single_match_nolit_encodeSnappyBlockAsm + +matchlen_loopback_match_nolit_encodeSnappyBlockAsm: + MOVQ (R8)(R10*1), R9 + XORQ (SI)(R10*1), R9 + TESTQ R9, R9 + JZ matchlen_loop_match_nolit_encodeSnappyBlockAsm + BSFQ R9, R9 + SARQ $0x03, R9 + LEAL (R10)(R9*1), R10 + JMP match_nolit_end_encodeSnappyBlockAsm + +matchlen_loop_match_nolit_encodeSnappyBlockAsm: + LEAL -8(DI), DI + LEAL 8(R10), R10 + CMPL DI, $0x08 + JGE matchlen_loopback_match_nolit_encodeSnappyBlockAsm + +matchlen_single_match_nolit_encodeSnappyBlockAsm: + TESTL DI, DI + JZ match_nolit_end_encodeSnappyBlockAsm + +matchlen_single_loopback_match_nolit_encodeSnappyBlockAsm: + MOVB (R8)(R10*1), R9 + CMPB (SI)(R10*1), R9 + JNE match_nolit_end_encodeSnappyBlockAsm + 
LEAL 1(R10), R10 + DECL DI + JNZ matchlen_single_loopback_match_nolit_encodeSnappyBlockAsm + +match_nolit_end_encodeSnappyBlockAsm: + ADDL R10, CX + MOVL 16(SP), SI + ADDL $0x04, R10 + MOVL CX, 12(SP) + + // emitCopy + CMPL SI, $0x00010000 + JL two_byte_offset_match_nolit_encodeSnappyBlockAsm + +four_bytes_loop_back_match_nolit_encodeSnappyBlockAsm: + CMPL R10, $0x40 + JLE four_bytes_remain_match_nolit_encodeSnappyBlockAsm + MOVB $0xff, (AX) + MOVL SI, 1(AX) + LEAL -64(R10), R10 + ADDQ $0x05, AX + CMPL R10, $0x04 + JL four_bytes_remain_match_nolit_encodeSnappyBlockAsm + JMP four_bytes_loop_back_match_nolit_encodeSnappyBlockAsm + +four_bytes_remain_match_nolit_encodeSnappyBlockAsm: + TESTL R10, R10 + JZ match_nolit_emitcopy_end_encodeSnappyBlockAsm + MOVB $0x03, BL + LEAL -4(BX)(R10*4), R10 + MOVB R10, (AX) + MOVL SI, 1(AX) + ADDQ $0x05, AX + JMP match_nolit_emitcopy_end_encodeSnappyBlockAsm + +two_byte_offset_match_nolit_encodeSnappyBlockAsm: + CMPL R10, $0x40 + JLE two_byte_offset_short_match_nolit_encodeSnappyBlockAsm + MOVB $0xee, (AX) + MOVW SI, 1(AX) + LEAL -60(R10), R10 + ADDQ $0x03, AX + JMP two_byte_offset_match_nolit_encodeSnappyBlockAsm + +two_byte_offset_short_match_nolit_encodeSnappyBlockAsm: + CMPL R10, $0x0c + JGE emit_copy_three_match_nolit_encodeSnappyBlockAsm + CMPL SI, $0x00000800 + JGE emit_copy_three_match_nolit_encodeSnappyBlockAsm + MOVB $0x01, BL + LEAL -16(BX)(R10*4), R10 + MOVB SI, 1(AX) + SHRL $0x08, SI + SHLL $0x05, SI + ORL SI, R10 + MOVB R10, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeSnappyBlockAsm + +emit_copy_three_match_nolit_encodeSnappyBlockAsm: + MOVB $0x02, BL + LEAL -4(BX)(R10*4), R10 + MOVB R10, (AX) + MOVW SI, 1(AX) + ADDQ $0x03, AX + +match_nolit_emitcopy_end_encodeSnappyBlockAsm: + CMPL CX, 8(SP) + JGE emit_remainder_encodeSnappyBlockAsm + MOVQ -2(DX)(CX*1), DI + CMPQ AX, (SP) + JL match_nolit_dst_ok_encodeSnappyBlockAsm + MOVQ $0x00000000, ret+48(FP) + RET + +match_nolit_dst_ok_encodeSnappyBlockAsm: + MOVQ $0x0000cf1bbcdcbf9b, R9 + MOVQ DI, R8 + SHRQ $0x10, DI + MOVQ DI, SI + SHLQ $0x10, R8 + IMULQ R9, R8 + SHRQ $0x32, R8 + SHLQ $0x10, SI + IMULQ R9, SI + SHRQ $0x32, SI + LEAL -2(CX), R9 + LEAQ 24(SP)(SI*4), R10 + MOVL (R10), SI + MOVL R9, 24(SP)(R8*4) + MOVL CX, (R10) + CMPL (DX)(SI*1), DI + JEQ match_nolit_loop_encodeSnappyBlockAsm + INCL CX + JMP search_loop_encodeSnappyBlockAsm + +emit_remainder_encodeSnappyBlockAsm: + MOVQ src_len+32(FP), CX + SUBL 12(SP), CX + LEAQ 5(AX)(CX*1), CX + CMPQ CX, (SP) + JL emit_remainder_ok_encodeSnappyBlockAsm + MOVQ $0x00000000, ret+48(FP) + RET + +emit_remainder_ok_encodeSnappyBlockAsm: + MOVQ src_len+32(FP), CX + MOVL 12(SP), BX + CMPL BX, CX + JEQ emit_literal_done_emit_remainder_encodeSnappyBlockAsm + MOVL CX, SI + MOVL CX, 12(SP) + LEAQ (DX)(BX*1), CX + SUBL BX, SI + LEAL -1(SI), DX + CMPL DX, $0x3c + JLT one_byte_emit_remainder_encodeSnappyBlockAsm + CMPL DX, $0x00000100 + JLT two_bytes_emit_remainder_encodeSnappyBlockAsm + CMPL DX, $0x00010000 + JLT three_bytes_emit_remainder_encodeSnappyBlockAsm + CMPL DX, $0x01000000 + JLT four_bytes_emit_remainder_encodeSnappyBlockAsm + MOVB $0xfc, (AX) + MOVL DX, 1(AX) + ADDQ $0x05, AX + JMP memmove_long_emit_remainder_encodeSnappyBlockAsm + +four_bytes_emit_remainder_encodeSnappyBlockAsm: + MOVL DX, BX + SHRL $0x10, BX + MOVB $0xf8, (AX) + MOVW DX, 1(AX) + MOVB BL, 3(AX) + ADDQ $0x04, AX + JMP memmove_long_emit_remainder_encodeSnappyBlockAsm + +three_bytes_emit_remainder_encodeSnappyBlockAsm: + MOVB $0xf4, (AX) + MOVW DX, 1(AX) + ADDQ $0x03, AX + JMP 
memmove_long_emit_remainder_encodeSnappyBlockAsm + +two_bytes_emit_remainder_encodeSnappyBlockAsm: + MOVB $0xf0, (AX) + MOVB DL, 1(AX) + ADDQ $0x02, AX + CMPL DX, $0x40 + JL memmove_emit_remainder_encodeSnappyBlockAsm + JMP memmove_long_emit_remainder_encodeSnappyBlockAsm + +one_byte_emit_remainder_encodeSnappyBlockAsm: + SHLB $0x02, DL + MOVB DL, (AX) + ADDQ $0x01, AX + +memmove_emit_remainder_encodeSnappyBlockAsm: + LEAQ (AX)(SI*1), DX + MOVL SI, BX + + // genMemMoveShort + CMPQ BX, $0x08 + JLE emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm_memmove_move_8 + CMPQ BX, $0x10 + JBE emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm_memmove_move_8through16 + CMPQ BX, $0x20 + JBE emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm_memmove_move_17through32 + JMP emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm_memmove_move_33through64 + +emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm_memmove_move_8: + MOVQ (CX), SI + MOVQ SI, (AX) + JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm + +emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm_memmove_move_8through16: + MOVQ (CX), SI + MOVQ -8(CX)(BX*1), CX + MOVQ SI, (AX) + MOVQ CX, -8(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm + +emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm_memmove_move_17through32: + MOVOU (CX), X0 + MOVOU -16(CX)(BX*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm + +emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm_memmove_move_33through64: + MOVOU (CX), X0 + MOVOU 16(CX), X1 + MOVOU -32(CX)(BX*1), X2 + MOVOU -16(CX)(BX*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(BX*1) + MOVOU X3, -16(AX)(BX*1) + +memmove_end_copy_emit_remainder_encodeSnappyBlockAsm: + MOVQ DX, AX + JMP emit_literal_done_emit_remainder_encodeSnappyBlockAsm + +memmove_long_emit_remainder_encodeSnappyBlockAsm: + LEAQ (AX)(SI*1), DX + MOVL SI, BX + + // genMemMoveLong + MOVOU (CX), X0 + MOVOU 16(CX), X1 + MOVOU -32(CX)(BX*1), X2 + MOVOU -16(CX)(BX*1), X3 + MOVQ BX, DI + SHRQ $0x05, DI + MOVQ AX, SI + ANDL $0x0000001f, SI + MOVQ $0x00000040, R8 + SUBQ SI, R8 + DECQ DI + JA emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsmlarge_forward_sse_loop_32 + LEAQ -32(CX)(R8*1), SI + LEAQ -32(AX)(R8*1), R9 + +emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsmlarge_big_loop_back: + MOVOU (SI), X4 + MOVOU 16(SI), X5 + MOVOA X4, (R9) + MOVOA X5, 16(R9) + ADDQ $0x20, R9 + ADDQ $0x20, SI + ADDQ $0x20, R8 + DECQ DI + JNA emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsmlarge_big_loop_back + +emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsmlarge_forward_sse_loop_32: + MOVOU -32(CX)(R8*1), X4 + MOVOU -16(CX)(R8*1), X5 + MOVOA X4, -32(AX)(R8*1) + MOVOA X5, -16(AX)(R8*1) + ADDQ $0x20, R8 + CMPQ BX, R8 + JAE emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsmlarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(BX*1) + MOVOU X3, -16(AX)(BX*1) + MOVQ DX, AX + +emit_literal_done_emit_remainder_encodeSnappyBlockAsm: + MOVQ dst_base+0(FP), CX + SUBQ CX, AX + MOVQ AX, ret+48(FP) + RET + +// func encodeSnappyBlockAsm64K(dst []byte, src []byte) int +// Requires: SSE2 +TEXT ·encodeSnappyBlockAsm64K(SB), $65560-56 + MOVQ dst_base+0(FP), AX + MOVQ $0x00000200, CX + LEAQ 24(SP), DX + PXOR X0, X0 + +zero_loop_encodeSnappyBlockAsm64K: + MOVOU X0, (DX) + MOVOU X0, 16(DX) + MOVOU X0, 32(DX) + MOVOU X0, 48(DX) + MOVOU X0, 64(DX) + MOVOU X0, 80(DX) + MOVOU X0, 96(DX) + MOVOU X0, 112(DX) + ADDQ $0x80, 
DX + DECQ CX + JNZ zero_loop_encodeSnappyBlockAsm64K + MOVL $0x00000000, 12(SP) + MOVQ src_len+32(FP), CX + LEAQ -9(CX), DX + LEAQ -8(CX), SI + MOVL SI, 8(SP) + SHRQ $0x05, CX + SUBL CX, DX + LEAQ (AX)(DX*1), DX + MOVQ DX, (SP) + MOVL $0x00000001, CX + MOVL CX, 16(SP) + MOVQ src_base+24(FP), DX + +search_loop_encodeSnappyBlockAsm64K: + MOVL CX, SI + SUBL 12(SP), SI + SHRL $0x06, SI + LEAL 4(CX)(SI*1), SI + CMPL SI, 8(SP) + JGE emit_remainder_encodeSnappyBlockAsm64K + MOVQ (DX)(CX*1), DI + MOVL SI, 20(SP) + MOVQ $0x0000cf1bbcdcbf9b, R9 + MOVQ DI, R10 + MOVQ DI, R11 + SHRQ $0x08, R11 + SHLQ $0x10, R10 + IMULQ R9, R10 + SHRQ $0x32, R10 + SHLQ $0x10, R11 + IMULQ R9, R11 + SHRQ $0x32, R11 + MOVL 24(SP)(R10*4), SI + MOVL 24(SP)(R11*4), R8 + MOVL CX, 24(SP)(R10*4) + LEAL 1(CX), R10 + MOVL R10, 24(SP)(R11*4) + MOVQ DI, R10 + SHRQ $0x10, R10 + SHLQ $0x10, R10 + IMULQ R9, R10 + SHRQ $0x32, R10 + MOVL CX, R9 + SUBL 16(SP), R9 + MOVL 1(DX)(R9*1), R11 + MOVQ DI, R9 + SHRQ $0x08, R9 + CMPL R9, R11 + JNE no_repeat_found_encodeSnappyBlockAsm64K + LEAL 1(CX), DI + MOVL 12(SP), SI + MOVL DI, R8 + SUBL 16(SP), R8 + JZ repeat_extend_back_end_encodeSnappyBlockAsm64K + +repeat_extend_back_loop_encodeSnappyBlockAsm64K: + CMPL DI, SI + JLE repeat_extend_back_end_encodeSnappyBlockAsm64K + MOVB -1(DX)(R8*1), BL + MOVB -1(DX)(DI*1), R9 + CMPB BL, R9 + JNE repeat_extend_back_end_encodeSnappyBlockAsm64K + LEAL -1(DI), DI + DECL R8 + JNZ repeat_extend_back_loop_encodeSnappyBlockAsm64K + +repeat_extend_back_end_encodeSnappyBlockAsm64K: + MOVL 12(SP), SI + CMPL SI, DI + JEQ emit_literal_done_repeat_emit_encodeSnappyBlockAsm64K + MOVL DI, R8 + MOVL DI, 12(SP) + LEAQ (DX)(SI*1), R9 + SUBL SI, R8 + LEAL -1(R8), SI + CMPL SI, $0x3c + JLT one_byte_repeat_emit_encodeSnappyBlockAsm64K + CMPL SI, $0x00000100 + JLT two_bytes_repeat_emit_encodeSnappyBlockAsm64K + MOVB $0xf4, (AX) + MOVW SI, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_repeat_emit_encodeSnappyBlockAsm64K + +two_bytes_repeat_emit_encodeSnappyBlockAsm64K: + MOVB $0xf0, (AX) + MOVB SI, 1(AX) + ADDQ $0x02, AX + CMPL SI, $0x40 + JL memmove_repeat_emit_encodeSnappyBlockAsm64K + JMP memmove_long_repeat_emit_encodeSnappyBlockAsm64K + +one_byte_repeat_emit_encodeSnappyBlockAsm64K: + SHLB $0x02, SI + MOVB SI, (AX) + ADDQ $0x01, AX + +memmove_repeat_emit_encodeSnappyBlockAsm64K: + LEAQ (AX)(R8*1), SI + + // genMemMoveShort + CMPQ R8, $0x08 + JLE emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm64K_memmove_move_8 + CMPQ R8, $0x10 + JBE emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm64K_memmove_move_8through16 + CMPQ R8, $0x20 + JBE emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm64K_memmove_move_17through32 + JMP emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm64K_memmove_move_33through64 + +emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm64K_memmove_move_8: + MOVQ (R9), R10 + MOVQ R10, (AX) + JMP memmove_end_copy_repeat_emit_encodeSnappyBlockAsm64K + +emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm64K_memmove_move_8through16: + MOVQ (R9), R10 + MOVQ -8(R9)(R8*1), R9 + MOVQ R10, (AX) + MOVQ R9, -8(AX)(R8*1) + JMP memmove_end_copy_repeat_emit_encodeSnappyBlockAsm64K + +emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm64K_memmove_move_17through32: + MOVOU (R9), X0 + MOVOU -16(R9)(R8*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(R8*1) + JMP memmove_end_copy_repeat_emit_encodeSnappyBlockAsm64K + +emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm64K_memmove_move_33through64: + MOVOU (R9), X0 + MOVOU 16(R9), X1 + MOVOU -32(R9)(R8*1), X2 + MOVOU -16(R9)(R8*1), X3 + MOVOU X0, (AX) + 
MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R8*1) + MOVOU X3, -16(AX)(R8*1) + +memmove_end_copy_repeat_emit_encodeSnappyBlockAsm64K: + MOVQ SI, AX + JMP emit_literal_done_repeat_emit_encodeSnappyBlockAsm64K + +memmove_long_repeat_emit_encodeSnappyBlockAsm64K: + LEAQ (AX)(R8*1), SI + + // genMemMoveLong + MOVOU (R9), X0 + MOVOU 16(R9), X1 + MOVOU -32(R9)(R8*1), X2 + MOVOU -16(R9)(R8*1), X3 + MOVQ R8, R11 + SHRQ $0x05, R11 + MOVQ AX, R10 + ANDL $0x0000001f, R10 + MOVQ $0x00000040, R12 + SUBQ R10, R12 + DECQ R11 + JA emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm64Klarge_forward_sse_loop_32 + LEAQ -32(R9)(R12*1), R10 + LEAQ -32(AX)(R12*1), R13 + +emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm64Klarge_big_loop_back: + MOVOU (R10), X4 + MOVOU 16(R10), X5 + MOVOA X4, (R13) + MOVOA X5, 16(R13) + ADDQ $0x20, R13 + ADDQ $0x20, R10 + ADDQ $0x20, R12 + DECQ R11 + JNA emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm64Klarge_big_loop_back + +emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm64Klarge_forward_sse_loop_32: + MOVOU -32(R9)(R12*1), X4 + MOVOU -16(R9)(R12*1), X5 + MOVOA X4, -32(AX)(R12*1) + MOVOA X5, -16(AX)(R12*1) + ADDQ $0x20, R12 + CMPQ R8, R12 + JAE emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm64Klarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R8*1) + MOVOU X3, -16(AX)(R8*1) + MOVQ SI, AX + +emit_literal_done_repeat_emit_encodeSnappyBlockAsm64K: + ADDL $0x05, CX + MOVL CX, SI + SUBL 16(SP), SI + MOVQ src_len+32(FP), R8 + SUBL CX, R8 + LEAQ (DX)(CX*1), R9 + LEAQ (DX)(SI*1), SI + + // matchLen + XORL R11, R11 + CMPL R8, $0x08 + JL matchlen_single_repeat_extend_encodeSnappyBlockAsm64K + +matchlen_loopback_repeat_extend_encodeSnappyBlockAsm64K: + MOVQ (R9)(R11*1), R10 + XORQ (SI)(R11*1), R10 + TESTQ R10, R10 + JZ matchlen_loop_repeat_extend_encodeSnappyBlockAsm64K + BSFQ R10, R10 + SARQ $0x03, R10 + LEAL (R11)(R10*1), R11 + JMP repeat_extend_forward_end_encodeSnappyBlockAsm64K + +matchlen_loop_repeat_extend_encodeSnappyBlockAsm64K: + LEAL -8(R8), R8 + LEAL 8(R11), R11 + CMPL R8, $0x08 + JGE matchlen_loopback_repeat_extend_encodeSnappyBlockAsm64K + +matchlen_single_repeat_extend_encodeSnappyBlockAsm64K: + TESTL R8, R8 + JZ repeat_extend_forward_end_encodeSnappyBlockAsm64K + +matchlen_single_loopback_repeat_extend_encodeSnappyBlockAsm64K: + MOVB (R9)(R11*1), R10 + CMPB (SI)(R11*1), R10 + JNE repeat_extend_forward_end_encodeSnappyBlockAsm64K + LEAL 1(R11), R11 + DECL R8 + JNZ matchlen_single_loopback_repeat_extend_encodeSnappyBlockAsm64K + +repeat_extend_forward_end_encodeSnappyBlockAsm64K: + ADDL R11, CX + MOVL CX, SI + SUBL DI, SI + MOVL 16(SP), DI + + // emitCopy +two_byte_offset_repeat_as_copy_encodeSnappyBlockAsm64K: + CMPL SI, $0x40 + JLE two_byte_offset_short_repeat_as_copy_encodeSnappyBlockAsm64K + MOVB $0xee, (AX) + MOVW DI, 1(AX) + LEAL -60(SI), SI + ADDQ $0x03, AX + JMP two_byte_offset_repeat_as_copy_encodeSnappyBlockAsm64K + +two_byte_offset_short_repeat_as_copy_encodeSnappyBlockAsm64K: + CMPL SI, $0x0c + JGE emit_copy_three_repeat_as_copy_encodeSnappyBlockAsm64K + CMPL DI, $0x00000800 + JGE emit_copy_three_repeat_as_copy_encodeSnappyBlockAsm64K + MOVB $0x01, BL + LEAL -16(BX)(SI*4), SI + MOVB DI, 1(AX) + SHRL $0x08, DI + SHLL $0x05, DI + ORL DI, SI + MOVB SI, (AX) + ADDQ $0x02, AX + JMP repeat_end_emit_encodeSnappyBlockAsm64K + +emit_copy_three_repeat_as_copy_encodeSnappyBlockAsm64K: + MOVB $0x02, BL + LEAL -4(BX)(SI*4), SI + MOVB SI, (AX) + MOVW DI, 1(AX) + ADDQ $0x03, AX + +repeat_end_emit_encodeSnappyBlockAsm64K: + 
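+	// Repeat encoded; store the updated position and resume scanning.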
MOVL CX, 12(SP) + JMP search_loop_encodeSnappyBlockAsm64K + +no_repeat_found_encodeSnappyBlockAsm64K: + CMPL (DX)(SI*1), DI + JEQ candidate_match_encodeSnappyBlockAsm64K + SHRQ $0x08, DI + MOVL 24(SP)(R10*4), SI + LEAL 2(CX), R9 + CMPL (DX)(R8*1), DI + JEQ candidate2_match_encodeSnappyBlockAsm64K + MOVL R9, 24(SP)(R10*4) + SHRQ $0x08, DI + CMPL (DX)(SI*1), DI + JEQ candidate3_match_encodeSnappyBlockAsm64K + MOVL 20(SP), CX + JMP search_loop_encodeSnappyBlockAsm64K + +candidate3_match_encodeSnappyBlockAsm64K: + ADDL $0x02, CX + JMP candidate_match_encodeSnappyBlockAsm64K + +candidate2_match_encodeSnappyBlockAsm64K: + MOVL R9, 24(SP)(R10*4) + INCL CX + MOVL R8, SI + +candidate_match_encodeSnappyBlockAsm64K: + MOVL 12(SP), DI + TESTL SI, SI + JZ match_extend_back_end_encodeSnappyBlockAsm64K + +match_extend_back_loop_encodeSnappyBlockAsm64K: + CMPL CX, DI + JLE match_extend_back_end_encodeSnappyBlockAsm64K + MOVB -1(DX)(SI*1), BL + MOVB -1(DX)(CX*1), R8 + CMPB BL, R8 + JNE match_extend_back_end_encodeSnappyBlockAsm64K + LEAL -1(CX), CX + DECL SI + JZ match_extend_back_end_encodeSnappyBlockAsm64K + JMP match_extend_back_loop_encodeSnappyBlockAsm64K + +match_extend_back_end_encodeSnappyBlockAsm64K: + MOVL CX, DI + SUBL 12(SP), DI + LEAQ 3(AX)(DI*1), DI + CMPQ DI, (SP) + JL match_dst_size_check_encodeSnappyBlockAsm64K + MOVQ $0x00000000, ret+48(FP) + RET + +match_dst_size_check_encodeSnappyBlockAsm64K: + MOVL CX, DI + MOVL 12(SP), R8 + CMPL R8, DI + JEQ emit_literal_done_match_emit_encodeSnappyBlockAsm64K + MOVL DI, R9 + MOVL DI, 12(SP) + LEAQ (DX)(R8*1), DI + SUBL R8, R9 + LEAL -1(R9), R8 + CMPL R8, $0x3c + JLT one_byte_match_emit_encodeSnappyBlockAsm64K + CMPL R8, $0x00000100 + JLT two_bytes_match_emit_encodeSnappyBlockAsm64K + MOVB $0xf4, (AX) + MOVW R8, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_match_emit_encodeSnappyBlockAsm64K + +two_bytes_match_emit_encodeSnappyBlockAsm64K: + MOVB $0xf0, (AX) + MOVB R8, 1(AX) + ADDQ $0x02, AX + CMPL R8, $0x40 + JL memmove_match_emit_encodeSnappyBlockAsm64K + JMP memmove_long_match_emit_encodeSnappyBlockAsm64K + +one_byte_match_emit_encodeSnappyBlockAsm64K: + SHLB $0x02, R8 + MOVB R8, (AX) + ADDQ $0x01, AX + +memmove_match_emit_encodeSnappyBlockAsm64K: + LEAQ (AX)(R9*1), R8 + + // genMemMoveShort + CMPQ R9, $0x08 + JLE emit_lit_memmove_match_emit_encodeSnappyBlockAsm64K_memmove_move_8 + CMPQ R9, $0x10 + JBE emit_lit_memmove_match_emit_encodeSnappyBlockAsm64K_memmove_move_8through16 + CMPQ R9, $0x20 + JBE emit_lit_memmove_match_emit_encodeSnappyBlockAsm64K_memmove_move_17through32 + JMP emit_lit_memmove_match_emit_encodeSnappyBlockAsm64K_memmove_move_33through64 + +emit_lit_memmove_match_emit_encodeSnappyBlockAsm64K_memmove_move_8: + MOVQ (DI), R10 + MOVQ R10, (AX) + JMP memmove_end_copy_match_emit_encodeSnappyBlockAsm64K + +emit_lit_memmove_match_emit_encodeSnappyBlockAsm64K_memmove_move_8through16: + MOVQ (DI), R10 + MOVQ -8(DI)(R9*1), DI + MOVQ R10, (AX) + MOVQ DI, -8(AX)(R9*1) + JMP memmove_end_copy_match_emit_encodeSnappyBlockAsm64K + +emit_lit_memmove_match_emit_encodeSnappyBlockAsm64K_memmove_move_17through32: + MOVOU (DI), X0 + MOVOU -16(DI)(R9*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(R9*1) + JMP memmove_end_copy_match_emit_encodeSnappyBlockAsm64K + +emit_lit_memmove_match_emit_encodeSnappyBlockAsm64K_memmove_move_33through64: + MOVOU (DI), X0 + MOVOU 16(DI), X1 + MOVOU -32(DI)(R9*1), X2 + MOVOU -16(DI)(R9*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R9*1) + MOVOU X3, -16(AX)(R9*1) + 
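+	// All short-copy sizes funnel here to advance the output pointer.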
+memmove_end_copy_match_emit_encodeSnappyBlockAsm64K: + MOVQ R8, AX + JMP emit_literal_done_match_emit_encodeSnappyBlockAsm64K + +memmove_long_match_emit_encodeSnappyBlockAsm64K: + LEAQ (AX)(R9*1), R8 + + // genMemMoveLong + MOVOU (DI), X0 + MOVOU 16(DI), X1 + MOVOU -32(DI)(R9*1), X2 + MOVOU -16(DI)(R9*1), X3 + MOVQ R9, R11 + SHRQ $0x05, R11 + MOVQ AX, R10 + ANDL $0x0000001f, R10 + MOVQ $0x00000040, R12 + SUBQ R10, R12 + DECQ R11 + JA emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm64Klarge_forward_sse_loop_32 + LEAQ -32(DI)(R12*1), R10 + LEAQ -32(AX)(R12*1), R13 + +emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm64Klarge_big_loop_back: + MOVOU (R10), X4 + MOVOU 16(R10), X5 + MOVOA X4, (R13) + MOVOA X5, 16(R13) + ADDQ $0x20, R13 + ADDQ $0x20, R10 + ADDQ $0x20, R12 + DECQ R11 + JNA emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm64Klarge_big_loop_back + +emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm64Klarge_forward_sse_loop_32: + MOVOU -32(DI)(R12*1), X4 + MOVOU -16(DI)(R12*1), X5 + MOVOA X4, -32(AX)(R12*1) + MOVOA X5, -16(AX)(R12*1) + ADDQ $0x20, R12 + CMPQ R9, R12 + JAE emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm64Klarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R9*1) + MOVOU X3, -16(AX)(R9*1) + MOVQ R8, AX + +emit_literal_done_match_emit_encodeSnappyBlockAsm64K: +match_nolit_loop_encodeSnappyBlockAsm64K: + MOVL CX, DI + SUBL SI, DI + MOVL DI, 16(SP) + ADDL $0x04, CX + ADDL $0x04, SI + MOVQ src_len+32(FP), DI + SUBL CX, DI + LEAQ (DX)(CX*1), R8 + LEAQ (DX)(SI*1), SI + + // matchLen + XORL R10, R10 + CMPL DI, $0x08 + JL matchlen_single_match_nolit_encodeSnappyBlockAsm64K + +matchlen_loopback_match_nolit_encodeSnappyBlockAsm64K: + MOVQ (R8)(R10*1), R9 + XORQ (SI)(R10*1), R9 + TESTQ R9, R9 + JZ matchlen_loop_match_nolit_encodeSnappyBlockAsm64K + BSFQ R9, R9 + SARQ $0x03, R9 + LEAL (R10)(R9*1), R10 + JMP match_nolit_end_encodeSnappyBlockAsm64K + +matchlen_loop_match_nolit_encodeSnappyBlockAsm64K: + LEAL -8(DI), DI + LEAL 8(R10), R10 + CMPL DI, $0x08 + JGE matchlen_loopback_match_nolit_encodeSnappyBlockAsm64K + +matchlen_single_match_nolit_encodeSnappyBlockAsm64K: + TESTL DI, DI + JZ match_nolit_end_encodeSnappyBlockAsm64K + +matchlen_single_loopback_match_nolit_encodeSnappyBlockAsm64K: + MOVB (R8)(R10*1), R9 + CMPB (SI)(R10*1), R9 + JNE match_nolit_end_encodeSnappyBlockAsm64K + LEAL 1(R10), R10 + DECL DI + JNZ matchlen_single_loopback_match_nolit_encodeSnappyBlockAsm64K + +match_nolit_end_encodeSnappyBlockAsm64K: + ADDL R10, CX + MOVL 16(SP), SI + ADDL $0x04, R10 + MOVL CX, 12(SP) + + // emitCopy +two_byte_offset_match_nolit_encodeSnappyBlockAsm64K: + CMPL R10, $0x40 + JLE two_byte_offset_short_match_nolit_encodeSnappyBlockAsm64K + MOVB $0xee, (AX) + MOVW SI, 1(AX) + LEAL -60(R10), R10 + ADDQ $0x03, AX + JMP two_byte_offset_match_nolit_encodeSnappyBlockAsm64K + +two_byte_offset_short_match_nolit_encodeSnappyBlockAsm64K: + CMPL R10, $0x0c + JGE emit_copy_three_match_nolit_encodeSnappyBlockAsm64K + CMPL SI, $0x00000800 + JGE emit_copy_three_match_nolit_encodeSnappyBlockAsm64K + MOVB $0x01, BL + LEAL -16(BX)(R10*4), R10 + MOVB SI, 1(AX) + SHRL $0x08, SI + SHLL $0x05, SI + ORL SI, R10 + MOVB R10, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeSnappyBlockAsm64K + +emit_copy_three_match_nolit_encodeSnappyBlockAsm64K: + MOVB $0x02, BL + LEAL -4(BX)(R10*4), R10 + MOVB R10, (AX) + MOVW SI, 1(AX) + ADDQ $0x03, AX + +match_nolit_emitcopy_end_encodeSnappyBlockAsm64K: + CMPL CX, 8(SP) + JGE 
emit_remainder_encodeSnappyBlockAsm64K + MOVQ -2(DX)(CX*1), DI + CMPQ AX, (SP) + JL match_nolit_dst_ok_encodeSnappyBlockAsm64K + MOVQ $0x00000000, ret+48(FP) + RET + +match_nolit_dst_ok_encodeSnappyBlockAsm64K: + MOVQ $0x0000cf1bbcdcbf9b, R9 + MOVQ DI, R8 + SHRQ $0x10, DI + MOVQ DI, SI + SHLQ $0x10, R8 + IMULQ R9, R8 + SHRQ $0x32, R8 + SHLQ $0x10, SI + IMULQ R9, SI + SHRQ $0x32, SI + LEAL -2(CX), R9 + LEAQ 24(SP)(SI*4), R10 + MOVL (R10), SI + MOVL R9, 24(SP)(R8*4) + MOVL CX, (R10) + CMPL (DX)(SI*1), DI + JEQ match_nolit_loop_encodeSnappyBlockAsm64K + INCL CX + JMP search_loop_encodeSnappyBlockAsm64K + +emit_remainder_encodeSnappyBlockAsm64K: + MOVQ src_len+32(FP), CX + SUBL 12(SP), CX + LEAQ 3(AX)(CX*1), CX + CMPQ CX, (SP) + JL emit_remainder_ok_encodeSnappyBlockAsm64K + MOVQ $0x00000000, ret+48(FP) + RET + +emit_remainder_ok_encodeSnappyBlockAsm64K: + MOVQ src_len+32(FP), CX + MOVL 12(SP), BX + CMPL BX, CX + JEQ emit_literal_done_emit_remainder_encodeSnappyBlockAsm64K + MOVL CX, SI + MOVL CX, 12(SP) + LEAQ (DX)(BX*1), CX + SUBL BX, SI + LEAL -1(SI), DX + CMPL DX, $0x3c + JLT one_byte_emit_remainder_encodeSnappyBlockAsm64K + CMPL DX, $0x00000100 + JLT two_bytes_emit_remainder_encodeSnappyBlockAsm64K + MOVB $0xf4, (AX) + MOVW DX, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_emit_remainder_encodeSnappyBlockAsm64K + +two_bytes_emit_remainder_encodeSnappyBlockAsm64K: + MOVB $0xf0, (AX) + MOVB DL, 1(AX) + ADDQ $0x02, AX + CMPL DX, $0x40 + JL memmove_emit_remainder_encodeSnappyBlockAsm64K + JMP memmove_long_emit_remainder_encodeSnappyBlockAsm64K + +one_byte_emit_remainder_encodeSnappyBlockAsm64K: + SHLB $0x02, DL + MOVB DL, (AX) + ADDQ $0x01, AX + +memmove_emit_remainder_encodeSnappyBlockAsm64K: + LEAQ (AX)(SI*1), DX + MOVL SI, BX + + // genMemMoveShort + CMPQ BX, $0x08 + JLE emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm64K_memmove_move_8 + CMPQ BX, $0x10 + JBE emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm64K_memmove_move_8through16 + CMPQ BX, $0x20 + JBE emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm64K_memmove_move_17through32 + JMP emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm64K_memmove_move_33through64 + +emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm64K_memmove_move_8: + MOVQ (CX), SI + MOVQ SI, (AX) + JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm64K + +emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm64K_memmove_move_8through16: + MOVQ (CX), SI + MOVQ -8(CX)(BX*1), CX + MOVQ SI, (AX) + MOVQ CX, -8(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm64K + +emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm64K_memmove_move_17through32: + MOVOU (CX), X0 + MOVOU -16(CX)(BX*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm64K + +emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm64K_memmove_move_33through64: + MOVOU (CX), X0 + MOVOU 16(CX), X1 + MOVOU -32(CX)(BX*1), X2 + MOVOU -16(CX)(BX*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(BX*1) + MOVOU X3, -16(AX)(BX*1) + +memmove_end_copy_emit_remainder_encodeSnappyBlockAsm64K: + MOVQ DX, AX + JMP emit_literal_done_emit_remainder_encodeSnappyBlockAsm64K + +memmove_long_emit_remainder_encodeSnappyBlockAsm64K: + LEAQ (AX)(SI*1), DX + MOVL SI, BX + + // genMemMoveLong + MOVOU (CX), X0 + MOVOU 16(CX), X1 + MOVOU -32(CX)(BX*1), X2 + MOVOU -16(CX)(BX*1), X3 + MOVQ BX, DI + SHRQ $0x05, DI + MOVQ AX, SI + ANDL $0x0000001f, SI + MOVQ $0x00000040, R8 + SUBQ SI, R8 + DECQ DI + JA 
emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm64Klarge_forward_sse_loop_32 + LEAQ -32(CX)(R8*1), SI + LEAQ -32(AX)(R8*1), R9 + +emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm64Klarge_big_loop_back: + MOVOU (SI), X4 + MOVOU 16(SI), X5 + MOVOA X4, (R9) + MOVOA X5, 16(R9) + ADDQ $0x20, R9 + ADDQ $0x20, SI + ADDQ $0x20, R8 + DECQ DI + JNA emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm64Klarge_big_loop_back + +emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm64Klarge_forward_sse_loop_32: + MOVOU -32(CX)(R8*1), X4 + MOVOU -16(CX)(R8*1), X5 + MOVOA X4, -32(AX)(R8*1) + MOVOA X5, -16(AX)(R8*1) + ADDQ $0x20, R8 + CMPQ BX, R8 + JAE emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm64Klarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(BX*1) + MOVOU X3, -16(AX)(BX*1) + MOVQ DX, AX + +emit_literal_done_emit_remainder_encodeSnappyBlockAsm64K: + MOVQ dst_base+0(FP), CX + SUBQ CX, AX + MOVQ AX, ret+48(FP) + RET + +// func encodeSnappyBlockAsm12B(dst []byte, src []byte) int +// Requires: SSE2 +TEXT ·encodeSnappyBlockAsm12B(SB), $16408-56 + MOVQ dst_base+0(FP), AX + MOVQ $0x00000080, CX + LEAQ 24(SP), DX + PXOR X0, X0 + +zero_loop_encodeSnappyBlockAsm12B: + MOVOU X0, (DX) + MOVOU X0, 16(DX) + MOVOU X0, 32(DX) + MOVOU X0, 48(DX) + MOVOU X0, 64(DX) + MOVOU X0, 80(DX) + MOVOU X0, 96(DX) + MOVOU X0, 112(DX) + ADDQ $0x80, DX + DECQ CX + JNZ zero_loop_encodeSnappyBlockAsm12B + MOVL $0x00000000, 12(SP) + MOVQ src_len+32(FP), CX + LEAQ -9(CX), DX + LEAQ -8(CX), SI + MOVL SI, 8(SP) + SHRQ $0x05, CX + SUBL CX, DX + LEAQ (AX)(DX*1), DX + MOVQ DX, (SP) + MOVL $0x00000001, CX + MOVL CX, 16(SP) + MOVQ src_base+24(FP), DX + +search_loop_encodeSnappyBlockAsm12B: + MOVL CX, SI + SUBL 12(SP), SI + SHRL $0x05, SI + LEAL 4(CX)(SI*1), SI + CMPL SI, 8(SP) + JGE emit_remainder_encodeSnappyBlockAsm12B + MOVQ (DX)(CX*1), DI + MOVL SI, 20(SP) + MOVQ $0x000000cf1bbcdcbb, R9 + MOVQ DI, R10 + MOVQ DI, R11 + SHRQ $0x08, R11 + SHLQ $0x18, R10 + IMULQ R9, R10 + SHRQ $0x34, R10 + SHLQ $0x18, R11 + IMULQ R9, R11 + SHRQ $0x34, R11 + MOVL 24(SP)(R10*4), SI + MOVL 24(SP)(R11*4), R8 + MOVL CX, 24(SP)(R10*4) + LEAL 1(CX), R10 + MOVL R10, 24(SP)(R11*4) + MOVQ DI, R10 + SHRQ $0x10, R10 + SHLQ $0x18, R10 + IMULQ R9, R10 + SHRQ $0x34, R10 + MOVL CX, R9 + SUBL 16(SP), R9 + MOVL 1(DX)(R9*1), R11 + MOVQ DI, R9 + SHRQ $0x08, R9 + CMPL R9, R11 + JNE no_repeat_found_encodeSnappyBlockAsm12B + LEAL 1(CX), DI + MOVL 12(SP), SI + MOVL DI, R8 + SUBL 16(SP), R8 + JZ repeat_extend_back_end_encodeSnappyBlockAsm12B + +repeat_extend_back_loop_encodeSnappyBlockAsm12B: + CMPL DI, SI + JLE repeat_extend_back_end_encodeSnappyBlockAsm12B + MOVB -1(DX)(R8*1), BL + MOVB -1(DX)(DI*1), R9 + CMPB BL, R9 + JNE repeat_extend_back_end_encodeSnappyBlockAsm12B + LEAL -1(DI), DI + DECL R8 + JNZ repeat_extend_back_loop_encodeSnappyBlockAsm12B + +repeat_extend_back_end_encodeSnappyBlockAsm12B: + MOVL 12(SP), SI + CMPL SI, DI + JEQ emit_literal_done_repeat_emit_encodeSnappyBlockAsm12B + MOVL DI, R8 + MOVL DI, 12(SP) + LEAQ (DX)(SI*1), R9 + SUBL SI, R8 + LEAL -1(R8), SI + CMPL SI, $0x3c + JLT one_byte_repeat_emit_encodeSnappyBlockAsm12B + CMPL SI, $0x00000100 + JLT two_bytes_repeat_emit_encodeSnappyBlockAsm12B + MOVB $0xf4, (AX) + MOVW SI, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_repeat_emit_encodeSnappyBlockAsm12B + +two_bytes_repeat_emit_encodeSnappyBlockAsm12B: + MOVB $0xf0, (AX) + MOVB SI, 1(AX) + ADDQ $0x02, AX + CMPL SI, $0x40 + JL memmove_repeat_emit_encodeSnappyBlockAsm12B + JMP 
memmove_long_repeat_emit_encodeSnappyBlockAsm12B + +one_byte_repeat_emit_encodeSnappyBlockAsm12B: + SHLB $0x02, SI + MOVB SI, (AX) + ADDQ $0x01, AX + +memmove_repeat_emit_encodeSnappyBlockAsm12B: + LEAQ (AX)(R8*1), SI + + // genMemMoveShort + CMPQ R8, $0x08 + JLE emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm12B_memmove_move_8 + CMPQ R8, $0x10 + JBE emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm12B_memmove_move_8through16 + CMPQ R8, $0x20 + JBE emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm12B_memmove_move_17through32 + JMP emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm12B_memmove_move_33through64 + +emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm12B_memmove_move_8: + MOVQ (R9), R10 + MOVQ R10, (AX) + JMP memmove_end_copy_repeat_emit_encodeSnappyBlockAsm12B + +emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm12B_memmove_move_8through16: + MOVQ (R9), R10 + MOVQ -8(R9)(R8*1), R9 + MOVQ R10, (AX) + MOVQ R9, -8(AX)(R8*1) + JMP memmove_end_copy_repeat_emit_encodeSnappyBlockAsm12B + +emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm12B_memmove_move_17through32: + MOVOU (R9), X0 + MOVOU -16(R9)(R8*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(R8*1) + JMP memmove_end_copy_repeat_emit_encodeSnappyBlockAsm12B + +emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm12B_memmove_move_33through64: + MOVOU (R9), X0 + MOVOU 16(R9), X1 + MOVOU -32(R9)(R8*1), X2 + MOVOU -16(R9)(R8*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R8*1) + MOVOU X3, -16(AX)(R8*1) + +memmove_end_copy_repeat_emit_encodeSnappyBlockAsm12B: + MOVQ SI, AX + JMP emit_literal_done_repeat_emit_encodeSnappyBlockAsm12B + +memmove_long_repeat_emit_encodeSnappyBlockAsm12B: + LEAQ (AX)(R8*1), SI + + // genMemMoveLong + MOVOU (R9), X0 + MOVOU 16(R9), X1 + MOVOU -32(R9)(R8*1), X2 + MOVOU -16(R9)(R8*1), X3 + MOVQ R8, R11 + SHRQ $0x05, R11 + MOVQ AX, R10 + ANDL $0x0000001f, R10 + MOVQ $0x00000040, R12 + SUBQ R10, R12 + DECQ R11 + JA emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm12Blarge_forward_sse_loop_32 + LEAQ -32(R9)(R12*1), R10 + LEAQ -32(AX)(R12*1), R13 + +emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm12Blarge_big_loop_back: + MOVOU (R10), X4 + MOVOU 16(R10), X5 + MOVOA X4, (R13) + MOVOA X5, 16(R13) + ADDQ $0x20, R13 + ADDQ $0x20, R10 + ADDQ $0x20, R12 + DECQ R11 + JNA emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm12Blarge_big_loop_back + +emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm12Blarge_forward_sse_loop_32: + MOVOU -32(R9)(R12*1), X4 + MOVOU -16(R9)(R12*1), X5 + MOVOA X4, -32(AX)(R12*1) + MOVOA X5, -16(AX)(R12*1) + ADDQ $0x20, R12 + CMPQ R8, R12 + JAE emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm12Blarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R8*1) + MOVOU X3, -16(AX)(R8*1) + MOVQ SI, AX + +emit_literal_done_repeat_emit_encodeSnappyBlockAsm12B: + ADDL $0x05, CX + MOVL CX, SI + SUBL 16(SP), SI + MOVQ src_len+32(FP), R8 + SUBL CX, R8 + LEAQ (DX)(CX*1), R9 + LEAQ (DX)(SI*1), SI + + // matchLen + XORL R11, R11 + CMPL R8, $0x08 + JL matchlen_single_repeat_extend_encodeSnappyBlockAsm12B + +matchlen_loopback_repeat_extend_encodeSnappyBlockAsm12B: + MOVQ (R9)(R11*1), R10 + XORQ (SI)(R11*1), R10 + TESTQ R10, R10 + JZ matchlen_loop_repeat_extend_encodeSnappyBlockAsm12B + BSFQ R10, R10 + SARQ $0x03, R10 + LEAL (R11)(R10*1), R11 + JMP repeat_extend_forward_end_encodeSnappyBlockAsm12B + +matchlen_loop_repeat_extend_encodeSnappyBlockAsm12B: + LEAL -8(R8), R8 + LEAL 8(R11), R11 + CMPL R8, $0x08 + JGE 
matchlen_loopback_repeat_extend_encodeSnappyBlockAsm12B + +matchlen_single_repeat_extend_encodeSnappyBlockAsm12B: + TESTL R8, R8 + JZ repeat_extend_forward_end_encodeSnappyBlockAsm12B + +matchlen_single_loopback_repeat_extend_encodeSnappyBlockAsm12B: + MOVB (R9)(R11*1), R10 + CMPB (SI)(R11*1), R10 + JNE repeat_extend_forward_end_encodeSnappyBlockAsm12B + LEAL 1(R11), R11 + DECL R8 + JNZ matchlen_single_loopback_repeat_extend_encodeSnappyBlockAsm12B + +repeat_extend_forward_end_encodeSnappyBlockAsm12B: + ADDL R11, CX + MOVL CX, SI + SUBL DI, SI + MOVL 16(SP), DI + + // emitCopy +two_byte_offset_repeat_as_copy_encodeSnappyBlockAsm12B: + CMPL SI, $0x40 + JLE two_byte_offset_short_repeat_as_copy_encodeSnappyBlockAsm12B + MOVB $0xee, (AX) + MOVW DI, 1(AX) + LEAL -60(SI), SI + ADDQ $0x03, AX + JMP two_byte_offset_repeat_as_copy_encodeSnappyBlockAsm12B + +two_byte_offset_short_repeat_as_copy_encodeSnappyBlockAsm12B: + CMPL SI, $0x0c + JGE emit_copy_three_repeat_as_copy_encodeSnappyBlockAsm12B + CMPL DI, $0x00000800 + JGE emit_copy_three_repeat_as_copy_encodeSnappyBlockAsm12B + MOVB $0x01, BL + LEAL -16(BX)(SI*4), SI + MOVB DI, 1(AX) + SHRL $0x08, DI + SHLL $0x05, DI + ORL DI, SI + MOVB SI, (AX) + ADDQ $0x02, AX + JMP repeat_end_emit_encodeSnappyBlockAsm12B + +emit_copy_three_repeat_as_copy_encodeSnappyBlockAsm12B: + MOVB $0x02, BL + LEAL -4(BX)(SI*4), SI + MOVB SI, (AX) + MOVW DI, 1(AX) + ADDQ $0x03, AX + +repeat_end_emit_encodeSnappyBlockAsm12B: + MOVL CX, 12(SP) + JMP search_loop_encodeSnappyBlockAsm12B + +no_repeat_found_encodeSnappyBlockAsm12B: + CMPL (DX)(SI*1), DI + JEQ candidate_match_encodeSnappyBlockAsm12B + SHRQ $0x08, DI + MOVL 24(SP)(R10*4), SI + LEAL 2(CX), R9 + CMPL (DX)(R8*1), DI + JEQ candidate2_match_encodeSnappyBlockAsm12B + MOVL R9, 24(SP)(R10*4) + SHRQ $0x08, DI + CMPL (DX)(SI*1), DI + JEQ candidate3_match_encodeSnappyBlockAsm12B + MOVL 20(SP), CX + JMP search_loop_encodeSnappyBlockAsm12B + +candidate3_match_encodeSnappyBlockAsm12B: + ADDL $0x02, CX + JMP candidate_match_encodeSnappyBlockAsm12B + +candidate2_match_encodeSnappyBlockAsm12B: + MOVL R9, 24(SP)(R10*4) + INCL CX + MOVL R8, SI + +candidate_match_encodeSnappyBlockAsm12B: + MOVL 12(SP), DI + TESTL SI, SI + JZ match_extend_back_end_encodeSnappyBlockAsm12B + +match_extend_back_loop_encodeSnappyBlockAsm12B: + CMPL CX, DI + JLE match_extend_back_end_encodeSnappyBlockAsm12B + MOVB -1(DX)(SI*1), BL + MOVB -1(DX)(CX*1), R8 + CMPB BL, R8 + JNE match_extend_back_end_encodeSnappyBlockAsm12B + LEAL -1(CX), CX + DECL SI + JZ match_extend_back_end_encodeSnappyBlockAsm12B + JMP match_extend_back_loop_encodeSnappyBlockAsm12B + +match_extend_back_end_encodeSnappyBlockAsm12B: + MOVL CX, DI + SUBL 12(SP), DI + LEAQ 3(AX)(DI*1), DI + CMPQ DI, (SP) + JL match_dst_size_check_encodeSnappyBlockAsm12B + MOVQ $0x00000000, ret+48(FP) + RET + +match_dst_size_check_encodeSnappyBlockAsm12B: + MOVL CX, DI + MOVL 12(SP), R8 + CMPL R8, DI + JEQ emit_literal_done_match_emit_encodeSnappyBlockAsm12B + MOVL DI, R9 + MOVL DI, 12(SP) + LEAQ (DX)(R8*1), DI + SUBL R8, R9 + LEAL -1(R9), R8 + CMPL R8, $0x3c + JLT one_byte_match_emit_encodeSnappyBlockAsm12B + CMPL R8, $0x00000100 + JLT two_bytes_match_emit_encodeSnappyBlockAsm12B + MOVB $0xf4, (AX) + MOVW R8, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_match_emit_encodeSnappyBlockAsm12B + +two_bytes_match_emit_encodeSnappyBlockAsm12B: + MOVB $0xf0, (AX) + MOVB R8, 1(AX) + ADDQ $0x02, AX + CMPL R8, $0x40 + JL memmove_match_emit_encodeSnappyBlockAsm12B + JMP memmove_long_match_emit_encodeSnappyBlockAsm12B + 
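+	// Length fits the tag byte: emit a literal header of (len-1)<<2.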
+one_byte_match_emit_encodeSnappyBlockAsm12B: + SHLB $0x02, R8 + MOVB R8, (AX) + ADDQ $0x01, AX + +memmove_match_emit_encodeSnappyBlockAsm12B: + LEAQ (AX)(R9*1), R8 + + // genMemMoveShort + CMPQ R9, $0x08 + JLE emit_lit_memmove_match_emit_encodeSnappyBlockAsm12B_memmove_move_8 + CMPQ R9, $0x10 + JBE emit_lit_memmove_match_emit_encodeSnappyBlockAsm12B_memmove_move_8through16 + CMPQ R9, $0x20 + JBE emit_lit_memmove_match_emit_encodeSnappyBlockAsm12B_memmove_move_17through32 + JMP emit_lit_memmove_match_emit_encodeSnappyBlockAsm12B_memmove_move_33through64 + +emit_lit_memmove_match_emit_encodeSnappyBlockAsm12B_memmove_move_8: + MOVQ (DI), R10 + MOVQ R10, (AX) + JMP memmove_end_copy_match_emit_encodeSnappyBlockAsm12B + +emit_lit_memmove_match_emit_encodeSnappyBlockAsm12B_memmove_move_8through16: + MOVQ (DI), R10 + MOVQ -8(DI)(R9*1), DI + MOVQ R10, (AX) + MOVQ DI, -8(AX)(R9*1) + JMP memmove_end_copy_match_emit_encodeSnappyBlockAsm12B + +emit_lit_memmove_match_emit_encodeSnappyBlockAsm12B_memmove_move_17through32: + MOVOU (DI), X0 + MOVOU -16(DI)(R9*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(R9*1) + JMP memmove_end_copy_match_emit_encodeSnappyBlockAsm12B + +emit_lit_memmove_match_emit_encodeSnappyBlockAsm12B_memmove_move_33through64: + MOVOU (DI), X0 + MOVOU 16(DI), X1 + MOVOU -32(DI)(R9*1), X2 + MOVOU -16(DI)(R9*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R9*1) + MOVOU X3, -16(AX)(R9*1) + +memmove_end_copy_match_emit_encodeSnappyBlockAsm12B: + MOVQ R8, AX + JMP emit_literal_done_match_emit_encodeSnappyBlockAsm12B + +memmove_long_match_emit_encodeSnappyBlockAsm12B: + LEAQ (AX)(R9*1), R8 + + // genMemMoveLong + MOVOU (DI), X0 + MOVOU 16(DI), X1 + MOVOU -32(DI)(R9*1), X2 + MOVOU -16(DI)(R9*1), X3 + MOVQ R9, R11 + SHRQ $0x05, R11 + MOVQ AX, R10 + ANDL $0x0000001f, R10 + MOVQ $0x00000040, R12 + SUBQ R10, R12 + DECQ R11 + JA emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm12Blarge_forward_sse_loop_32 + LEAQ -32(DI)(R12*1), R10 + LEAQ -32(AX)(R12*1), R13 + +emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm12Blarge_big_loop_back: + MOVOU (R10), X4 + MOVOU 16(R10), X5 + MOVOA X4, (R13) + MOVOA X5, 16(R13) + ADDQ $0x20, R13 + ADDQ $0x20, R10 + ADDQ $0x20, R12 + DECQ R11 + JNA emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm12Blarge_big_loop_back + +emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm12Blarge_forward_sse_loop_32: + MOVOU -32(DI)(R12*1), X4 + MOVOU -16(DI)(R12*1), X5 + MOVOA X4, -32(AX)(R12*1) + MOVOA X5, -16(AX)(R12*1) + ADDQ $0x20, R12 + CMPQ R9, R12 + JAE emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm12Blarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R9*1) + MOVOU X3, -16(AX)(R9*1) + MOVQ R8, AX + +emit_literal_done_match_emit_encodeSnappyBlockAsm12B: +match_nolit_loop_encodeSnappyBlockAsm12B: + MOVL CX, DI + SUBL SI, DI + MOVL DI, 16(SP) + ADDL $0x04, CX + ADDL $0x04, SI + MOVQ src_len+32(FP), DI + SUBL CX, DI + LEAQ (DX)(CX*1), R8 + LEAQ (DX)(SI*1), SI + + // matchLen + XORL R10, R10 + CMPL DI, $0x08 + JL matchlen_single_match_nolit_encodeSnappyBlockAsm12B + +matchlen_loopback_match_nolit_encodeSnappyBlockAsm12B: + MOVQ (R8)(R10*1), R9 + XORQ (SI)(R10*1), R9 + TESTQ R9, R9 + JZ matchlen_loop_match_nolit_encodeSnappyBlockAsm12B + BSFQ R9, R9 + SARQ $0x03, R9 + LEAL (R10)(R9*1), R10 + JMP match_nolit_end_encodeSnappyBlockAsm12B + +matchlen_loop_match_nolit_encodeSnappyBlockAsm12B: + LEAL -8(DI), DI + LEAL 8(R10), R10 + CMPL DI, $0x08 + JGE matchlen_loopback_match_nolit_encodeSnappyBlockAsm12B + 
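+	// matchLen compares eight bytes per iteration: XORing the two source
+	// words leaves the first mismatching byte as the lowest set bits, so
+	// BSFQ plus an arithmetic shift right by 3 converts the bit index into
+	// the count of equal leading bytes. Tails shorter than eight bytes fall
+	// through to the byte-at-a-time loop below.
+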
+matchlen_single_match_nolit_encodeSnappyBlockAsm12B: + TESTL DI, DI + JZ match_nolit_end_encodeSnappyBlockAsm12B + +matchlen_single_loopback_match_nolit_encodeSnappyBlockAsm12B: + MOVB (R8)(R10*1), R9 + CMPB (SI)(R10*1), R9 + JNE match_nolit_end_encodeSnappyBlockAsm12B + LEAL 1(R10), R10 + DECL DI + JNZ matchlen_single_loopback_match_nolit_encodeSnappyBlockAsm12B + +match_nolit_end_encodeSnappyBlockAsm12B: + ADDL R10, CX + MOVL 16(SP), SI + ADDL $0x04, R10 + MOVL CX, 12(SP) + + // emitCopy +two_byte_offset_match_nolit_encodeSnappyBlockAsm12B: + CMPL R10, $0x40 + JLE two_byte_offset_short_match_nolit_encodeSnappyBlockAsm12B + MOVB $0xee, (AX) + MOVW SI, 1(AX) + LEAL -60(R10), R10 + ADDQ $0x03, AX + JMP two_byte_offset_match_nolit_encodeSnappyBlockAsm12B + +two_byte_offset_short_match_nolit_encodeSnappyBlockAsm12B: + CMPL R10, $0x0c + JGE emit_copy_three_match_nolit_encodeSnappyBlockAsm12B + CMPL SI, $0x00000800 + JGE emit_copy_three_match_nolit_encodeSnappyBlockAsm12B + MOVB $0x01, BL + LEAL -16(BX)(R10*4), R10 + MOVB SI, 1(AX) + SHRL $0x08, SI + SHLL $0x05, SI + ORL SI, R10 + MOVB R10, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeSnappyBlockAsm12B + +emit_copy_three_match_nolit_encodeSnappyBlockAsm12B: + MOVB $0x02, BL + LEAL -4(BX)(R10*4), R10 + MOVB R10, (AX) + MOVW SI, 1(AX) + ADDQ $0x03, AX + +match_nolit_emitcopy_end_encodeSnappyBlockAsm12B: + CMPL CX, 8(SP) + JGE emit_remainder_encodeSnappyBlockAsm12B + MOVQ -2(DX)(CX*1), DI + CMPQ AX, (SP) + JL match_nolit_dst_ok_encodeSnappyBlockAsm12B + MOVQ $0x00000000, ret+48(FP) + RET + +match_nolit_dst_ok_encodeSnappyBlockAsm12B: + MOVQ $0x000000cf1bbcdcbb, R9 + MOVQ DI, R8 + SHRQ $0x10, DI + MOVQ DI, SI + SHLQ $0x18, R8 + IMULQ R9, R8 + SHRQ $0x34, R8 + SHLQ $0x18, SI + IMULQ R9, SI + SHRQ $0x34, SI + LEAL -2(CX), R9 + LEAQ 24(SP)(SI*4), R10 + MOVL (R10), SI + MOVL R9, 24(SP)(R8*4) + MOVL CX, (R10) + CMPL (DX)(SI*1), DI + JEQ match_nolit_loop_encodeSnappyBlockAsm12B + INCL CX + JMP search_loop_encodeSnappyBlockAsm12B + +emit_remainder_encodeSnappyBlockAsm12B: + MOVQ src_len+32(FP), CX + SUBL 12(SP), CX + LEAQ 3(AX)(CX*1), CX + CMPQ CX, (SP) + JL emit_remainder_ok_encodeSnappyBlockAsm12B + MOVQ $0x00000000, ret+48(FP) + RET + +emit_remainder_ok_encodeSnappyBlockAsm12B: + MOVQ src_len+32(FP), CX + MOVL 12(SP), BX + CMPL BX, CX + JEQ emit_literal_done_emit_remainder_encodeSnappyBlockAsm12B + MOVL CX, SI + MOVL CX, 12(SP) + LEAQ (DX)(BX*1), CX + SUBL BX, SI + LEAL -1(SI), DX + CMPL DX, $0x3c + JLT one_byte_emit_remainder_encodeSnappyBlockAsm12B + CMPL DX, $0x00000100 + JLT two_bytes_emit_remainder_encodeSnappyBlockAsm12B + MOVB $0xf4, (AX) + MOVW DX, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_emit_remainder_encodeSnappyBlockAsm12B + +two_bytes_emit_remainder_encodeSnappyBlockAsm12B: + MOVB $0xf0, (AX) + MOVB DL, 1(AX) + ADDQ $0x02, AX + CMPL DX, $0x40 + JL memmove_emit_remainder_encodeSnappyBlockAsm12B + JMP memmove_long_emit_remainder_encodeSnappyBlockAsm12B + +one_byte_emit_remainder_encodeSnappyBlockAsm12B: + SHLB $0x02, DL + MOVB DL, (AX) + ADDQ $0x01, AX + +memmove_emit_remainder_encodeSnappyBlockAsm12B: + LEAQ (AX)(SI*1), DX + MOVL SI, BX + + // genMemMoveShort + CMPQ BX, $0x08 + JLE emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm12B_memmove_move_8 + CMPQ BX, $0x10 + JBE emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm12B_memmove_move_8through16 + CMPQ BX, $0x20 + JBE emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm12B_memmove_move_17through32 + JMP 
emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm12B_memmove_move_33through64 + +emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm12B_memmove_move_8: + MOVQ (CX), SI + MOVQ SI, (AX) + JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm12B + +emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm12B_memmove_move_8through16: + MOVQ (CX), SI + MOVQ -8(CX)(BX*1), CX + MOVQ SI, (AX) + MOVQ CX, -8(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm12B + +emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm12B_memmove_move_17through32: + MOVOU (CX), X0 + MOVOU -16(CX)(BX*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm12B + +emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm12B_memmove_move_33through64: + MOVOU (CX), X0 + MOVOU 16(CX), X1 + MOVOU -32(CX)(BX*1), X2 + MOVOU -16(CX)(BX*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(BX*1) + MOVOU X3, -16(AX)(BX*1) + +memmove_end_copy_emit_remainder_encodeSnappyBlockAsm12B: + MOVQ DX, AX + JMP emit_literal_done_emit_remainder_encodeSnappyBlockAsm12B + +memmove_long_emit_remainder_encodeSnappyBlockAsm12B: + LEAQ (AX)(SI*1), DX + MOVL SI, BX + + // genMemMoveLong + MOVOU (CX), X0 + MOVOU 16(CX), X1 + MOVOU -32(CX)(BX*1), X2 + MOVOU -16(CX)(BX*1), X3 + MOVQ BX, DI + SHRQ $0x05, DI + MOVQ AX, SI + ANDL $0x0000001f, SI + MOVQ $0x00000040, R8 + SUBQ SI, R8 + DECQ DI + JA emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm12Blarge_forward_sse_loop_32 + LEAQ -32(CX)(R8*1), SI + LEAQ -32(AX)(R8*1), R9 + +emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm12Blarge_big_loop_back: + MOVOU (SI), X4 + MOVOU 16(SI), X5 + MOVOA X4, (R9) + MOVOA X5, 16(R9) + ADDQ $0x20, R9 + ADDQ $0x20, SI + ADDQ $0x20, R8 + DECQ DI + JNA emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm12Blarge_big_loop_back + +emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm12Blarge_forward_sse_loop_32: + MOVOU -32(CX)(R8*1), X4 + MOVOU -16(CX)(R8*1), X5 + MOVOA X4, -32(AX)(R8*1) + MOVOA X5, -16(AX)(R8*1) + ADDQ $0x20, R8 + CMPQ BX, R8 + JAE emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm12Blarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(BX*1) + MOVOU X3, -16(AX)(BX*1) + MOVQ DX, AX + +emit_literal_done_emit_remainder_encodeSnappyBlockAsm12B: + MOVQ dst_base+0(FP), CX + SUBQ CX, AX + MOVQ AX, ret+48(FP) + RET + +// func encodeSnappyBlockAsm10B(dst []byte, src []byte) int +// Requires: SSE2 +TEXT ·encodeSnappyBlockAsm10B(SB), $4120-56 + MOVQ dst_base+0(FP), AX + MOVQ $0x00000020, CX + LEAQ 24(SP), DX + PXOR X0, X0 + +zero_loop_encodeSnappyBlockAsm10B: + MOVOU X0, (DX) + MOVOU X0, 16(DX) + MOVOU X0, 32(DX) + MOVOU X0, 48(DX) + MOVOU X0, 64(DX) + MOVOU X0, 80(DX) + MOVOU X0, 96(DX) + MOVOU X0, 112(DX) + ADDQ $0x80, DX + DECQ CX + JNZ zero_loop_encodeSnappyBlockAsm10B + MOVL $0x00000000, 12(SP) + MOVQ src_len+32(FP), CX + LEAQ -9(CX), DX + LEAQ -8(CX), SI + MOVL SI, 8(SP) + SHRQ $0x05, CX + SUBL CX, DX + LEAQ (AX)(DX*1), DX + MOVQ DX, (SP) + MOVL $0x00000001, CX + MOVL CX, 16(SP) + MOVQ src_base+24(FP), DX + +search_loop_encodeSnappyBlockAsm10B: + MOVL CX, SI + SUBL 12(SP), SI + SHRL $0x05, SI + LEAL 4(CX)(SI*1), SI + CMPL SI, 8(SP) + JGE emit_remainder_encodeSnappyBlockAsm10B + MOVQ (DX)(CX*1), DI + MOVL SI, 20(SP) + MOVQ $0x9e3779b1, R9 + MOVQ DI, R10 + MOVQ DI, R11 + SHRQ $0x08, R11 + SHLQ $0x20, R10 + IMULQ R9, R10 + SHRQ $0x36, R10 + SHLQ $0x20, R11 + IMULQ R9, R11 + SHRQ $0x36, R11 + MOVL 24(SP)(R10*4), SI + MOVL 
24(SP)(R11*4), R8 + MOVL CX, 24(SP)(R10*4) + LEAL 1(CX), R10 + MOVL R10, 24(SP)(R11*4) + MOVQ DI, R10 + SHRQ $0x10, R10 + SHLQ $0x20, R10 + IMULQ R9, R10 + SHRQ $0x36, R10 + MOVL CX, R9 + SUBL 16(SP), R9 + MOVL 1(DX)(R9*1), R11 + MOVQ DI, R9 + SHRQ $0x08, R9 + CMPL R9, R11 + JNE no_repeat_found_encodeSnappyBlockAsm10B + LEAL 1(CX), DI + MOVL 12(SP), SI + MOVL DI, R8 + SUBL 16(SP), R8 + JZ repeat_extend_back_end_encodeSnappyBlockAsm10B + +repeat_extend_back_loop_encodeSnappyBlockAsm10B: + CMPL DI, SI + JLE repeat_extend_back_end_encodeSnappyBlockAsm10B + MOVB -1(DX)(R8*1), BL + MOVB -1(DX)(DI*1), R9 + CMPB BL, R9 + JNE repeat_extend_back_end_encodeSnappyBlockAsm10B + LEAL -1(DI), DI + DECL R8 + JNZ repeat_extend_back_loop_encodeSnappyBlockAsm10B + +repeat_extend_back_end_encodeSnappyBlockAsm10B: + MOVL 12(SP), SI + CMPL SI, DI + JEQ emit_literal_done_repeat_emit_encodeSnappyBlockAsm10B + MOVL DI, R8 + MOVL DI, 12(SP) + LEAQ (DX)(SI*1), R9 + SUBL SI, R8 + LEAL -1(R8), SI + CMPL SI, $0x3c + JLT one_byte_repeat_emit_encodeSnappyBlockAsm10B + CMPL SI, $0x00000100 + JLT two_bytes_repeat_emit_encodeSnappyBlockAsm10B + MOVB $0xf4, (AX) + MOVW SI, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_repeat_emit_encodeSnappyBlockAsm10B + +two_bytes_repeat_emit_encodeSnappyBlockAsm10B: + MOVB $0xf0, (AX) + MOVB SI, 1(AX) + ADDQ $0x02, AX + CMPL SI, $0x40 + JL memmove_repeat_emit_encodeSnappyBlockAsm10B + JMP memmove_long_repeat_emit_encodeSnappyBlockAsm10B + +one_byte_repeat_emit_encodeSnappyBlockAsm10B: + SHLB $0x02, SI + MOVB SI, (AX) + ADDQ $0x01, AX + +memmove_repeat_emit_encodeSnappyBlockAsm10B: + LEAQ (AX)(R8*1), SI + + // genMemMoveShort + CMPQ R8, $0x08 + JLE emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm10B_memmove_move_8 + CMPQ R8, $0x10 + JBE emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm10B_memmove_move_8through16 + CMPQ R8, $0x20 + JBE emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm10B_memmove_move_17through32 + JMP emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm10B_memmove_move_33through64 + +emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm10B_memmove_move_8: + MOVQ (R9), R10 + MOVQ R10, (AX) + JMP memmove_end_copy_repeat_emit_encodeSnappyBlockAsm10B + +emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm10B_memmove_move_8through16: + MOVQ (R9), R10 + MOVQ -8(R9)(R8*1), R9 + MOVQ R10, (AX) + MOVQ R9, -8(AX)(R8*1) + JMP memmove_end_copy_repeat_emit_encodeSnappyBlockAsm10B + +emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm10B_memmove_move_17through32: + MOVOU (R9), X0 + MOVOU -16(R9)(R8*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(R8*1) + JMP memmove_end_copy_repeat_emit_encodeSnappyBlockAsm10B + +emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm10B_memmove_move_33through64: + MOVOU (R9), X0 + MOVOU 16(R9), X1 + MOVOU -32(R9)(R8*1), X2 + MOVOU -16(R9)(R8*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R8*1) + MOVOU X3, -16(AX)(R8*1) + +memmove_end_copy_repeat_emit_encodeSnappyBlockAsm10B: + MOVQ SI, AX + JMP emit_literal_done_repeat_emit_encodeSnappyBlockAsm10B + +memmove_long_repeat_emit_encodeSnappyBlockAsm10B: + LEAQ (AX)(R8*1), SI + + // genMemMoveLong + MOVOU (R9), X0 + MOVOU 16(R9), X1 + MOVOU -32(R9)(R8*1), X2 + MOVOU -16(R9)(R8*1), X3 + MOVQ R8, R11 + SHRQ $0x05, R11 + MOVQ AX, R10 + ANDL $0x0000001f, R10 + MOVQ $0x00000040, R12 + SUBQ R10, R12 + DECQ R11 + JA emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm10Blarge_forward_sse_loop_32 + LEAQ -32(R9)(R12*1), R10 + LEAQ -32(AX)(R12*1), R13 + 
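+	// genMemMoveLong: the first and last 32 source bytes are already held
+	// in X0-X3; R12 = 64 - (dst & 31) is how far to advance so the
+	// destination becomes 32-byte aligned. The loop below then moves 32
+	// bytes per iteration with aligned MOVOA stores, and the saved edge
+	// vectors are written back unaligned at the end to cover both ends.
+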
+emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm10Blarge_big_loop_back: + MOVOU (R10), X4 + MOVOU 16(R10), X5 + MOVOA X4, (R13) + MOVOA X5, 16(R13) + ADDQ $0x20, R13 + ADDQ $0x20, R10 + ADDQ $0x20, R12 + DECQ R11 + JNA emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm10Blarge_big_loop_back + +emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm10Blarge_forward_sse_loop_32: + MOVOU -32(R9)(R12*1), X4 + MOVOU -16(R9)(R12*1), X5 + MOVOA X4, -32(AX)(R12*1) + MOVOA X5, -16(AX)(R12*1) + ADDQ $0x20, R12 + CMPQ R8, R12 + JAE emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm10Blarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R8*1) + MOVOU X3, -16(AX)(R8*1) + MOVQ SI, AX + +emit_literal_done_repeat_emit_encodeSnappyBlockAsm10B: + ADDL $0x05, CX + MOVL CX, SI + SUBL 16(SP), SI + MOVQ src_len+32(FP), R8 + SUBL CX, R8 + LEAQ (DX)(CX*1), R9 + LEAQ (DX)(SI*1), SI + + // matchLen + XORL R11, R11 + CMPL R8, $0x08 + JL matchlen_single_repeat_extend_encodeSnappyBlockAsm10B + +matchlen_loopback_repeat_extend_encodeSnappyBlockAsm10B: + MOVQ (R9)(R11*1), R10 + XORQ (SI)(R11*1), R10 + TESTQ R10, R10 + JZ matchlen_loop_repeat_extend_encodeSnappyBlockAsm10B + BSFQ R10, R10 + SARQ $0x03, R10 + LEAL (R11)(R10*1), R11 + JMP repeat_extend_forward_end_encodeSnappyBlockAsm10B + +matchlen_loop_repeat_extend_encodeSnappyBlockAsm10B: + LEAL -8(R8), R8 + LEAL 8(R11), R11 + CMPL R8, $0x08 + JGE matchlen_loopback_repeat_extend_encodeSnappyBlockAsm10B + +matchlen_single_repeat_extend_encodeSnappyBlockAsm10B: + TESTL R8, R8 + JZ repeat_extend_forward_end_encodeSnappyBlockAsm10B + +matchlen_single_loopback_repeat_extend_encodeSnappyBlockAsm10B: + MOVB (R9)(R11*1), R10 + CMPB (SI)(R11*1), R10 + JNE repeat_extend_forward_end_encodeSnappyBlockAsm10B + LEAL 1(R11), R11 + DECL R8 + JNZ matchlen_single_loopback_repeat_extend_encodeSnappyBlockAsm10B + +repeat_extend_forward_end_encodeSnappyBlockAsm10B: + ADDL R11, CX + MOVL CX, SI + SUBL DI, SI + MOVL 16(SP), DI + + // emitCopy +two_byte_offset_repeat_as_copy_encodeSnappyBlockAsm10B: + CMPL SI, $0x40 + JLE two_byte_offset_short_repeat_as_copy_encodeSnappyBlockAsm10B + MOVB $0xee, (AX) + MOVW DI, 1(AX) + LEAL -60(SI), SI + ADDQ $0x03, AX + JMP two_byte_offset_repeat_as_copy_encodeSnappyBlockAsm10B + +two_byte_offset_short_repeat_as_copy_encodeSnappyBlockAsm10B: + CMPL SI, $0x0c + JGE emit_copy_three_repeat_as_copy_encodeSnappyBlockAsm10B + CMPL DI, $0x00000800 + JGE emit_copy_three_repeat_as_copy_encodeSnappyBlockAsm10B + MOVB $0x01, BL + LEAL -16(BX)(SI*4), SI + MOVB DI, 1(AX) + SHRL $0x08, DI + SHLL $0x05, DI + ORL DI, SI + MOVB SI, (AX) + ADDQ $0x02, AX + JMP repeat_end_emit_encodeSnappyBlockAsm10B + +emit_copy_three_repeat_as_copy_encodeSnappyBlockAsm10B: + MOVB $0x02, BL + LEAL -4(BX)(SI*4), SI + MOVB SI, (AX) + MOVW DI, 1(AX) + ADDQ $0x03, AX + +repeat_end_emit_encodeSnappyBlockAsm10B: + MOVL CX, 12(SP) + JMP search_loop_encodeSnappyBlockAsm10B + +no_repeat_found_encodeSnappyBlockAsm10B: + CMPL (DX)(SI*1), DI + JEQ candidate_match_encodeSnappyBlockAsm10B + SHRQ $0x08, DI + MOVL 24(SP)(R10*4), SI + LEAL 2(CX), R9 + CMPL (DX)(R8*1), DI + JEQ candidate2_match_encodeSnappyBlockAsm10B + MOVL R9, 24(SP)(R10*4) + SHRQ $0x08, DI + CMPL (DX)(SI*1), DI + JEQ candidate3_match_encodeSnappyBlockAsm10B + MOVL 20(SP), CX + JMP search_loop_encodeSnappyBlockAsm10B + +candidate3_match_encodeSnappyBlockAsm10B: + ADDL $0x02, CX + JMP candidate_match_encodeSnappyBlockAsm10B + +candidate2_match_encodeSnappyBlockAsm10B: + MOVL R9, 24(SP)(R10*4) + INCL 
CX + MOVL R8, SI + +candidate_match_encodeSnappyBlockAsm10B: + MOVL 12(SP), DI + TESTL SI, SI + JZ match_extend_back_end_encodeSnappyBlockAsm10B + +match_extend_back_loop_encodeSnappyBlockAsm10B: + CMPL CX, DI + JLE match_extend_back_end_encodeSnappyBlockAsm10B + MOVB -1(DX)(SI*1), BL + MOVB -1(DX)(CX*1), R8 + CMPB BL, R8 + JNE match_extend_back_end_encodeSnappyBlockAsm10B + LEAL -1(CX), CX + DECL SI + JZ match_extend_back_end_encodeSnappyBlockAsm10B + JMP match_extend_back_loop_encodeSnappyBlockAsm10B + +match_extend_back_end_encodeSnappyBlockAsm10B: + MOVL CX, DI + SUBL 12(SP), DI + LEAQ 3(AX)(DI*1), DI + CMPQ DI, (SP) + JL match_dst_size_check_encodeSnappyBlockAsm10B + MOVQ $0x00000000, ret+48(FP) + RET + +match_dst_size_check_encodeSnappyBlockAsm10B: + MOVL CX, DI + MOVL 12(SP), R8 + CMPL R8, DI + JEQ emit_literal_done_match_emit_encodeSnappyBlockAsm10B + MOVL DI, R9 + MOVL DI, 12(SP) + LEAQ (DX)(R8*1), DI + SUBL R8, R9 + LEAL -1(R9), R8 + CMPL R8, $0x3c + JLT one_byte_match_emit_encodeSnappyBlockAsm10B + CMPL R8, $0x00000100 + JLT two_bytes_match_emit_encodeSnappyBlockAsm10B + MOVB $0xf4, (AX) + MOVW R8, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_match_emit_encodeSnappyBlockAsm10B + +two_bytes_match_emit_encodeSnappyBlockAsm10B: + MOVB $0xf0, (AX) + MOVB R8, 1(AX) + ADDQ $0x02, AX + CMPL R8, $0x40 + JL memmove_match_emit_encodeSnappyBlockAsm10B + JMP memmove_long_match_emit_encodeSnappyBlockAsm10B + +one_byte_match_emit_encodeSnappyBlockAsm10B: + SHLB $0x02, R8 + MOVB R8, (AX) + ADDQ $0x01, AX + +memmove_match_emit_encodeSnappyBlockAsm10B: + LEAQ (AX)(R9*1), R8 + + // genMemMoveShort + CMPQ R9, $0x08 + JLE emit_lit_memmove_match_emit_encodeSnappyBlockAsm10B_memmove_move_8 + CMPQ R9, $0x10 + JBE emit_lit_memmove_match_emit_encodeSnappyBlockAsm10B_memmove_move_8through16 + CMPQ R9, $0x20 + JBE emit_lit_memmove_match_emit_encodeSnappyBlockAsm10B_memmove_move_17through32 + JMP emit_lit_memmove_match_emit_encodeSnappyBlockAsm10B_memmove_move_33through64 + +emit_lit_memmove_match_emit_encodeSnappyBlockAsm10B_memmove_move_8: + MOVQ (DI), R10 + MOVQ R10, (AX) + JMP memmove_end_copy_match_emit_encodeSnappyBlockAsm10B + +emit_lit_memmove_match_emit_encodeSnappyBlockAsm10B_memmove_move_8through16: + MOVQ (DI), R10 + MOVQ -8(DI)(R9*1), DI + MOVQ R10, (AX) + MOVQ DI, -8(AX)(R9*1) + JMP memmove_end_copy_match_emit_encodeSnappyBlockAsm10B + +emit_lit_memmove_match_emit_encodeSnappyBlockAsm10B_memmove_move_17through32: + MOVOU (DI), X0 + MOVOU -16(DI)(R9*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(R9*1) + JMP memmove_end_copy_match_emit_encodeSnappyBlockAsm10B + +emit_lit_memmove_match_emit_encodeSnappyBlockAsm10B_memmove_move_33through64: + MOVOU (DI), X0 + MOVOU 16(DI), X1 + MOVOU -32(DI)(R9*1), X2 + MOVOU -16(DI)(R9*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R9*1) + MOVOU X3, -16(AX)(R9*1) + +memmove_end_copy_match_emit_encodeSnappyBlockAsm10B: + MOVQ R8, AX + JMP emit_literal_done_match_emit_encodeSnappyBlockAsm10B + +memmove_long_match_emit_encodeSnappyBlockAsm10B: + LEAQ (AX)(R9*1), R8 + + // genMemMoveLong + MOVOU (DI), X0 + MOVOU 16(DI), X1 + MOVOU -32(DI)(R9*1), X2 + MOVOU -16(DI)(R9*1), X3 + MOVQ R9, R11 + SHRQ $0x05, R11 + MOVQ AX, R10 + ANDL $0x0000001f, R10 + MOVQ $0x00000040, R12 + SUBQ R10, R12 + DECQ R11 + JA emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm10Blarge_forward_sse_loop_32 + LEAQ -32(DI)(R12*1), R10 + LEAQ -32(AX)(R12*1), R13 + +emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm10Blarge_big_loop_back: + MOVOU (R10), X4 + MOVOU 16(R10), X5 + MOVOA 
X4, (R13) + MOVOA X5, 16(R13) + ADDQ $0x20, R13 + ADDQ $0x20, R10 + ADDQ $0x20, R12 + DECQ R11 + JNA emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm10Blarge_big_loop_back + +emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm10Blarge_forward_sse_loop_32: + MOVOU -32(DI)(R12*1), X4 + MOVOU -16(DI)(R12*1), X5 + MOVOA X4, -32(AX)(R12*1) + MOVOA X5, -16(AX)(R12*1) + ADDQ $0x20, R12 + CMPQ R9, R12 + JAE emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm10Blarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R9*1) + MOVOU X3, -16(AX)(R9*1) + MOVQ R8, AX + +emit_literal_done_match_emit_encodeSnappyBlockAsm10B: +match_nolit_loop_encodeSnappyBlockAsm10B: + MOVL CX, DI + SUBL SI, DI + MOVL DI, 16(SP) + ADDL $0x04, CX + ADDL $0x04, SI + MOVQ src_len+32(FP), DI + SUBL CX, DI + LEAQ (DX)(CX*1), R8 + LEAQ (DX)(SI*1), SI + + // matchLen + XORL R10, R10 + CMPL DI, $0x08 + JL matchlen_single_match_nolit_encodeSnappyBlockAsm10B + +matchlen_loopback_match_nolit_encodeSnappyBlockAsm10B: + MOVQ (R8)(R10*1), R9 + XORQ (SI)(R10*1), R9 + TESTQ R9, R9 + JZ matchlen_loop_match_nolit_encodeSnappyBlockAsm10B + BSFQ R9, R9 + SARQ $0x03, R9 + LEAL (R10)(R9*1), R10 + JMP match_nolit_end_encodeSnappyBlockAsm10B + +matchlen_loop_match_nolit_encodeSnappyBlockAsm10B: + LEAL -8(DI), DI + LEAL 8(R10), R10 + CMPL DI, $0x08 + JGE matchlen_loopback_match_nolit_encodeSnappyBlockAsm10B + +matchlen_single_match_nolit_encodeSnappyBlockAsm10B: + TESTL DI, DI + JZ match_nolit_end_encodeSnappyBlockAsm10B + +matchlen_single_loopback_match_nolit_encodeSnappyBlockAsm10B: + MOVB (R8)(R10*1), R9 + CMPB (SI)(R10*1), R9 + JNE match_nolit_end_encodeSnappyBlockAsm10B + LEAL 1(R10), R10 + DECL DI + JNZ matchlen_single_loopback_match_nolit_encodeSnappyBlockAsm10B + +match_nolit_end_encodeSnappyBlockAsm10B: + ADDL R10, CX + MOVL 16(SP), SI + ADDL $0x04, R10 + MOVL CX, 12(SP) + + // emitCopy +two_byte_offset_match_nolit_encodeSnappyBlockAsm10B: + CMPL R10, $0x40 + JLE two_byte_offset_short_match_nolit_encodeSnappyBlockAsm10B + MOVB $0xee, (AX) + MOVW SI, 1(AX) + LEAL -60(R10), R10 + ADDQ $0x03, AX + JMP two_byte_offset_match_nolit_encodeSnappyBlockAsm10B + +two_byte_offset_short_match_nolit_encodeSnappyBlockAsm10B: + CMPL R10, $0x0c + JGE emit_copy_three_match_nolit_encodeSnappyBlockAsm10B + CMPL SI, $0x00000800 + JGE emit_copy_three_match_nolit_encodeSnappyBlockAsm10B + MOVB $0x01, BL + LEAL -16(BX)(R10*4), R10 + MOVB SI, 1(AX) + SHRL $0x08, SI + SHLL $0x05, SI + ORL SI, R10 + MOVB R10, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeSnappyBlockAsm10B + +emit_copy_three_match_nolit_encodeSnappyBlockAsm10B: + MOVB $0x02, BL + LEAL -4(BX)(R10*4), R10 + MOVB R10, (AX) + MOVW SI, 1(AX) + ADDQ $0x03, AX + +match_nolit_emitcopy_end_encodeSnappyBlockAsm10B: + CMPL CX, 8(SP) + JGE emit_remainder_encodeSnappyBlockAsm10B + MOVQ -2(DX)(CX*1), DI + CMPQ AX, (SP) + JL match_nolit_dst_ok_encodeSnappyBlockAsm10B + MOVQ $0x00000000, ret+48(FP) + RET + +match_nolit_dst_ok_encodeSnappyBlockAsm10B: + MOVQ $0x9e3779b1, R9 + MOVQ DI, R8 + SHRQ $0x10, DI + MOVQ DI, SI + SHLQ $0x20, R8 + IMULQ R9, R8 + SHRQ $0x36, R8 + SHLQ $0x20, SI + IMULQ R9, SI + SHRQ $0x36, SI + LEAL -2(CX), R9 + LEAQ 24(SP)(SI*4), R10 + MOVL (R10), SI + MOVL R9, 24(SP)(R8*4) + MOVL CX, (R10) + CMPL (DX)(SI*1), DI + JEQ match_nolit_loop_encodeSnappyBlockAsm10B + INCL CX + JMP search_loop_encodeSnappyBlockAsm10B + +emit_remainder_encodeSnappyBlockAsm10B: + MOVQ src_len+32(FP), CX + SUBL 12(SP), CX + LEAQ 3(AX)(CX*1), CX + CMPQ CX, (SP) + JL 
emit_remainder_ok_encodeSnappyBlockAsm10B + MOVQ $0x00000000, ret+48(FP) + RET + +emit_remainder_ok_encodeSnappyBlockAsm10B: + MOVQ src_len+32(FP), CX + MOVL 12(SP), BX + CMPL BX, CX + JEQ emit_literal_done_emit_remainder_encodeSnappyBlockAsm10B + MOVL CX, SI + MOVL CX, 12(SP) + LEAQ (DX)(BX*1), CX + SUBL BX, SI + LEAL -1(SI), DX + CMPL DX, $0x3c + JLT one_byte_emit_remainder_encodeSnappyBlockAsm10B + CMPL DX, $0x00000100 + JLT two_bytes_emit_remainder_encodeSnappyBlockAsm10B + MOVB $0xf4, (AX) + MOVW DX, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_emit_remainder_encodeSnappyBlockAsm10B + +two_bytes_emit_remainder_encodeSnappyBlockAsm10B: + MOVB $0xf0, (AX) + MOVB DL, 1(AX) + ADDQ $0x02, AX + CMPL DX, $0x40 + JL memmove_emit_remainder_encodeSnappyBlockAsm10B + JMP memmove_long_emit_remainder_encodeSnappyBlockAsm10B + +one_byte_emit_remainder_encodeSnappyBlockAsm10B: + SHLB $0x02, DL + MOVB DL, (AX) + ADDQ $0x01, AX + +memmove_emit_remainder_encodeSnappyBlockAsm10B: + LEAQ (AX)(SI*1), DX + MOVL SI, BX + + // genMemMoveShort + CMPQ BX, $0x08 + JLE emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm10B_memmove_move_8 + CMPQ BX, $0x10 + JBE emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm10B_memmove_move_8through16 + CMPQ BX, $0x20 + JBE emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm10B_memmove_move_17through32 + JMP emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm10B_memmove_move_33through64 + +emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm10B_memmove_move_8: + MOVQ (CX), SI + MOVQ SI, (AX) + JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm10B + +emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm10B_memmove_move_8through16: + MOVQ (CX), SI + MOVQ -8(CX)(BX*1), CX + MOVQ SI, (AX) + MOVQ CX, -8(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm10B + +emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm10B_memmove_move_17through32: + MOVOU (CX), X0 + MOVOU -16(CX)(BX*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm10B + +emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm10B_memmove_move_33through64: + MOVOU (CX), X0 + MOVOU 16(CX), X1 + MOVOU -32(CX)(BX*1), X2 + MOVOU -16(CX)(BX*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(BX*1) + MOVOU X3, -16(AX)(BX*1) + +memmove_end_copy_emit_remainder_encodeSnappyBlockAsm10B: + MOVQ DX, AX + JMP emit_literal_done_emit_remainder_encodeSnappyBlockAsm10B + +memmove_long_emit_remainder_encodeSnappyBlockAsm10B: + LEAQ (AX)(SI*1), DX + MOVL SI, BX + + // genMemMoveLong + MOVOU (CX), X0 + MOVOU 16(CX), X1 + MOVOU -32(CX)(BX*1), X2 + MOVOU -16(CX)(BX*1), X3 + MOVQ BX, DI + SHRQ $0x05, DI + MOVQ AX, SI + ANDL $0x0000001f, SI + MOVQ $0x00000040, R8 + SUBQ SI, R8 + DECQ DI + JA emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm10Blarge_forward_sse_loop_32 + LEAQ -32(CX)(R8*1), SI + LEAQ -32(AX)(R8*1), R9 + +emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm10Blarge_big_loop_back: + MOVOU (SI), X4 + MOVOU 16(SI), X5 + MOVOA X4, (R9) + MOVOA X5, 16(R9) + ADDQ $0x20, R9 + ADDQ $0x20, SI + ADDQ $0x20, R8 + DECQ DI + JNA emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm10Blarge_big_loop_back + +emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm10Blarge_forward_sse_loop_32: + MOVOU -32(CX)(R8*1), X4 + MOVOU -16(CX)(R8*1), X5 + MOVOA X4, -32(AX)(R8*1) + MOVOA X5, -16(AX)(R8*1) + ADDQ $0x20, R8 + CMPQ BX, R8 + JAE emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm10Blarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU 
X1, 16(AX) + MOVOU X2, -32(AX)(BX*1) + MOVOU X3, -16(AX)(BX*1) + MOVQ DX, AX + +emit_literal_done_emit_remainder_encodeSnappyBlockAsm10B: + MOVQ dst_base+0(FP), CX + SUBQ CX, AX + MOVQ AX, ret+48(FP) + RET + +// func encodeSnappyBlockAsm8B(dst []byte, src []byte) int +// Requires: SSE2 +TEXT ·encodeSnappyBlockAsm8B(SB), $1048-56 + MOVQ dst_base+0(FP), AX + MOVQ $0x00000008, CX + LEAQ 24(SP), DX + PXOR X0, X0 + +zero_loop_encodeSnappyBlockAsm8B: + MOVOU X0, (DX) + MOVOU X0, 16(DX) + MOVOU X0, 32(DX) + MOVOU X0, 48(DX) + MOVOU X0, 64(DX) + MOVOU X0, 80(DX) + MOVOU X0, 96(DX) + MOVOU X0, 112(DX) + ADDQ $0x80, DX + DECQ CX + JNZ zero_loop_encodeSnappyBlockAsm8B + MOVL $0x00000000, 12(SP) + MOVQ src_len+32(FP), CX + LEAQ -9(CX), DX + LEAQ -8(CX), SI + MOVL SI, 8(SP) + SHRQ $0x05, CX + SUBL CX, DX + LEAQ (AX)(DX*1), DX + MOVQ DX, (SP) + MOVL $0x00000001, CX + MOVL CX, 16(SP) + MOVQ src_base+24(FP), DX + +search_loop_encodeSnappyBlockAsm8B: + MOVL CX, SI + SUBL 12(SP), SI + SHRL $0x04, SI + LEAL 4(CX)(SI*1), SI + CMPL SI, 8(SP) + JGE emit_remainder_encodeSnappyBlockAsm8B + MOVQ (DX)(CX*1), DI + MOVL SI, 20(SP) + MOVQ $0x9e3779b1, R9 + MOVQ DI, R10 + MOVQ DI, R11 + SHRQ $0x08, R11 + SHLQ $0x20, R10 + IMULQ R9, R10 + SHRQ $0x38, R10 + SHLQ $0x20, R11 + IMULQ R9, R11 + SHRQ $0x38, R11 + MOVL 24(SP)(R10*4), SI + MOVL 24(SP)(R11*4), R8 + MOVL CX, 24(SP)(R10*4) + LEAL 1(CX), R10 + MOVL R10, 24(SP)(R11*4) + MOVQ DI, R10 + SHRQ $0x10, R10 + SHLQ $0x20, R10 + IMULQ R9, R10 + SHRQ $0x38, R10 + MOVL CX, R9 + SUBL 16(SP), R9 + MOVL 1(DX)(R9*1), R11 + MOVQ DI, R9 + SHRQ $0x08, R9 + CMPL R9, R11 + JNE no_repeat_found_encodeSnappyBlockAsm8B + LEAL 1(CX), DI + MOVL 12(SP), SI + MOVL DI, R8 + SUBL 16(SP), R8 + JZ repeat_extend_back_end_encodeSnappyBlockAsm8B + +repeat_extend_back_loop_encodeSnappyBlockAsm8B: + CMPL DI, SI + JLE repeat_extend_back_end_encodeSnappyBlockAsm8B + MOVB -1(DX)(R8*1), BL + MOVB -1(DX)(DI*1), R9 + CMPB BL, R9 + JNE repeat_extend_back_end_encodeSnappyBlockAsm8B + LEAL -1(DI), DI + DECL R8 + JNZ repeat_extend_back_loop_encodeSnappyBlockAsm8B + +repeat_extend_back_end_encodeSnappyBlockAsm8B: + MOVL 12(SP), SI + CMPL SI, DI + JEQ emit_literal_done_repeat_emit_encodeSnappyBlockAsm8B + MOVL DI, R8 + MOVL DI, 12(SP) + LEAQ (DX)(SI*1), R9 + SUBL SI, R8 + LEAL -1(R8), SI + CMPL SI, $0x3c + JLT one_byte_repeat_emit_encodeSnappyBlockAsm8B + CMPL SI, $0x00000100 + JLT two_bytes_repeat_emit_encodeSnappyBlockAsm8B + MOVB $0xf4, (AX) + MOVW SI, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_repeat_emit_encodeSnappyBlockAsm8B + +two_bytes_repeat_emit_encodeSnappyBlockAsm8B: + MOVB $0xf0, (AX) + MOVB SI, 1(AX) + ADDQ $0x02, AX + CMPL SI, $0x40 + JL memmove_repeat_emit_encodeSnappyBlockAsm8B + JMP memmove_long_repeat_emit_encodeSnappyBlockAsm8B + +one_byte_repeat_emit_encodeSnappyBlockAsm8B: + SHLB $0x02, SI + MOVB SI, (AX) + ADDQ $0x01, AX + +memmove_repeat_emit_encodeSnappyBlockAsm8B: + LEAQ (AX)(R8*1), SI + + // genMemMoveShort + CMPQ R8, $0x08 + JLE emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm8B_memmove_move_8 + CMPQ R8, $0x10 + JBE emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm8B_memmove_move_8through16 + CMPQ R8, $0x20 + JBE emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm8B_memmove_move_17through32 + JMP emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm8B_memmove_move_33through64 + +emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm8B_memmove_move_8: + MOVQ (R9), R10 + MOVQ R10, (AX) + JMP memmove_end_copy_repeat_emit_encodeSnappyBlockAsm8B + 
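+	// genMemMoveShort: each size class copies the head and the tail of the
+	// run with overlapping loads and stores (a single MOVQ up to 8 bytes,
+	// an overlapping MOVQ pair up to 16, a MOVOU pair up to 32, four MOVOU
+	// up to 64), so any length in the class is handled branch-free once
+	// dispatched.
+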
+emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm8B_memmove_move_8through16: + MOVQ (R9), R10 + MOVQ -8(R9)(R8*1), R9 + MOVQ R10, (AX) + MOVQ R9, -8(AX)(R8*1) + JMP memmove_end_copy_repeat_emit_encodeSnappyBlockAsm8B + +emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm8B_memmove_move_17through32: + MOVOU (R9), X0 + MOVOU -16(R9)(R8*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(R8*1) + JMP memmove_end_copy_repeat_emit_encodeSnappyBlockAsm8B + +emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm8B_memmove_move_33through64: + MOVOU (R9), X0 + MOVOU 16(R9), X1 + MOVOU -32(R9)(R8*1), X2 + MOVOU -16(R9)(R8*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R8*1) + MOVOU X3, -16(AX)(R8*1) + +memmove_end_copy_repeat_emit_encodeSnappyBlockAsm8B: + MOVQ SI, AX + JMP emit_literal_done_repeat_emit_encodeSnappyBlockAsm8B + +memmove_long_repeat_emit_encodeSnappyBlockAsm8B: + LEAQ (AX)(R8*1), SI + + // genMemMoveLong + MOVOU (R9), X0 + MOVOU 16(R9), X1 + MOVOU -32(R9)(R8*1), X2 + MOVOU -16(R9)(R8*1), X3 + MOVQ R8, R11 + SHRQ $0x05, R11 + MOVQ AX, R10 + ANDL $0x0000001f, R10 + MOVQ $0x00000040, R12 + SUBQ R10, R12 + DECQ R11 + JA emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm8Blarge_forward_sse_loop_32 + LEAQ -32(R9)(R12*1), R10 + LEAQ -32(AX)(R12*1), R13 + +emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm8Blarge_big_loop_back: + MOVOU (R10), X4 + MOVOU 16(R10), X5 + MOVOA X4, (R13) + MOVOA X5, 16(R13) + ADDQ $0x20, R13 + ADDQ $0x20, R10 + ADDQ $0x20, R12 + DECQ R11 + JNA emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm8Blarge_big_loop_back + +emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm8Blarge_forward_sse_loop_32: + MOVOU -32(R9)(R12*1), X4 + MOVOU -16(R9)(R12*1), X5 + MOVOA X4, -32(AX)(R12*1) + MOVOA X5, -16(AX)(R12*1) + ADDQ $0x20, R12 + CMPQ R8, R12 + JAE emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm8Blarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R8*1) + MOVOU X3, -16(AX)(R8*1) + MOVQ SI, AX + +emit_literal_done_repeat_emit_encodeSnappyBlockAsm8B: + ADDL $0x05, CX + MOVL CX, SI + SUBL 16(SP), SI + MOVQ src_len+32(FP), R8 + SUBL CX, R8 + LEAQ (DX)(CX*1), R9 + LEAQ (DX)(SI*1), SI + + // matchLen + XORL R11, R11 + CMPL R8, $0x08 + JL matchlen_single_repeat_extend_encodeSnappyBlockAsm8B + +matchlen_loopback_repeat_extend_encodeSnappyBlockAsm8B: + MOVQ (R9)(R11*1), R10 + XORQ (SI)(R11*1), R10 + TESTQ R10, R10 + JZ matchlen_loop_repeat_extend_encodeSnappyBlockAsm8B + BSFQ R10, R10 + SARQ $0x03, R10 + LEAL (R11)(R10*1), R11 + JMP repeat_extend_forward_end_encodeSnappyBlockAsm8B + +matchlen_loop_repeat_extend_encodeSnappyBlockAsm8B: + LEAL -8(R8), R8 + LEAL 8(R11), R11 + CMPL R8, $0x08 + JGE matchlen_loopback_repeat_extend_encodeSnappyBlockAsm8B + +matchlen_single_repeat_extend_encodeSnappyBlockAsm8B: + TESTL R8, R8 + JZ repeat_extend_forward_end_encodeSnappyBlockAsm8B + +matchlen_single_loopback_repeat_extend_encodeSnappyBlockAsm8B: + MOVB (R9)(R11*1), R10 + CMPB (SI)(R11*1), R10 + JNE repeat_extend_forward_end_encodeSnappyBlockAsm8B + LEAL 1(R11), R11 + DECL R8 + JNZ matchlen_single_loopback_repeat_extend_encodeSnappyBlockAsm8B + +repeat_extend_forward_end_encodeSnappyBlockAsm8B: + ADDL R11, CX + MOVL CX, SI + SUBL DI, SI + MOVL 16(SP), DI + + // emitCopy +two_byte_offset_repeat_as_copy_encodeSnappyBlockAsm8B: + CMPL SI, $0x40 + JLE two_byte_offset_short_repeat_as_copy_encodeSnappyBlockAsm8B + MOVB $0xee, (AX) + MOVW DI, 1(AX) + LEAL -60(SI), SI + ADDQ $0x03, AX + JMP two_byte_offset_repeat_as_copy_encodeSnappyBlockAsm8B + 
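+	// emitCopy: while more than 64 bytes remain, the loop above emits
+	// three-byte copy-2 tags of length 60 (0xee = 59<<2 | 2) so the final
+	// chunk always fits one tag. The short path below uses the two-byte
+	// copy-1 form ((len-4)<<2 | 1, 11-bit offset) for lengths 4-11; the
+	// offset < 0x800 test present in the 12B/10B variants is elided here,
+	// the 8B encoder apparently being reserved for inputs small enough
+	// that every offset fits.
+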
+two_byte_offset_short_repeat_as_copy_encodeSnappyBlockAsm8B: + CMPL SI, $0x0c + JGE emit_copy_three_repeat_as_copy_encodeSnappyBlockAsm8B + MOVB $0x01, BL + LEAL -16(BX)(SI*4), SI + MOVB DI, 1(AX) + SHRL $0x08, DI + SHLL $0x05, DI + ORL DI, SI + MOVB SI, (AX) + ADDQ $0x02, AX + JMP repeat_end_emit_encodeSnappyBlockAsm8B + +emit_copy_three_repeat_as_copy_encodeSnappyBlockAsm8B: + MOVB $0x02, BL + LEAL -4(BX)(SI*4), SI + MOVB SI, (AX) + MOVW DI, 1(AX) + ADDQ $0x03, AX + +repeat_end_emit_encodeSnappyBlockAsm8B: + MOVL CX, 12(SP) + JMP search_loop_encodeSnappyBlockAsm8B + +no_repeat_found_encodeSnappyBlockAsm8B: + CMPL (DX)(SI*1), DI + JEQ candidate_match_encodeSnappyBlockAsm8B + SHRQ $0x08, DI + MOVL 24(SP)(R10*4), SI + LEAL 2(CX), R9 + CMPL (DX)(R8*1), DI + JEQ candidate2_match_encodeSnappyBlockAsm8B + MOVL R9, 24(SP)(R10*4) + SHRQ $0x08, DI + CMPL (DX)(SI*1), DI + JEQ candidate3_match_encodeSnappyBlockAsm8B + MOVL 20(SP), CX + JMP search_loop_encodeSnappyBlockAsm8B + +candidate3_match_encodeSnappyBlockAsm8B: + ADDL $0x02, CX + JMP candidate_match_encodeSnappyBlockAsm8B + +candidate2_match_encodeSnappyBlockAsm8B: + MOVL R9, 24(SP)(R10*4) + INCL CX + MOVL R8, SI + +candidate_match_encodeSnappyBlockAsm8B: + MOVL 12(SP), DI + TESTL SI, SI + JZ match_extend_back_end_encodeSnappyBlockAsm8B + +match_extend_back_loop_encodeSnappyBlockAsm8B: + CMPL CX, DI + JLE match_extend_back_end_encodeSnappyBlockAsm8B + MOVB -1(DX)(SI*1), BL + MOVB -1(DX)(CX*1), R8 + CMPB BL, R8 + JNE match_extend_back_end_encodeSnappyBlockAsm8B + LEAL -1(CX), CX + DECL SI + JZ match_extend_back_end_encodeSnappyBlockAsm8B + JMP match_extend_back_loop_encodeSnappyBlockAsm8B + +match_extend_back_end_encodeSnappyBlockAsm8B: + MOVL CX, DI + SUBL 12(SP), DI + LEAQ 3(AX)(DI*1), DI + CMPQ DI, (SP) + JL match_dst_size_check_encodeSnappyBlockAsm8B + MOVQ $0x00000000, ret+48(FP) + RET + +match_dst_size_check_encodeSnappyBlockAsm8B: + MOVL CX, DI + MOVL 12(SP), R8 + CMPL R8, DI + JEQ emit_literal_done_match_emit_encodeSnappyBlockAsm8B + MOVL DI, R9 + MOVL DI, 12(SP) + LEAQ (DX)(R8*1), DI + SUBL R8, R9 + LEAL -1(R9), R8 + CMPL R8, $0x3c + JLT one_byte_match_emit_encodeSnappyBlockAsm8B + CMPL R8, $0x00000100 + JLT two_bytes_match_emit_encodeSnappyBlockAsm8B + MOVB $0xf4, (AX) + MOVW R8, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_match_emit_encodeSnappyBlockAsm8B + +two_bytes_match_emit_encodeSnappyBlockAsm8B: + MOVB $0xf0, (AX) + MOVB R8, 1(AX) + ADDQ $0x02, AX + CMPL R8, $0x40 + JL memmove_match_emit_encodeSnappyBlockAsm8B + JMP memmove_long_match_emit_encodeSnappyBlockAsm8B + +one_byte_match_emit_encodeSnappyBlockAsm8B: + SHLB $0x02, R8 + MOVB R8, (AX) + ADDQ $0x01, AX + +memmove_match_emit_encodeSnappyBlockAsm8B: + LEAQ (AX)(R9*1), R8 + + // genMemMoveShort + CMPQ R9, $0x08 + JLE emit_lit_memmove_match_emit_encodeSnappyBlockAsm8B_memmove_move_8 + CMPQ R9, $0x10 + JBE emit_lit_memmove_match_emit_encodeSnappyBlockAsm8B_memmove_move_8through16 + CMPQ R9, $0x20 + JBE emit_lit_memmove_match_emit_encodeSnappyBlockAsm8B_memmove_move_17through32 + JMP emit_lit_memmove_match_emit_encodeSnappyBlockAsm8B_memmove_move_33through64 + +emit_lit_memmove_match_emit_encodeSnappyBlockAsm8B_memmove_move_8: + MOVQ (DI), R10 + MOVQ R10, (AX) + JMP memmove_end_copy_match_emit_encodeSnappyBlockAsm8B + +emit_lit_memmove_match_emit_encodeSnappyBlockAsm8B_memmove_move_8through16: + MOVQ (DI), R10 + MOVQ -8(DI)(R9*1), DI + MOVQ R10, (AX) + MOVQ DI, -8(AX)(R9*1) + JMP memmove_end_copy_match_emit_encodeSnappyBlockAsm8B + 
+emit_lit_memmove_match_emit_encodeSnappyBlockAsm8B_memmove_move_17through32: + MOVOU (DI), X0 + MOVOU -16(DI)(R9*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(R9*1) + JMP memmove_end_copy_match_emit_encodeSnappyBlockAsm8B + +emit_lit_memmove_match_emit_encodeSnappyBlockAsm8B_memmove_move_33through64: + MOVOU (DI), X0 + MOVOU 16(DI), X1 + MOVOU -32(DI)(R9*1), X2 + MOVOU -16(DI)(R9*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R9*1) + MOVOU X3, -16(AX)(R9*1) + +memmove_end_copy_match_emit_encodeSnappyBlockAsm8B: + MOVQ R8, AX + JMP emit_literal_done_match_emit_encodeSnappyBlockAsm8B + +memmove_long_match_emit_encodeSnappyBlockAsm8B: + LEAQ (AX)(R9*1), R8 + + // genMemMoveLong + MOVOU (DI), X0 + MOVOU 16(DI), X1 + MOVOU -32(DI)(R9*1), X2 + MOVOU -16(DI)(R9*1), X3 + MOVQ R9, R11 + SHRQ $0x05, R11 + MOVQ AX, R10 + ANDL $0x0000001f, R10 + MOVQ $0x00000040, R12 + SUBQ R10, R12 + DECQ R11 + JA emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm8Blarge_forward_sse_loop_32 + LEAQ -32(DI)(R12*1), R10 + LEAQ -32(AX)(R12*1), R13 + +emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm8Blarge_big_loop_back: + MOVOU (R10), X4 + MOVOU 16(R10), X5 + MOVOA X4, (R13) + MOVOA X5, 16(R13) + ADDQ $0x20, R13 + ADDQ $0x20, R10 + ADDQ $0x20, R12 + DECQ R11 + JNA emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm8Blarge_big_loop_back + +emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm8Blarge_forward_sse_loop_32: + MOVOU -32(DI)(R12*1), X4 + MOVOU -16(DI)(R12*1), X5 + MOVOA X4, -32(AX)(R12*1) + MOVOA X5, -16(AX)(R12*1) + ADDQ $0x20, R12 + CMPQ R9, R12 + JAE emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm8Blarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R9*1) + MOVOU X3, -16(AX)(R9*1) + MOVQ R8, AX + +emit_literal_done_match_emit_encodeSnappyBlockAsm8B: +match_nolit_loop_encodeSnappyBlockAsm8B: + MOVL CX, DI + SUBL SI, DI + MOVL DI, 16(SP) + ADDL $0x04, CX + ADDL $0x04, SI + MOVQ src_len+32(FP), DI + SUBL CX, DI + LEAQ (DX)(CX*1), R8 + LEAQ (DX)(SI*1), SI + + // matchLen + XORL R10, R10 + CMPL DI, $0x08 + JL matchlen_single_match_nolit_encodeSnappyBlockAsm8B + +matchlen_loopback_match_nolit_encodeSnappyBlockAsm8B: + MOVQ (R8)(R10*1), R9 + XORQ (SI)(R10*1), R9 + TESTQ R9, R9 + JZ matchlen_loop_match_nolit_encodeSnappyBlockAsm8B + BSFQ R9, R9 + SARQ $0x03, R9 + LEAL (R10)(R9*1), R10 + JMP match_nolit_end_encodeSnappyBlockAsm8B + +matchlen_loop_match_nolit_encodeSnappyBlockAsm8B: + LEAL -8(DI), DI + LEAL 8(R10), R10 + CMPL DI, $0x08 + JGE matchlen_loopback_match_nolit_encodeSnappyBlockAsm8B + +matchlen_single_match_nolit_encodeSnappyBlockAsm8B: + TESTL DI, DI + JZ match_nolit_end_encodeSnappyBlockAsm8B + +matchlen_single_loopback_match_nolit_encodeSnappyBlockAsm8B: + MOVB (R8)(R10*1), R9 + CMPB (SI)(R10*1), R9 + JNE match_nolit_end_encodeSnappyBlockAsm8B + LEAL 1(R10), R10 + DECL DI + JNZ matchlen_single_loopback_match_nolit_encodeSnappyBlockAsm8B + +match_nolit_end_encodeSnappyBlockAsm8B: + ADDL R10, CX + MOVL 16(SP), SI + ADDL $0x04, R10 + MOVL CX, 12(SP) + + // emitCopy +two_byte_offset_match_nolit_encodeSnappyBlockAsm8B: + CMPL R10, $0x40 + JLE two_byte_offset_short_match_nolit_encodeSnappyBlockAsm8B + MOVB $0xee, (AX) + MOVW SI, 1(AX) + LEAL -60(R10), R10 + ADDQ $0x03, AX + JMP two_byte_offset_match_nolit_encodeSnappyBlockAsm8B + +two_byte_offset_short_match_nolit_encodeSnappyBlockAsm8B: + CMPL R10, $0x0c + JGE emit_copy_three_match_nolit_encodeSnappyBlockAsm8B + MOVB $0x01, BL + LEAL -16(BX)(R10*4), R10 + MOVB SI, 1(AX) + SHRL $0x08, SI + SHLL 
$0x05, SI + ORL SI, R10 + MOVB R10, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeSnappyBlockAsm8B + +emit_copy_three_match_nolit_encodeSnappyBlockAsm8B: + MOVB $0x02, BL + LEAL -4(BX)(R10*4), R10 + MOVB R10, (AX) + MOVW SI, 1(AX) + ADDQ $0x03, AX + +match_nolit_emitcopy_end_encodeSnappyBlockAsm8B: + CMPL CX, 8(SP) + JGE emit_remainder_encodeSnappyBlockAsm8B + MOVQ -2(DX)(CX*1), DI + CMPQ AX, (SP) + JL match_nolit_dst_ok_encodeSnappyBlockAsm8B + MOVQ $0x00000000, ret+48(FP) + RET + +match_nolit_dst_ok_encodeSnappyBlockAsm8B: + MOVQ $0x9e3779b1, R9 + MOVQ DI, R8 + SHRQ $0x10, DI + MOVQ DI, SI + SHLQ $0x20, R8 + IMULQ R9, R8 + SHRQ $0x38, R8 + SHLQ $0x20, SI + IMULQ R9, SI + SHRQ $0x38, SI + LEAL -2(CX), R9 + LEAQ 24(SP)(SI*4), R10 + MOVL (R10), SI + MOVL R9, 24(SP)(R8*4) + MOVL CX, (R10) + CMPL (DX)(SI*1), DI + JEQ match_nolit_loop_encodeSnappyBlockAsm8B + INCL CX + JMP search_loop_encodeSnappyBlockAsm8B + +emit_remainder_encodeSnappyBlockAsm8B: + MOVQ src_len+32(FP), CX + SUBL 12(SP), CX + LEAQ 3(AX)(CX*1), CX + CMPQ CX, (SP) + JL emit_remainder_ok_encodeSnappyBlockAsm8B + MOVQ $0x00000000, ret+48(FP) + RET + +emit_remainder_ok_encodeSnappyBlockAsm8B: + MOVQ src_len+32(FP), CX + MOVL 12(SP), BX + CMPL BX, CX + JEQ emit_literal_done_emit_remainder_encodeSnappyBlockAsm8B + MOVL CX, SI + MOVL CX, 12(SP) + LEAQ (DX)(BX*1), CX + SUBL BX, SI + LEAL -1(SI), DX + CMPL DX, $0x3c + JLT one_byte_emit_remainder_encodeSnappyBlockAsm8B + CMPL DX, $0x00000100 + JLT two_bytes_emit_remainder_encodeSnappyBlockAsm8B + MOVB $0xf4, (AX) + MOVW DX, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_emit_remainder_encodeSnappyBlockAsm8B + +two_bytes_emit_remainder_encodeSnappyBlockAsm8B: + MOVB $0xf0, (AX) + MOVB DL, 1(AX) + ADDQ $0x02, AX + CMPL DX, $0x40 + JL memmove_emit_remainder_encodeSnappyBlockAsm8B + JMP memmove_long_emit_remainder_encodeSnappyBlockAsm8B + +one_byte_emit_remainder_encodeSnappyBlockAsm8B: + SHLB $0x02, DL + MOVB DL, (AX) + ADDQ $0x01, AX + +memmove_emit_remainder_encodeSnappyBlockAsm8B: + LEAQ (AX)(SI*1), DX + MOVL SI, BX + + // genMemMoveShort + CMPQ BX, $0x08 + JLE emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm8B_memmove_move_8 + CMPQ BX, $0x10 + JBE emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm8B_memmove_move_8through16 + CMPQ BX, $0x20 + JBE emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm8B_memmove_move_17through32 + JMP emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm8B_memmove_move_33through64 + +emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm8B_memmove_move_8: + MOVQ (CX), SI + MOVQ SI, (AX) + JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm8B + +emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm8B_memmove_move_8through16: + MOVQ (CX), SI + MOVQ -8(CX)(BX*1), CX + MOVQ SI, (AX) + MOVQ CX, -8(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm8B + +emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm8B_memmove_move_17through32: + MOVOU (CX), X0 + MOVOU -16(CX)(BX*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm8B + +emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm8B_memmove_move_33through64: + MOVOU (CX), X0 + MOVOU 16(CX), X1 + MOVOU -32(CX)(BX*1), X2 + MOVOU -16(CX)(BX*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(BX*1) + MOVOU X3, -16(AX)(BX*1) + +memmove_end_copy_emit_remainder_encodeSnappyBlockAsm8B: + MOVQ DX, AX + JMP emit_literal_done_emit_remainder_encodeSnappyBlockAsm8B + +memmove_long_emit_remainder_encodeSnappyBlockAsm8B: + LEAQ 
(AX)(SI*1), DX + MOVL SI, BX + + // genMemMoveLong + MOVOU (CX), X0 + MOVOU 16(CX), X1 + MOVOU -32(CX)(BX*1), X2 + MOVOU -16(CX)(BX*1), X3 + MOVQ BX, DI + SHRQ $0x05, DI + MOVQ AX, SI + ANDL $0x0000001f, SI + MOVQ $0x00000040, R8 + SUBQ SI, R8 + DECQ DI + JA emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm8Blarge_forward_sse_loop_32 + LEAQ -32(CX)(R8*1), SI + LEAQ -32(AX)(R8*1), R9 + +emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm8Blarge_big_loop_back: + MOVOU (SI), X4 + MOVOU 16(SI), X5 + MOVOA X4, (R9) + MOVOA X5, 16(R9) + ADDQ $0x20, R9 + ADDQ $0x20, SI + ADDQ $0x20, R8 + DECQ DI + JNA emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm8Blarge_big_loop_back + +emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm8Blarge_forward_sse_loop_32: + MOVOU -32(CX)(R8*1), X4 + MOVOU -16(CX)(R8*1), X5 + MOVOA X4, -32(AX)(R8*1) + MOVOA X5, -16(AX)(R8*1) + ADDQ $0x20, R8 + CMPQ BX, R8 + JAE emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm8Blarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(BX*1) + MOVOU X3, -16(AX)(BX*1) + MOVQ DX, AX + +emit_literal_done_emit_remainder_encodeSnappyBlockAsm8B: + MOVQ dst_base+0(FP), CX + SUBQ CX, AX + MOVQ AX, ret+48(FP) + RET + +// func encodeSnappyBetterBlockAsm(dst []byte, src []byte) int +// Requires: SSE2 +TEXT ·encodeSnappyBetterBlockAsm(SB), $327704-56 + MOVQ dst_base+0(FP), AX + MOVQ $0x00000a00, CX + LEAQ 24(SP), DX + PXOR X0, X0 + +zero_loop_encodeSnappyBetterBlockAsm: + MOVOU X0, (DX) + MOVOU X0, 16(DX) + MOVOU X0, 32(DX) + MOVOU X0, 48(DX) + MOVOU X0, 64(DX) + MOVOU X0, 80(DX) + MOVOU X0, 96(DX) + MOVOU X0, 112(DX) + ADDQ $0x80, DX + DECQ CX + JNZ zero_loop_encodeSnappyBetterBlockAsm + MOVL $0x00000000, 12(SP) + MOVQ src_len+32(FP), CX + LEAQ -9(CX), DX + LEAQ -8(CX), SI + MOVL SI, 8(SP) + SHRQ $0x05, CX + SUBL CX, DX + LEAQ (AX)(DX*1), DX + MOVQ DX, (SP) + MOVL $0x00000001, CX + MOVL $0x00000000, 16(SP) + MOVQ src_base+24(FP), DX + +search_loop_encodeSnappyBetterBlockAsm: + MOVL CX, SI + SUBL 12(SP), SI + SHRL $0x07, SI + CMPL SI, $0x63 + JLE check_maxskip_ok_encodeSnappyBetterBlockAsm + LEAL 100(CX), SI + JMP check_maxskip_cont_encodeSnappyBetterBlockAsm + +check_maxskip_ok_encodeSnappyBetterBlockAsm: + LEAL 1(CX)(SI*1), SI + +check_maxskip_cont_encodeSnappyBetterBlockAsm: + CMPL SI, 8(SP) + JGE emit_remainder_encodeSnappyBetterBlockAsm + MOVQ (DX)(CX*1), DI + MOVL SI, 20(SP) + MOVQ $0x00cf1bbcdcbfa563, R9 + MOVQ $0x9e3779b1, SI + MOVQ DI, R10 + MOVQ DI, R11 + SHLQ $0x08, R10 + IMULQ R9, R10 + SHRQ $0x30, R10 + SHLQ $0x20, R11 + IMULQ SI, R11 + SHRQ $0x32, R11 + MOVL 24(SP)(R10*4), SI + MOVL 262168(SP)(R11*4), R8 + MOVL CX, 24(SP)(R10*4) + MOVL CX, 262168(SP)(R11*4) + CMPL (DX)(SI*1), DI + JEQ candidate_match_encodeSnappyBetterBlockAsm + CMPL (DX)(R8*1), DI + JEQ candidateS_match_encodeSnappyBetterBlockAsm + MOVL 20(SP), CX + JMP search_loop_encodeSnappyBetterBlockAsm + +candidateS_match_encodeSnappyBetterBlockAsm: + SHRQ $0x08, DI + MOVQ DI, R10 + SHLQ $0x08, R10 + IMULQ R9, R10 + SHRQ $0x30, R10 + MOVL 24(SP)(R10*4), SI + INCL CX + MOVL CX, 24(SP)(R10*4) + CMPL (DX)(SI*1), DI + JEQ candidate_match_encodeSnappyBetterBlockAsm + DECL CX + MOVL R8, SI + +candidate_match_encodeSnappyBetterBlockAsm: + MOVL 12(SP), DI + TESTL SI, SI + JZ match_extend_back_end_encodeSnappyBetterBlockAsm + +match_extend_back_loop_encodeSnappyBetterBlockAsm: + CMPL CX, DI + JLE match_extend_back_end_encodeSnappyBetterBlockAsm + MOVB -1(DX)(SI*1), BL + MOVB -1(DX)(CX*1), R8 + CMPB BL, R8 + JNE 
match_extend_back_end_encodeSnappyBetterBlockAsm + LEAL -1(CX), CX + DECL SI + JZ match_extend_back_end_encodeSnappyBetterBlockAsm + JMP match_extend_back_loop_encodeSnappyBetterBlockAsm + +match_extend_back_end_encodeSnappyBetterBlockAsm: + MOVL CX, DI + SUBL 12(SP), DI + LEAQ 5(AX)(DI*1), DI + CMPQ DI, (SP) + JL match_dst_size_check_encodeSnappyBetterBlockAsm + MOVQ $0x00000000, ret+48(FP) + RET + +match_dst_size_check_encodeSnappyBetterBlockAsm: + MOVL CX, DI + ADDL $0x04, CX + ADDL $0x04, SI + MOVQ src_len+32(FP), R8 + SUBL CX, R8 + LEAQ (DX)(CX*1), R9 + LEAQ (DX)(SI*1), R10 + + // matchLen + XORL R12, R12 + CMPL R8, $0x08 + JL matchlen_single_match_nolit_encodeSnappyBetterBlockAsm + +matchlen_loopback_match_nolit_encodeSnappyBetterBlockAsm: + MOVQ (R9)(R12*1), R11 + XORQ (R10)(R12*1), R11 + TESTQ R11, R11 + JZ matchlen_loop_match_nolit_encodeSnappyBetterBlockAsm + BSFQ R11, R11 + SARQ $0x03, R11 + LEAL (R12)(R11*1), R12 + JMP match_nolit_end_encodeSnappyBetterBlockAsm + +matchlen_loop_match_nolit_encodeSnappyBetterBlockAsm: + LEAL -8(R8), R8 + LEAL 8(R12), R12 + CMPL R8, $0x08 + JGE matchlen_loopback_match_nolit_encodeSnappyBetterBlockAsm + +matchlen_single_match_nolit_encodeSnappyBetterBlockAsm: + TESTL R8, R8 + JZ match_nolit_end_encodeSnappyBetterBlockAsm + +matchlen_single_loopback_match_nolit_encodeSnappyBetterBlockAsm: + MOVB (R9)(R12*1), R11 + CMPB (R10)(R12*1), R11 + JNE match_nolit_end_encodeSnappyBetterBlockAsm + LEAL 1(R12), R12 + DECL R8 + JNZ matchlen_single_loopback_match_nolit_encodeSnappyBetterBlockAsm + +match_nolit_end_encodeSnappyBetterBlockAsm: + MOVL CX, R8 + SUBL SI, R8 + + // Check if repeat + CMPL R12, $0x01 + JG match_length_ok_encodeSnappyBetterBlockAsm + CMPL R8, $0x0000ffff + JLE match_length_ok_encodeSnappyBetterBlockAsm + MOVL 20(SP), CX + INCL CX + JMP search_loop_encodeSnappyBetterBlockAsm + +match_length_ok_encodeSnappyBetterBlockAsm: + MOVL R8, 16(SP) + MOVL 12(SP), SI + CMPL SI, DI + JEQ emit_literal_done_match_emit_encodeSnappyBetterBlockAsm + MOVL DI, R9 + MOVL DI, 12(SP) + LEAQ (DX)(SI*1), R10 + SUBL SI, R9 + LEAL -1(R9), SI + CMPL SI, $0x3c + JLT one_byte_match_emit_encodeSnappyBetterBlockAsm + CMPL SI, $0x00000100 + JLT two_bytes_match_emit_encodeSnappyBetterBlockAsm + CMPL SI, $0x00010000 + JLT three_bytes_match_emit_encodeSnappyBetterBlockAsm + CMPL SI, $0x01000000 + JLT four_bytes_match_emit_encodeSnappyBetterBlockAsm + MOVB $0xfc, (AX) + MOVL SI, 1(AX) + ADDQ $0x05, AX + JMP memmove_long_match_emit_encodeSnappyBetterBlockAsm + +four_bytes_match_emit_encodeSnappyBetterBlockAsm: + MOVL SI, R11 + SHRL $0x10, R11 + MOVB $0xf8, (AX) + MOVW SI, 1(AX) + MOVB R11, 3(AX) + ADDQ $0x04, AX + JMP memmove_long_match_emit_encodeSnappyBetterBlockAsm + +three_bytes_match_emit_encodeSnappyBetterBlockAsm: + MOVB $0xf4, (AX) + MOVW SI, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_match_emit_encodeSnappyBetterBlockAsm + +two_bytes_match_emit_encodeSnappyBetterBlockAsm: + MOVB $0xf0, (AX) + MOVB SI, 1(AX) + ADDQ $0x02, AX + CMPL SI, $0x40 + JL memmove_match_emit_encodeSnappyBetterBlockAsm + JMP memmove_long_match_emit_encodeSnappyBetterBlockAsm + +one_byte_match_emit_encodeSnappyBetterBlockAsm: + SHLB $0x02, SI + MOVB SI, (AX) + ADDQ $0x01, AX + +memmove_match_emit_encodeSnappyBetterBlockAsm: + LEAQ (AX)(R9*1), SI + + // genMemMoveShort + CMPQ R9, $0x08 + JLE emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm_memmove_move_8 + CMPQ R9, $0x10 + JBE emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm_memmove_move_8through16 + CMPQ R9, $0x20 + JBE 
emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm_memmove_move_17through32 + JMP emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm_memmove_move_33through64 + +emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm_memmove_move_8: + MOVQ (R10), R11 + MOVQ R11, (AX) + JMP memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm + +emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm_memmove_move_8through16: + MOVQ (R10), R11 + MOVQ -8(R10)(R9*1), R10 + MOVQ R11, (AX) + MOVQ R10, -8(AX)(R9*1) + JMP memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm + +emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm_memmove_move_17through32: + MOVOU (R10), X0 + MOVOU -16(R10)(R9*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(R9*1) + JMP memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm + +emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm_memmove_move_33through64: + MOVOU (R10), X0 + MOVOU 16(R10), X1 + MOVOU -32(R10)(R9*1), X2 + MOVOU -16(R10)(R9*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R9*1) + MOVOU X3, -16(AX)(R9*1) + +memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm: + MOVQ SI, AX + JMP emit_literal_done_match_emit_encodeSnappyBetterBlockAsm + +memmove_long_match_emit_encodeSnappyBetterBlockAsm: + LEAQ (AX)(R9*1), SI + + // genMemMoveLong + MOVOU (R10), X0 + MOVOU 16(R10), X1 + MOVOU -32(R10)(R9*1), X2 + MOVOU -16(R10)(R9*1), X3 + MOVQ R9, R13 + SHRQ $0x05, R13 + MOVQ AX, R11 + ANDL $0x0000001f, R11 + MOVQ $0x00000040, R14 + SUBQ R11, R14 + DECQ R13 + JA emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsmlarge_forward_sse_loop_32 + LEAQ -32(R10)(R14*1), R11 + LEAQ -32(AX)(R14*1), R15 + +emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsmlarge_big_loop_back: + MOVOU (R11), X4 + MOVOU 16(R11), X5 + MOVOA X4, (R15) + MOVOA X5, 16(R15) + ADDQ $0x20, R15 + ADDQ $0x20, R11 + ADDQ $0x20, R14 + DECQ R13 + JNA emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsmlarge_big_loop_back + +emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsmlarge_forward_sse_loop_32: + MOVOU -32(R10)(R14*1), X4 + MOVOU -16(R10)(R14*1), X5 + MOVOA X4, -32(AX)(R14*1) + MOVOA X5, -16(AX)(R14*1) + ADDQ $0x20, R14 + CMPQ R9, R14 + JAE emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsmlarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R9*1) + MOVOU X3, -16(AX)(R9*1) + MOVQ SI, AX + +emit_literal_done_match_emit_encodeSnappyBetterBlockAsm: + ADDL R12, CX + ADDL $0x04, R12 + MOVL CX, 12(SP) + + // emitCopy + CMPL R8, $0x00010000 + JL two_byte_offset_match_nolit_encodeSnappyBetterBlockAsm + +four_bytes_loop_back_match_nolit_encodeSnappyBetterBlockAsm: + CMPL R12, $0x40 + JLE four_bytes_remain_match_nolit_encodeSnappyBetterBlockAsm + MOVB $0xff, (AX) + MOVL R8, 1(AX) + LEAL -64(R12), R12 + ADDQ $0x05, AX + CMPL R12, $0x04 + JL four_bytes_remain_match_nolit_encodeSnappyBetterBlockAsm + JMP four_bytes_loop_back_match_nolit_encodeSnappyBetterBlockAsm + +four_bytes_remain_match_nolit_encodeSnappyBetterBlockAsm: + TESTL R12, R12 + JZ match_nolit_emitcopy_end_encodeSnappyBetterBlockAsm + MOVB $0x03, BL + LEAL -4(BX)(R12*4), R12 + MOVB R12, (AX) + MOVL R8, 1(AX) + ADDQ $0x05, AX + JMP match_nolit_emitcopy_end_encodeSnappyBetterBlockAsm + +two_byte_offset_match_nolit_encodeSnappyBetterBlockAsm: + CMPL R12, $0x40 + JLE two_byte_offset_short_match_nolit_encodeSnappyBetterBlockAsm + MOVB $0xee, (AX) + MOVW R8, 1(AX) + LEAL -60(R12), R12 + ADDQ $0x03, AX + JMP two_byte_offset_match_nolit_encodeSnappyBetterBlockAsm + 
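+	// Offsets of 64KiB and up take Snappy's five-byte copy-4 tag instead
+	// (0xff = 63<<2 | 3: length 64 plus a 32-bit little-endian offset),
+	// emitted in 64-byte chunks by the four_bytes loop above; smaller
+	// offsets fall through to the two- and three-byte forms below.
+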
+two_byte_offset_short_match_nolit_encodeSnappyBetterBlockAsm: + CMPL R12, $0x0c + JGE emit_copy_three_match_nolit_encodeSnappyBetterBlockAsm + CMPL R8, $0x00000800 + JGE emit_copy_three_match_nolit_encodeSnappyBetterBlockAsm + MOVB $0x01, BL + LEAL -16(BX)(R12*4), R12 + MOVB R8, 1(AX) + SHRL $0x08, R8 + SHLL $0x05, R8 + ORL R8, R12 + MOVB R12, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeSnappyBetterBlockAsm + +emit_copy_three_match_nolit_encodeSnappyBetterBlockAsm: + MOVB $0x02, BL + LEAL -4(BX)(R12*4), R12 + MOVB R12, (AX) + MOVW R8, 1(AX) + ADDQ $0x03, AX + +match_nolit_emitcopy_end_encodeSnappyBetterBlockAsm: + CMPL CX, 8(SP) + JGE emit_remainder_encodeSnappyBetterBlockAsm + CMPQ AX, (SP) + JL match_nolit_dst_ok_encodeSnappyBetterBlockAsm + MOVQ $0x00000000, ret+48(FP) + RET + +match_nolit_dst_ok_encodeSnappyBetterBlockAsm: + MOVQ $0x00cf1bbcdcbfa563, SI + MOVQ $0x9e3779b1, R8 + INCL DI + MOVQ (DX)(DI*1), R9 + MOVQ R9, R10 + MOVQ R9, R11 + MOVQ R9, R12 + SHRQ $0x08, R11 + MOVQ R11, R13 + SHRQ $0x10, R12 + LEAL 1(DI), R14 + LEAL 2(DI), R15 + MOVQ -2(DX)(CX*1), R9 + SHLQ $0x08, R10 + IMULQ SI, R10 + SHRQ $0x30, R10 + SHLQ $0x08, R13 + IMULQ SI, R13 + SHRQ $0x30, R13 + SHLQ $0x20, R11 + IMULQ R8, R11 + SHRQ $0x32, R11 + SHLQ $0x20, R12 + IMULQ R8, R12 + SHRQ $0x32, R12 + MOVL DI, 24(SP)(R10*4) + MOVL R14, 24(SP)(R13*4) + MOVL R14, 262168(SP)(R11*4) + MOVL R15, 262168(SP)(R12*4) + MOVQ R9, R10 + MOVQ R9, R11 + SHRQ $0x08, R11 + MOVQ R11, R13 + LEAL -2(CX), R9 + LEAL -1(CX), DI + SHLQ $0x08, R10 + IMULQ SI, R10 + SHRQ $0x30, R10 + SHLQ $0x20, R11 + IMULQ R8, R11 + SHRQ $0x32, R11 + SHLQ $0x08, R13 + IMULQ SI, R13 + SHRQ $0x30, R13 + MOVL R9, 24(SP)(R10*4) + MOVL DI, 262168(SP)(R11*4) + MOVL DI, 24(SP)(R13*4) + JMP search_loop_encodeSnappyBetterBlockAsm + +emit_remainder_encodeSnappyBetterBlockAsm: + MOVQ src_len+32(FP), CX + SUBL 12(SP), CX + LEAQ 5(AX)(CX*1), CX + CMPQ CX, (SP) + JL emit_remainder_ok_encodeSnappyBetterBlockAsm + MOVQ $0x00000000, ret+48(FP) + RET + +emit_remainder_ok_encodeSnappyBetterBlockAsm: + MOVQ src_len+32(FP), CX + MOVL 12(SP), BX + CMPL BX, CX + JEQ emit_literal_done_emit_remainder_encodeSnappyBetterBlockAsm + MOVL CX, SI + MOVL CX, 12(SP) + LEAQ (DX)(BX*1), CX + SUBL BX, SI + LEAL -1(SI), DX + CMPL DX, $0x3c + JLT one_byte_emit_remainder_encodeSnappyBetterBlockAsm + CMPL DX, $0x00000100 + JLT two_bytes_emit_remainder_encodeSnappyBetterBlockAsm + CMPL DX, $0x00010000 + JLT three_bytes_emit_remainder_encodeSnappyBetterBlockAsm + CMPL DX, $0x01000000 + JLT four_bytes_emit_remainder_encodeSnappyBetterBlockAsm + MOVB $0xfc, (AX) + MOVL DX, 1(AX) + ADDQ $0x05, AX + JMP memmove_long_emit_remainder_encodeSnappyBetterBlockAsm + +four_bytes_emit_remainder_encodeSnappyBetterBlockAsm: + MOVL DX, BX + SHRL $0x10, BX + MOVB $0xf8, (AX) + MOVW DX, 1(AX) + MOVB BL, 3(AX) + ADDQ $0x04, AX + JMP memmove_long_emit_remainder_encodeSnappyBetterBlockAsm + +three_bytes_emit_remainder_encodeSnappyBetterBlockAsm: + MOVB $0xf4, (AX) + MOVW DX, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_emit_remainder_encodeSnappyBetterBlockAsm + +two_bytes_emit_remainder_encodeSnappyBetterBlockAsm: + MOVB $0xf0, (AX) + MOVB DL, 1(AX) + ADDQ $0x02, AX + CMPL DX, $0x40 + JL memmove_emit_remainder_encodeSnappyBetterBlockAsm + JMP memmove_long_emit_remainder_encodeSnappyBetterBlockAsm + +one_byte_emit_remainder_encodeSnappyBetterBlockAsm: + SHLB $0x02, DL + MOVB DL, (AX) + ADDQ $0x01, AX + +memmove_emit_remainder_encodeSnappyBetterBlockAsm: + LEAQ (AX)(SI*1), DX + MOVL SI, BX + + // 
genMemMoveShort + CMPQ BX, $0x08 + JLE emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm_memmove_move_8 + CMPQ BX, $0x10 + JBE emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm_memmove_move_8through16 + CMPQ BX, $0x20 + JBE emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm_memmove_move_17through32 + JMP emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm_memmove_move_33through64 + +emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm_memmove_move_8: + MOVQ (CX), SI + MOVQ SI, (AX) + JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm + +emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm_memmove_move_8through16: + MOVQ (CX), SI + MOVQ -8(CX)(BX*1), CX + MOVQ SI, (AX) + MOVQ CX, -8(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm + +emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm_memmove_move_17through32: + MOVOU (CX), X0 + MOVOU -16(CX)(BX*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm + +emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm_memmove_move_33through64: + MOVOU (CX), X0 + MOVOU 16(CX), X1 + MOVOU -32(CX)(BX*1), X2 + MOVOU -16(CX)(BX*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(BX*1) + MOVOU X3, -16(AX)(BX*1) + +memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm: + MOVQ DX, AX + JMP emit_literal_done_emit_remainder_encodeSnappyBetterBlockAsm + +memmove_long_emit_remainder_encodeSnappyBetterBlockAsm: + LEAQ (AX)(SI*1), DX + MOVL SI, BX + + // genMemMoveLong + MOVOU (CX), X0 + MOVOU 16(CX), X1 + MOVOU -32(CX)(BX*1), X2 + MOVOU -16(CX)(BX*1), X3 + MOVQ BX, DI + SHRQ $0x05, DI + MOVQ AX, SI + ANDL $0x0000001f, SI + MOVQ $0x00000040, R8 + SUBQ SI, R8 + DECQ DI + JA emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsmlarge_forward_sse_loop_32 + LEAQ -32(CX)(R8*1), SI + LEAQ -32(AX)(R8*1), R9 + +emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsmlarge_big_loop_back: + MOVOU (SI), X4 + MOVOU 16(SI), X5 + MOVOA X4, (R9) + MOVOA X5, 16(R9) + ADDQ $0x20, R9 + ADDQ $0x20, SI + ADDQ $0x20, R8 + DECQ DI + JNA emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsmlarge_big_loop_back + +emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsmlarge_forward_sse_loop_32: + MOVOU -32(CX)(R8*1), X4 + MOVOU -16(CX)(R8*1), X5 + MOVOA X4, -32(AX)(R8*1) + MOVOA X5, -16(AX)(R8*1) + ADDQ $0x20, R8 + CMPQ BX, R8 + JAE emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsmlarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(BX*1) + MOVOU X3, -16(AX)(BX*1) + MOVQ DX, AX + +emit_literal_done_emit_remainder_encodeSnappyBetterBlockAsm: + MOVQ dst_base+0(FP), CX + SUBQ CX, AX + MOVQ AX, ret+48(FP) + RET + +// func encodeSnappyBetterBlockAsm64K(dst []byte, src []byte) int +// Requires: SSE2 +TEXT ·encodeSnappyBetterBlockAsm64K(SB), $327704-56 + MOVQ dst_base+0(FP), AX + MOVQ $0x00000a00, CX + LEAQ 24(SP), DX + PXOR X0, X0 + +zero_loop_encodeSnappyBetterBlockAsm64K: + MOVOU X0, (DX) + MOVOU X0, 16(DX) + MOVOU X0, 32(DX) + MOVOU X0, 48(DX) + MOVOU X0, 64(DX) + MOVOU X0, 80(DX) + MOVOU X0, 96(DX) + MOVOU X0, 112(DX) + ADDQ $0x80, DX + DECQ CX + JNZ zero_loop_encodeSnappyBetterBlockAsm64K + MOVL $0x00000000, 12(SP) + MOVQ src_len+32(FP), CX + LEAQ -9(CX), DX + LEAQ -8(CX), SI + MOVL SI, 8(SP) + SHRQ $0x05, CX + SUBL CX, DX + LEAQ (AX)(DX*1), DX + MOVQ DX, (SP) + MOVL $0x00000001, CX + MOVL $0x00000000, 16(SP) + MOVQ src_base+24(FP), DX + 
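+	// Search loop: the step size is 1 + (cx-nextEmit)>>7, so scanning
+	// accelerates through incompressible input. Two hash tables are probed
+	// per position: a 16-bit table keyed on 7 bytes and a 14-bit table
+	// keyed on 4 bytes (golden-ratio multiplier 0x9e3779b1).
+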
+search_loop_encodeSnappyBetterBlockAsm64K: + MOVL CX, SI + SUBL 12(SP), SI + SHRL $0x07, SI + LEAL 1(CX)(SI*1), SI + CMPL SI, 8(SP) + JGE emit_remainder_encodeSnappyBetterBlockAsm64K + MOVQ (DX)(CX*1), DI + MOVL SI, 20(SP) + MOVQ $0x00cf1bbcdcbfa563, R9 + MOVQ $0x9e3779b1, SI + MOVQ DI, R10 + MOVQ DI, R11 + SHLQ $0x08, R10 + IMULQ R9, R10 + SHRQ $0x30, R10 + SHLQ $0x20, R11 + IMULQ SI, R11 + SHRQ $0x32, R11 + MOVL 24(SP)(R10*4), SI + MOVL 262168(SP)(R11*4), R8 + MOVL CX, 24(SP)(R10*4) + MOVL CX, 262168(SP)(R11*4) + CMPL (DX)(SI*1), DI + JEQ candidate_match_encodeSnappyBetterBlockAsm64K + CMPL (DX)(R8*1), DI + JEQ candidateS_match_encodeSnappyBetterBlockAsm64K + MOVL 20(SP), CX + JMP search_loop_encodeSnappyBetterBlockAsm64K + +candidateS_match_encodeSnappyBetterBlockAsm64K: + SHRQ $0x08, DI + MOVQ DI, R10 + SHLQ $0x08, R10 + IMULQ R9, R10 + SHRQ $0x30, R10 + MOVL 24(SP)(R10*4), SI + INCL CX + MOVL CX, 24(SP)(R10*4) + CMPL (DX)(SI*1), DI + JEQ candidate_match_encodeSnappyBetterBlockAsm64K + DECL CX + MOVL R8, SI + +candidate_match_encodeSnappyBetterBlockAsm64K: + MOVL 12(SP), DI + TESTL SI, SI + JZ match_extend_back_end_encodeSnappyBetterBlockAsm64K + +match_extend_back_loop_encodeSnappyBetterBlockAsm64K: + CMPL CX, DI + JLE match_extend_back_end_encodeSnappyBetterBlockAsm64K + MOVB -1(DX)(SI*1), BL + MOVB -1(DX)(CX*1), R8 + CMPB BL, R8 + JNE match_extend_back_end_encodeSnappyBetterBlockAsm64K + LEAL -1(CX), CX + DECL SI + JZ match_extend_back_end_encodeSnappyBetterBlockAsm64K + JMP match_extend_back_loop_encodeSnappyBetterBlockAsm64K + +match_extend_back_end_encodeSnappyBetterBlockAsm64K: + MOVL CX, DI + SUBL 12(SP), DI + LEAQ 3(AX)(DI*1), DI + CMPQ DI, (SP) + JL match_dst_size_check_encodeSnappyBetterBlockAsm64K + MOVQ $0x00000000, ret+48(FP) + RET + +match_dst_size_check_encodeSnappyBetterBlockAsm64K: + MOVL CX, DI + ADDL $0x04, CX + ADDL $0x04, SI + MOVQ src_len+32(FP), R8 + SUBL CX, R8 + LEAQ (DX)(CX*1), R9 + LEAQ (DX)(SI*1), R10 + + // matchLen + XORL R12, R12 + CMPL R8, $0x08 + JL matchlen_single_match_nolit_encodeSnappyBetterBlockAsm64K + +matchlen_loopback_match_nolit_encodeSnappyBetterBlockAsm64K: + MOVQ (R9)(R12*1), R11 + XORQ (R10)(R12*1), R11 + TESTQ R11, R11 + JZ matchlen_loop_match_nolit_encodeSnappyBetterBlockAsm64K + BSFQ R11, R11 + SARQ $0x03, R11 + LEAL (R12)(R11*1), R12 + JMP match_nolit_end_encodeSnappyBetterBlockAsm64K + +matchlen_loop_match_nolit_encodeSnappyBetterBlockAsm64K: + LEAL -8(R8), R8 + LEAL 8(R12), R12 + CMPL R8, $0x08 + JGE matchlen_loopback_match_nolit_encodeSnappyBetterBlockAsm64K + +matchlen_single_match_nolit_encodeSnappyBetterBlockAsm64K: + TESTL R8, R8 + JZ match_nolit_end_encodeSnappyBetterBlockAsm64K + +matchlen_single_loopback_match_nolit_encodeSnappyBetterBlockAsm64K: + MOVB (R9)(R12*1), R11 + CMPB (R10)(R12*1), R11 + JNE match_nolit_end_encodeSnappyBetterBlockAsm64K + LEAL 1(R12), R12 + DECL R8 + JNZ matchlen_single_loopback_match_nolit_encodeSnappyBetterBlockAsm64K + +match_nolit_end_encodeSnappyBetterBlockAsm64K: + MOVL CX, R8 + SUBL SI, R8 + + // Check if repeat + MOVL R8, 16(SP) + MOVL 12(SP), SI + CMPL SI, DI + JEQ emit_literal_done_match_emit_encodeSnappyBetterBlockAsm64K + MOVL DI, R9 + MOVL DI, 12(SP) + LEAQ (DX)(SI*1), R10 + SUBL SI, R9 + LEAL -1(R9), SI + CMPL SI, $0x3c + JLT one_byte_match_emit_encodeSnappyBetterBlockAsm64K + CMPL SI, $0x00000100 + JLT two_bytes_match_emit_encodeSnappyBetterBlockAsm64K + MOVB $0xf4, (AX) + MOVW SI, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_match_emit_encodeSnappyBetterBlockAsm64K + 
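+	// Literal headers: (len-1) < 60 packs directly into the tag byte
+	// (one_byte case); otherwise tag 0xf0 or 0xf4 is followed by one or two
+	// little-endian length bytes. In the 64K variant two extra bytes always
+	// suffice.
+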
+two_bytes_match_emit_encodeSnappyBetterBlockAsm64K: + MOVB $0xf0, (AX) + MOVB SI, 1(AX) + ADDQ $0x02, AX + CMPL SI, $0x40 + JL memmove_match_emit_encodeSnappyBetterBlockAsm64K + JMP memmove_long_match_emit_encodeSnappyBetterBlockAsm64K + +one_byte_match_emit_encodeSnappyBetterBlockAsm64K: + SHLB $0x02, SI + MOVB SI, (AX) + ADDQ $0x01, AX + +memmove_match_emit_encodeSnappyBetterBlockAsm64K: + LEAQ (AX)(R9*1), SI + + // genMemMoveShort + CMPQ R9, $0x08 + JLE emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm64K_memmove_move_8 + CMPQ R9, $0x10 + JBE emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm64K_memmove_move_8through16 + CMPQ R9, $0x20 + JBE emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm64K_memmove_move_17through32 + JMP emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm64K_memmove_move_33through64 + +emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm64K_memmove_move_8: + MOVQ (R10), R11 + MOVQ R11, (AX) + JMP memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm64K + +emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm64K_memmove_move_8through16: + MOVQ (R10), R11 + MOVQ -8(R10)(R9*1), R10 + MOVQ R11, (AX) + MOVQ R10, -8(AX)(R9*1) + JMP memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm64K + +emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm64K_memmove_move_17through32: + MOVOU (R10), X0 + MOVOU -16(R10)(R9*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(R9*1) + JMP memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm64K + +emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm64K_memmove_move_33through64: + MOVOU (R10), X0 + MOVOU 16(R10), X1 + MOVOU -32(R10)(R9*1), X2 + MOVOU -16(R10)(R9*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R9*1) + MOVOU X3, -16(AX)(R9*1) + +memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm64K: + MOVQ SI, AX + JMP emit_literal_done_match_emit_encodeSnappyBetterBlockAsm64K + +memmove_long_match_emit_encodeSnappyBetterBlockAsm64K: + LEAQ (AX)(R9*1), SI + + // genMemMoveLong + MOVOU (R10), X0 + MOVOU 16(R10), X1 + MOVOU -32(R10)(R9*1), X2 + MOVOU -16(R10)(R9*1), X3 + MOVQ R9, R13 + SHRQ $0x05, R13 + MOVQ AX, R11 + ANDL $0x0000001f, R11 + MOVQ $0x00000040, R14 + SUBQ R11, R14 + DECQ R13 + JA emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm64Klarge_forward_sse_loop_32 + LEAQ -32(R10)(R14*1), R11 + LEAQ -32(AX)(R14*1), R15 + +emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm64Klarge_big_loop_back: + MOVOU (R11), X4 + MOVOU 16(R11), X5 + MOVOA X4, (R15) + MOVOA X5, 16(R15) + ADDQ $0x20, R15 + ADDQ $0x20, R11 + ADDQ $0x20, R14 + DECQ R13 + JNA emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm64Klarge_big_loop_back + +emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm64Klarge_forward_sse_loop_32: + MOVOU -32(R10)(R14*1), X4 + MOVOU -16(R10)(R14*1), X5 + MOVOA X4, -32(AX)(R14*1) + MOVOA X5, -16(AX)(R14*1) + ADDQ $0x20, R14 + CMPQ R9, R14 + JAE emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm64Klarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R9*1) + MOVOU X3, -16(AX)(R9*1) + MOVQ SI, AX + +emit_literal_done_match_emit_encodeSnappyBetterBlockAsm64K: + ADDL R12, CX + ADDL $0x04, R12 + MOVL CX, 12(SP) + + // emitCopy +two_byte_offset_match_nolit_encodeSnappyBetterBlockAsm64K: + CMPL R12, $0x40 + JLE two_byte_offset_short_match_nolit_encodeSnappyBetterBlockAsm64K + MOVB $0xee, (AX) + MOVW R8, 1(AX) + LEAL -60(R12), R12 + ADDQ $0x03, AX + JMP two_byte_offset_match_nolit_encodeSnappyBetterBlockAsm64K + 
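+	// Offsets in the 64K variant always fit in two bytes, so emitCopy skips
+	// the 4-byte-offset encoding and loops emitting 60-byte copies
+	// (tag 0xee) until at most 64 bytes remain.
+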
+two_byte_offset_short_match_nolit_encodeSnappyBetterBlockAsm64K: + CMPL R12, $0x0c + JGE emit_copy_three_match_nolit_encodeSnappyBetterBlockAsm64K + CMPL R8, $0x00000800 + JGE emit_copy_three_match_nolit_encodeSnappyBetterBlockAsm64K + MOVB $0x01, BL + LEAL -16(BX)(R12*4), R12 + MOVB R8, 1(AX) + SHRL $0x08, R8 + SHLL $0x05, R8 + ORL R8, R12 + MOVB R12, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeSnappyBetterBlockAsm64K + +emit_copy_three_match_nolit_encodeSnappyBetterBlockAsm64K: + MOVB $0x02, BL + LEAL -4(BX)(R12*4), R12 + MOVB R12, (AX) + MOVW R8, 1(AX) + ADDQ $0x03, AX + +match_nolit_emitcopy_end_encodeSnappyBetterBlockAsm64K: + CMPL CX, 8(SP) + JGE emit_remainder_encodeSnappyBetterBlockAsm64K + CMPQ AX, (SP) + JL match_nolit_dst_ok_encodeSnappyBetterBlockAsm64K + MOVQ $0x00000000, ret+48(FP) + RET + +match_nolit_dst_ok_encodeSnappyBetterBlockAsm64K: + MOVQ $0x00cf1bbcdcbfa563, SI + MOVQ $0x9e3779b1, R8 + INCL DI + MOVQ (DX)(DI*1), R9 + MOVQ R9, R10 + MOVQ R9, R11 + MOVQ R9, R12 + SHRQ $0x08, R11 + MOVQ R11, R13 + SHRQ $0x10, R12 + LEAL 1(DI), R14 + LEAL 2(DI), R15 + MOVQ -2(DX)(CX*1), R9 + SHLQ $0x08, R10 + IMULQ SI, R10 + SHRQ $0x30, R10 + SHLQ $0x08, R13 + IMULQ SI, R13 + SHRQ $0x30, R13 + SHLQ $0x20, R11 + IMULQ R8, R11 + SHRQ $0x32, R11 + SHLQ $0x20, R12 + IMULQ R8, R12 + SHRQ $0x32, R12 + MOVL DI, 24(SP)(R10*4) + MOVL R14, 24(SP)(R13*4) + MOVL R14, 262168(SP)(R11*4) + MOVL R15, 262168(SP)(R12*4) + MOVQ R9, R10 + MOVQ R9, R11 + SHRQ $0x08, R11 + MOVQ R11, R13 + LEAL -2(CX), R9 + LEAL -1(CX), DI + SHLQ $0x08, R10 + IMULQ SI, R10 + SHRQ $0x30, R10 + SHLQ $0x20, R11 + IMULQ R8, R11 + SHRQ $0x32, R11 + SHLQ $0x08, R13 + IMULQ SI, R13 + SHRQ $0x30, R13 + MOVL R9, 24(SP)(R10*4) + MOVL DI, 262168(SP)(R11*4) + MOVL DI, 24(SP)(R13*4) + JMP search_loop_encodeSnappyBetterBlockAsm64K + +emit_remainder_encodeSnappyBetterBlockAsm64K: + MOVQ src_len+32(FP), CX + SUBL 12(SP), CX + LEAQ 3(AX)(CX*1), CX + CMPQ CX, (SP) + JL emit_remainder_ok_encodeSnappyBetterBlockAsm64K + MOVQ $0x00000000, ret+48(FP) + RET + +emit_remainder_ok_encodeSnappyBetterBlockAsm64K: + MOVQ src_len+32(FP), CX + MOVL 12(SP), BX + CMPL BX, CX + JEQ emit_literal_done_emit_remainder_encodeSnappyBetterBlockAsm64K + MOVL CX, SI + MOVL CX, 12(SP) + LEAQ (DX)(BX*1), CX + SUBL BX, SI + LEAL -1(SI), DX + CMPL DX, $0x3c + JLT one_byte_emit_remainder_encodeSnappyBetterBlockAsm64K + CMPL DX, $0x00000100 + JLT two_bytes_emit_remainder_encodeSnappyBetterBlockAsm64K + MOVB $0xf4, (AX) + MOVW DX, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_emit_remainder_encodeSnappyBetterBlockAsm64K + +two_bytes_emit_remainder_encodeSnappyBetterBlockAsm64K: + MOVB $0xf0, (AX) + MOVB DL, 1(AX) + ADDQ $0x02, AX + CMPL DX, $0x40 + JL memmove_emit_remainder_encodeSnappyBetterBlockAsm64K + JMP memmove_long_emit_remainder_encodeSnappyBetterBlockAsm64K + +one_byte_emit_remainder_encodeSnappyBetterBlockAsm64K: + SHLB $0x02, DL + MOVB DL, (AX) + ADDQ $0x01, AX + +memmove_emit_remainder_encodeSnappyBetterBlockAsm64K: + LEAQ (AX)(SI*1), DX + MOVL SI, BX + + // genMemMoveShort + CMPQ BX, $0x08 + JLE emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm64K_memmove_move_8 + CMPQ BX, $0x10 + JBE emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm64K_memmove_move_8through16 + CMPQ BX, $0x20 + JBE emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm64K_memmove_move_17through32 + JMP emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm64K_memmove_move_33through64 + 
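+	// genMemMoveShort copies up to 64 literal bytes without a byte loop by
+	// selecting a size class (8, 16, 32 or 64 bytes) and using overlapping
+	// head/tail loads.
+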
+emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm64K_memmove_move_8: + MOVQ (CX), SI + MOVQ SI, (AX) + JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm64K + +emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm64K_memmove_move_8through16: + MOVQ (CX), SI + MOVQ -8(CX)(BX*1), CX + MOVQ SI, (AX) + MOVQ CX, -8(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm64K + +emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm64K_memmove_move_17through32: + MOVOU (CX), X0 + MOVOU -16(CX)(BX*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm64K + +emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm64K_memmove_move_33through64: + MOVOU (CX), X0 + MOVOU 16(CX), X1 + MOVOU -32(CX)(BX*1), X2 + MOVOU -16(CX)(BX*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(BX*1) + MOVOU X3, -16(AX)(BX*1) + +memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm64K: + MOVQ DX, AX + JMP emit_literal_done_emit_remainder_encodeSnappyBetterBlockAsm64K + +memmove_long_emit_remainder_encodeSnappyBetterBlockAsm64K: + LEAQ (AX)(SI*1), DX + MOVL SI, BX + + // genMemMoveLong + MOVOU (CX), X0 + MOVOU 16(CX), X1 + MOVOU -32(CX)(BX*1), X2 + MOVOU -16(CX)(BX*1), X3 + MOVQ BX, DI + SHRQ $0x05, DI + MOVQ AX, SI + ANDL $0x0000001f, SI + MOVQ $0x00000040, R8 + SUBQ SI, R8 + DECQ DI + JA emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm64Klarge_forward_sse_loop_32 + LEAQ -32(CX)(R8*1), SI + LEAQ -32(AX)(R8*1), R9 + +emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm64Klarge_big_loop_back: + MOVOU (SI), X4 + MOVOU 16(SI), X5 + MOVOA X4, (R9) + MOVOA X5, 16(R9) + ADDQ $0x20, R9 + ADDQ $0x20, SI + ADDQ $0x20, R8 + DECQ DI + JNA emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm64Klarge_big_loop_back + +emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm64Klarge_forward_sse_loop_32: + MOVOU -32(CX)(R8*1), X4 + MOVOU -16(CX)(R8*1), X5 + MOVOA X4, -32(AX)(R8*1) + MOVOA X5, -16(AX)(R8*1) + ADDQ $0x20, R8 + CMPQ BX, R8 + JAE emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm64Klarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(BX*1) + MOVOU X3, -16(AX)(BX*1) + MOVQ DX, AX + +emit_literal_done_emit_remainder_encodeSnappyBetterBlockAsm64K: + MOVQ dst_base+0(FP), CX + SUBQ CX, AX + MOVQ AX, ret+48(FP) + RET + +// func encodeSnappyBetterBlockAsm12B(dst []byte, src []byte) int +// Requires: SSE2 +TEXT ·encodeSnappyBetterBlockAsm12B(SB), $81944-56 + MOVQ dst_base+0(FP), AX + MOVQ $0x00000280, CX + LEAQ 24(SP), DX + PXOR X0, X0 + +zero_loop_encodeSnappyBetterBlockAsm12B: + MOVOU X0, (DX) + MOVOU X0, 16(DX) + MOVOU X0, 32(DX) + MOVOU X0, 48(DX) + MOVOU X0, 64(DX) + MOVOU X0, 80(DX) + MOVOU X0, 96(DX) + MOVOU X0, 112(DX) + ADDQ $0x80, DX + DECQ CX + JNZ zero_loop_encodeSnappyBetterBlockAsm12B + MOVL $0x00000000, 12(SP) + MOVQ src_len+32(FP), CX + LEAQ -9(CX), DX + LEAQ -8(CX), SI + MOVL SI, 8(SP) + SHRQ $0x05, CX + SUBL CX, DX + LEAQ (AX)(DX*1), DX + MOVQ DX, (SP) + MOVL $0x00000001, CX + MOVL $0x00000000, 16(SP) + MOVQ src_base+24(FP), DX + +search_loop_encodeSnappyBetterBlockAsm12B: + MOVL CX, SI + SUBL 12(SP), SI + SHRL $0x06, SI + LEAL 1(CX)(SI*1), SI + CMPL SI, 8(SP) + JGE emit_remainder_encodeSnappyBetterBlockAsm12B + MOVQ (DX)(CX*1), DI + MOVL SI, 20(SP) + MOVQ $0x0000cf1bbcdcbf9b, R9 + MOVQ $0x9e3779b1, SI + MOVQ DI, R10 + MOVQ DI, R11 + SHLQ $0x10, R10 + IMULQ R9, R10 + SHRQ $0x32, R10 + SHLQ $0x20, R11 + 
IMULQ SI, R11 + SHRQ $0x34, R11 + MOVL 24(SP)(R10*4), SI + MOVL 65560(SP)(R11*4), R8 + MOVL CX, 24(SP)(R10*4) + MOVL CX, 65560(SP)(R11*4) + CMPL (DX)(SI*1), DI + JEQ candidate_match_encodeSnappyBetterBlockAsm12B + CMPL (DX)(R8*1), DI + JEQ candidateS_match_encodeSnappyBetterBlockAsm12B + MOVL 20(SP), CX + JMP search_loop_encodeSnappyBetterBlockAsm12B + +candidateS_match_encodeSnappyBetterBlockAsm12B: + SHRQ $0x08, DI + MOVQ DI, R10 + SHLQ $0x10, R10 + IMULQ R9, R10 + SHRQ $0x32, R10 + MOVL 24(SP)(R10*4), SI + INCL CX + MOVL CX, 24(SP)(R10*4) + CMPL (DX)(SI*1), DI + JEQ candidate_match_encodeSnappyBetterBlockAsm12B + DECL CX + MOVL R8, SI + +candidate_match_encodeSnappyBetterBlockAsm12B: + MOVL 12(SP), DI + TESTL SI, SI + JZ match_extend_back_end_encodeSnappyBetterBlockAsm12B + +match_extend_back_loop_encodeSnappyBetterBlockAsm12B: + CMPL CX, DI + JLE match_extend_back_end_encodeSnappyBetterBlockAsm12B + MOVB -1(DX)(SI*1), BL + MOVB -1(DX)(CX*1), R8 + CMPB BL, R8 + JNE match_extend_back_end_encodeSnappyBetterBlockAsm12B + LEAL -1(CX), CX + DECL SI + JZ match_extend_back_end_encodeSnappyBetterBlockAsm12B + JMP match_extend_back_loop_encodeSnappyBetterBlockAsm12B + +match_extend_back_end_encodeSnappyBetterBlockAsm12B: + MOVL CX, DI + SUBL 12(SP), DI + LEAQ 3(AX)(DI*1), DI + CMPQ DI, (SP) + JL match_dst_size_check_encodeSnappyBetterBlockAsm12B + MOVQ $0x00000000, ret+48(FP) + RET + +match_dst_size_check_encodeSnappyBetterBlockAsm12B: + MOVL CX, DI + ADDL $0x04, CX + ADDL $0x04, SI + MOVQ src_len+32(FP), R8 + SUBL CX, R8 + LEAQ (DX)(CX*1), R9 + LEAQ (DX)(SI*1), R10 + + // matchLen + XORL R12, R12 + CMPL R8, $0x08 + JL matchlen_single_match_nolit_encodeSnappyBetterBlockAsm12B + +matchlen_loopback_match_nolit_encodeSnappyBetterBlockAsm12B: + MOVQ (R9)(R12*1), R11 + XORQ (R10)(R12*1), R11 + TESTQ R11, R11 + JZ matchlen_loop_match_nolit_encodeSnappyBetterBlockAsm12B + BSFQ R11, R11 + SARQ $0x03, R11 + LEAL (R12)(R11*1), R12 + JMP match_nolit_end_encodeSnappyBetterBlockAsm12B + +matchlen_loop_match_nolit_encodeSnappyBetterBlockAsm12B: + LEAL -8(R8), R8 + LEAL 8(R12), R12 + CMPL R8, $0x08 + JGE matchlen_loopback_match_nolit_encodeSnappyBetterBlockAsm12B + +matchlen_single_match_nolit_encodeSnappyBetterBlockAsm12B: + TESTL R8, R8 + JZ match_nolit_end_encodeSnappyBetterBlockAsm12B + +matchlen_single_loopback_match_nolit_encodeSnappyBetterBlockAsm12B: + MOVB (R9)(R12*1), R11 + CMPB (R10)(R12*1), R11 + JNE match_nolit_end_encodeSnappyBetterBlockAsm12B + LEAL 1(R12), R12 + DECL R8 + JNZ matchlen_single_loopback_match_nolit_encodeSnappyBetterBlockAsm12B + +match_nolit_end_encodeSnappyBetterBlockAsm12B: + MOVL CX, R8 + SUBL SI, R8 + + // Check if repeat + MOVL R8, 16(SP) + MOVL 12(SP), SI + CMPL SI, DI + JEQ emit_literal_done_match_emit_encodeSnappyBetterBlockAsm12B + MOVL DI, R9 + MOVL DI, 12(SP) + LEAQ (DX)(SI*1), R10 + SUBL SI, R9 + LEAL -1(R9), SI + CMPL SI, $0x3c + JLT one_byte_match_emit_encodeSnappyBetterBlockAsm12B + CMPL SI, $0x00000100 + JLT two_bytes_match_emit_encodeSnappyBetterBlockAsm12B + MOVB $0xf4, (AX) + MOVW SI, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_match_emit_encodeSnappyBetterBlockAsm12B + +two_bytes_match_emit_encodeSnappyBetterBlockAsm12B: + MOVB $0xf0, (AX) + MOVB SI, 1(AX) + ADDQ $0x02, AX + CMPL SI, $0x40 + JL memmove_match_emit_encodeSnappyBetterBlockAsm12B + JMP memmove_long_match_emit_encodeSnappyBetterBlockAsm12B + +one_byte_match_emit_encodeSnappyBetterBlockAsm12B: + SHLB $0x02, SI + MOVB SI, (AX) + ADDQ $0x01, AX + +memmove_match_emit_encodeSnappyBetterBlockAsm12B: + LEAQ 
(AX)(R9*1), SI + + // genMemMoveShort + CMPQ R9, $0x08 + JLE emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm12B_memmove_move_8 + CMPQ R9, $0x10 + JBE emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm12B_memmove_move_8through16 + CMPQ R9, $0x20 + JBE emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm12B_memmove_move_17through32 + JMP emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm12B_memmove_move_33through64 + +emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm12B_memmove_move_8: + MOVQ (R10), R11 + MOVQ R11, (AX) + JMP memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm12B + +emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm12B_memmove_move_8through16: + MOVQ (R10), R11 + MOVQ -8(R10)(R9*1), R10 + MOVQ R11, (AX) + MOVQ R10, -8(AX)(R9*1) + JMP memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm12B + +emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm12B_memmove_move_17through32: + MOVOU (R10), X0 + MOVOU -16(R10)(R9*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(R9*1) + JMP memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm12B + +emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm12B_memmove_move_33through64: + MOVOU (R10), X0 + MOVOU 16(R10), X1 + MOVOU -32(R10)(R9*1), X2 + MOVOU -16(R10)(R9*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R9*1) + MOVOU X3, -16(AX)(R9*1) + +memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm12B: + MOVQ SI, AX + JMP emit_literal_done_match_emit_encodeSnappyBetterBlockAsm12B + +memmove_long_match_emit_encodeSnappyBetterBlockAsm12B: + LEAQ (AX)(R9*1), SI + + // genMemMoveLong + MOVOU (R10), X0 + MOVOU 16(R10), X1 + MOVOU -32(R10)(R9*1), X2 + MOVOU -16(R10)(R9*1), X3 + MOVQ R9, R13 + SHRQ $0x05, R13 + MOVQ AX, R11 + ANDL $0x0000001f, R11 + MOVQ $0x00000040, R14 + SUBQ R11, R14 + DECQ R13 + JA emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm12Blarge_forward_sse_loop_32 + LEAQ -32(R10)(R14*1), R11 + LEAQ -32(AX)(R14*1), R15 + +emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm12Blarge_big_loop_back: + MOVOU (R11), X4 + MOVOU 16(R11), X5 + MOVOA X4, (R15) + MOVOA X5, 16(R15) + ADDQ $0x20, R15 + ADDQ $0x20, R11 + ADDQ $0x20, R14 + DECQ R13 + JNA emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm12Blarge_big_loop_back + +emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm12Blarge_forward_sse_loop_32: + MOVOU -32(R10)(R14*1), X4 + MOVOU -16(R10)(R14*1), X5 + MOVOA X4, -32(AX)(R14*1) + MOVOA X5, -16(AX)(R14*1) + ADDQ $0x20, R14 + CMPQ R9, R14 + JAE emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm12Blarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R9*1) + MOVOU X3, -16(AX)(R9*1) + MOVQ SI, AX + +emit_literal_done_match_emit_encodeSnappyBetterBlockAsm12B: + ADDL R12, CX + ADDL $0x04, R12 + MOVL CX, 12(SP) + + // emitCopy +two_byte_offset_match_nolit_encodeSnappyBetterBlockAsm12B: + CMPL R12, $0x40 + JLE two_byte_offset_short_match_nolit_encodeSnappyBetterBlockAsm12B + MOVB $0xee, (AX) + MOVW R8, 1(AX) + LEAL -60(R12), R12 + ADDQ $0x03, AX + JMP two_byte_offset_match_nolit_encodeSnappyBetterBlockAsm12B + +two_byte_offset_short_match_nolit_encodeSnappyBetterBlockAsm12B: + CMPL R12, $0x0c + JGE emit_copy_three_match_nolit_encodeSnappyBetterBlockAsm12B + CMPL R8, $0x00000800 + JGE emit_copy_three_match_nolit_encodeSnappyBetterBlockAsm12B + MOVB $0x01, BL + LEAL -16(BX)(R12*4), R12 + MOVB R8, 1(AX) + SHRL $0x08, R8 + SHLL $0x05, R8 + ORL R8, R12 + MOVB R12, (AX) + ADDQ $0x02, AX + JMP 
match_nolit_emitcopy_end_encodeSnappyBetterBlockAsm12B + +emit_copy_three_match_nolit_encodeSnappyBetterBlockAsm12B: + MOVB $0x02, BL + LEAL -4(BX)(R12*4), R12 + MOVB R12, (AX) + MOVW R8, 1(AX) + ADDQ $0x03, AX + +match_nolit_emitcopy_end_encodeSnappyBetterBlockAsm12B: + CMPL CX, 8(SP) + JGE emit_remainder_encodeSnappyBetterBlockAsm12B + CMPQ AX, (SP) + JL match_nolit_dst_ok_encodeSnappyBetterBlockAsm12B + MOVQ $0x00000000, ret+48(FP) + RET + +match_nolit_dst_ok_encodeSnappyBetterBlockAsm12B: + MOVQ $0x0000cf1bbcdcbf9b, SI + MOVQ $0x9e3779b1, R8 + INCL DI + MOVQ (DX)(DI*1), R9 + MOVQ R9, R10 + MOVQ R9, R11 + MOVQ R9, R12 + SHRQ $0x08, R11 + MOVQ R11, R13 + SHRQ $0x10, R12 + LEAL 1(DI), R14 + LEAL 2(DI), R15 + MOVQ -2(DX)(CX*1), R9 + SHLQ $0x10, R10 + IMULQ SI, R10 + SHRQ $0x32, R10 + SHLQ $0x10, R13 + IMULQ SI, R13 + SHRQ $0x32, R13 + SHLQ $0x20, R11 + IMULQ R8, R11 + SHRQ $0x34, R11 + SHLQ $0x20, R12 + IMULQ R8, R12 + SHRQ $0x34, R12 + MOVL DI, 24(SP)(R10*4) + MOVL R14, 24(SP)(R13*4) + MOVL R14, 65560(SP)(R11*4) + MOVL R15, 65560(SP)(R12*4) + MOVQ R9, R10 + MOVQ R9, R11 + SHRQ $0x08, R11 + MOVQ R11, R13 + LEAL -2(CX), R9 + LEAL -1(CX), DI + SHLQ $0x10, R10 + IMULQ SI, R10 + SHRQ $0x32, R10 + SHLQ $0x20, R11 + IMULQ R8, R11 + SHRQ $0x34, R11 + SHLQ $0x10, R13 + IMULQ SI, R13 + SHRQ $0x32, R13 + MOVL R9, 24(SP)(R10*4) + MOVL DI, 65560(SP)(R11*4) + MOVL DI, 24(SP)(R13*4) + JMP search_loop_encodeSnappyBetterBlockAsm12B + +emit_remainder_encodeSnappyBetterBlockAsm12B: + MOVQ src_len+32(FP), CX + SUBL 12(SP), CX + LEAQ 3(AX)(CX*1), CX + CMPQ CX, (SP) + JL emit_remainder_ok_encodeSnappyBetterBlockAsm12B + MOVQ $0x00000000, ret+48(FP) + RET + +emit_remainder_ok_encodeSnappyBetterBlockAsm12B: + MOVQ src_len+32(FP), CX + MOVL 12(SP), BX + CMPL BX, CX + JEQ emit_literal_done_emit_remainder_encodeSnappyBetterBlockAsm12B + MOVL CX, SI + MOVL CX, 12(SP) + LEAQ (DX)(BX*1), CX + SUBL BX, SI + LEAL -1(SI), DX + CMPL DX, $0x3c + JLT one_byte_emit_remainder_encodeSnappyBetterBlockAsm12B + CMPL DX, $0x00000100 + JLT two_bytes_emit_remainder_encodeSnappyBetterBlockAsm12B + MOVB $0xf4, (AX) + MOVW DX, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_emit_remainder_encodeSnappyBetterBlockAsm12B + +two_bytes_emit_remainder_encodeSnappyBetterBlockAsm12B: + MOVB $0xf0, (AX) + MOVB DL, 1(AX) + ADDQ $0x02, AX + CMPL DX, $0x40 + JL memmove_emit_remainder_encodeSnappyBetterBlockAsm12B + JMP memmove_long_emit_remainder_encodeSnappyBetterBlockAsm12B + +one_byte_emit_remainder_encodeSnappyBetterBlockAsm12B: + SHLB $0x02, DL + MOVB DL, (AX) + ADDQ $0x01, AX + +memmove_emit_remainder_encodeSnappyBetterBlockAsm12B: + LEAQ (AX)(SI*1), DX + MOVL SI, BX + + // genMemMoveShort + CMPQ BX, $0x08 + JLE emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm12B_memmove_move_8 + CMPQ BX, $0x10 + JBE emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm12B_memmove_move_8through16 + CMPQ BX, $0x20 + JBE emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm12B_memmove_move_17through32 + JMP emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm12B_memmove_move_33through64 + +emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm12B_memmove_move_8: + MOVQ (CX), SI + MOVQ SI, (AX) + JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm12B + +emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm12B_memmove_move_8through16: + MOVQ (CX), SI + MOVQ -8(CX)(BX*1), CX + MOVQ SI, (AX) + MOVQ CX, -8(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm12B + 
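+	// 17..32 byte copies load the first and last 16 bytes (which may
+	// overlap) into XMM registers and store both; 33..64 bytes do the same
+	// with four registers.
+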
+emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm12B_memmove_move_17through32: + MOVOU (CX), X0 + MOVOU -16(CX)(BX*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm12B + +emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm12B_memmove_move_33through64: + MOVOU (CX), X0 + MOVOU 16(CX), X1 + MOVOU -32(CX)(BX*1), X2 + MOVOU -16(CX)(BX*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(BX*1) + MOVOU X3, -16(AX)(BX*1) + +memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm12B: + MOVQ DX, AX + JMP emit_literal_done_emit_remainder_encodeSnappyBetterBlockAsm12B + +memmove_long_emit_remainder_encodeSnappyBetterBlockAsm12B: + LEAQ (AX)(SI*1), DX + MOVL SI, BX + + // genMemMoveLong + MOVOU (CX), X0 + MOVOU 16(CX), X1 + MOVOU -32(CX)(BX*1), X2 + MOVOU -16(CX)(BX*1), X3 + MOVQ BX, DI + SHRQ $0x05, DI + MOVQ AX, SI + ANDL $0x0000001f, SI + MOVQ $0x00000040, R8 + SUBQ SI, R8 + DECQ DI + JA emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm12Blarge_forward_sse_loop_32 + LEAQ -32(CX)(R8*1), SI + LEAQ -32(AX)(R8*1), R9 + +emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm12Blarge_big_loop_back: + MOVOU (SI), X4 + MOVOU 16(SI), X5 + MOVOA X4, (R9) + MOVOA X5, 16(R9) + ADDQ $0x20, R9 + ADDQ $0x20, SI + ADDQ $0x20, R8 + DECQ DI + JNA emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm12Blarge_big_loop_back + +emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm12Blarge_forward_sse_loop_32: + MOVOU -32(CX)(R8*1), X4 + MOVOU -16(CX)(R8*1), X5 + MOVOA X4, -32(AX)(R8*1) + MOVOA X5, -16(AX)(R8*1) + ADDQ $0x20, R8 + CMPQ BX, R8 + JAE emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm12Blarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(BX*1) + MOVOU X3, -16(AX)(BX*1) + MOVQ DX, AX + +emit_literal_done_emit_remainder_encodeSnappyBetterBlockAsm12B: + MOVQ dst_base+0(FP), CX + SUBQ CX, AX + MOVQ AX, ret+48(FP) + RET + +// func encodeSnappyBetterBlockAsm10B(dst []byte, src []byte) int +// Requires: SSE2 +TEXT ·encodeSnappyBetterBlockAsm10B(SB), $20504-56 + MOVQ dst_base+0(FP), AX + MOVQ $0x000000a0, CX + LEAQ 24(SP), DX + PXOR X0, X0 + +zero_loop_encodeSnappyBetterBlockAsm10B: + MOVOU X0, (DX) + MOVOU X0, 16(DX) + MOVOU X0, 32(DX) + MOVOU X0, 48(DX) + MOVOU X0, 64(DX) + MOVOU X0, 80(DX) + MOVOU X0, 96(DX) + MOVOU X0, 112(DX) + ADDQ $0x80, DX + DECQ CX + JNZ zero_loop_encodeSnappyBetterBlockAsm10B + MOVL $0x00000000, 12(SP) + MOVQ src_len+32(FP), CX + LEAQ -9(CX), DX + LEAQ -8(CX), SI + MOVL SI, 8(SP) + SHRQ $0x05, CX + SUBL CX, DX + LEAQ (AX)(DX*1), DX + MOVQ DX, (SP) + MOVL $0x00000001, CX + MOVL $0x00000000, 16(SP) + MOVQ src_base+24(FP), DX + +search_loop_encodeSnappyBetterBlockAsm10B: + MOVL CX, SI + SUBL 12(SP), SI + SHRL $0x05, SI + LEAL 1(CX)(SI*1), SI + CMPL SI, 8(SP) + JGE emit_remainder_encodeSnappyBetterBlockAsm10B + MOVQ (DX)(CX*1), DI + MOVL SI, 20(SP) + MOVQ $0x0000cf1bbcdcbf9b, R9 + MOVQ $0x9e3779b1, SI + MOVQ DI, R10 + MOVQ DI, R11 + SHLQ $0x10, R10 + IMULQ R9, R10 + SHRQ $0x34, R10 + SHLQ $0x20, R11 + IMULQ SI, R11 + SHRQ $0x36, R11 + MOVL 24(SP)(R10*4), SI + MOVL 16408(SP)(R11*4), R8 + MOVL CX, 24(SP)(R10*4) + MOVL CX, 16408(SP)(R11*4) + CMPL (DX)(SI*1), DI + JEQ candidate_match_encodeSnappyBetterBlockAsm10B + CMPL (DX)(R8*1), DI + JEQ candidateS_match_encodeSnappyBetterBlockAsm10B + MOVL 20(SP), CX + JMP search_loop_encodeSnappyBetterBlockAsm10B + +candidateS_match_encodeSnappyBetterBlockAsm10B: + SHRQ $0x08, DI 
+ MOVQ DI, R10 + SHLQ $0x10, R10 + IMULQ R9, R10 + SHRQ $0x34, R10 + MOVL 24(SP)(R10*4), SI + INCL CX + MOVL CX, 24(SP)(R10*4) + CMPL (DX)(SI*1), DI + JEQ candidate_match_encodeSnappyBetterBlockAsm10B + DECL CX + MOVL R8, SI + +candidate_match_encodeSnappyBetterBlockAsm10B: + MOVL 12(SP), DI + TESTL SI, SI + JZ match_extend_back_end_encodeSnappyBetterBlockAsm10B + +match_extend_back_loop_encodeSnappyBetterBlockAsm10B: + CMPL CX, DI + JLE match_extend_back_end_encodeSnappyBetterBlockAsm10B + MOVB -1(DX)(SI*1), BL + MOVB -1(DX)(CX*1), R8 + CMPB BL, R8 + JNE match_extend_back_end_encodeSnappyBetterBlockAsm10B + LEAL -1(CX), CX + DECL SI + JZ match_extend_back_end_encodeSnappyBetterBlockAsm10B + JMP match_extend_back_loop_encodeSnappyBetterBlockAsm10B + +match_extend_back_end_encodeSnappyBetterBlockAsm10B: + MOVL CX, DI + SUBL 12(SP), DI + LEAQ 3(AX)(DI*1), DI + CMPQ DI, (SP) + JL match_dst_size_check_encodeSnappyBetterBlockAsm10B + MOVQ $0x00000000, ret+48(FP) + RET + +match_dst_size_check_encodeSnappyBetterBlockAsm10B: + MOVL CX, DI + ADDL $0x04, CX + ADDL $0x04, SI + MOVQ src_len+32(FP), R8 + SUBL CX, R8 + LEAQ (DX)(CX*1), R9 + LEAQ (DX)(SI*1), R10 + + // matchLen + XORL R12, R12 + CMPL R8, $0x08 + JL matchlen_single_match_nolit_encodeSnappyBetterBlockAsm10B + +matchlen_loopback_match_nolit_encodeSnappyBetterBlockAsm10B: + MOVQ (R9)(R12*1), R11 + XORQ (R10)(R12*1), R11 + TESTQ R11, R11 + JZ matchlen_loop_match_nolit_encodeSnappyBetterBlockAsm10B + BSFQ R11, R11 + SARQ $0x03, R11 + LEAL (R12)(R11*1), R12 + JMP match_nolit_end_encodeSnappyBetterBlockAsm10B + +matchlen_loop_match_nolit_encodeSnappyBetterBlockAsm10B: + LEAL -8(R8), R8 + LEAL 8(R12), R12 + CMPL R8, $0x08 + JGE matchlen_loopback_match_nolit_encodeSnappyBetterBlockAsm10B + +matchlen_single_match_nolit_encodeSnappyBetterBlockAsm10B: + TESTL R8, R8 + JZ match_nolit_end_encodeSnappyBetterBlockAsm10B + +matchlen_single_loopback_match_nolit_encodeSnappyBetterBlockAsm10B: + MOVB (R9)(R12*1), R11 + CMPB (R10)(R12*1), R11 + JNE match_nolit_end_encodeSnappyBetterBlockAsm10B + LEAL 1(R12), R12 + DECL R8 + JNZ matchlen_single_loopback_match_nolit_encodeSnappyBetterBlockAsm10B + +match_nolit_end_encodeSnappyBetterBlockAsm10B: + MOVL CX, R8 + SUBL SI, R8 + + // Check if repeat + MOVL R8, 16(SP) + MOVL 12(SP), SI + CMPL SI, DI + JEQ emit_literal_done_match_emit_encodeSnappyBetterBlockAsm10B + MOVL DI, R9 + MOVL DI, 12(SP) + LEAQ (DX)(SI*1), R10 + SUBL SI, R9 + LEAL -1(R9), SI + CMPL SI, $0x3c + JLT one_byte_match_emit_encodeSnappyBetterBlockAsm10B + CMPL SI, $0x00000100 + JLT two_bytes_match_emit_encodeSnappyBetterBlockAsm10B + MOVB $0xf4, (AX) + MOVW SI, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_match_emit_encodeSnappyBetterBlockAsm10B + +two_bytes_match_emit_encodeSnappyBetterBlockAsm10B: + MOVB $0xf0, (AX) + MOVB SI, 1(AX) + ADDQ $0x02, AX + CMPL SI, $0x40 + JL memmove_match_emit_encodeSnappyBetterBlockAsm10B + JMP memmove_long_match_emit_encodeSnappyBetterBlockAsm10B + +one_byte_match_emit_encodeSnappyBetterBlockAsm10B: + SHLB $0x02, SI + MOVB SI, (AX) + ADDQ $0x01, AX + +memmove_match_emit_encodeSnappyBetterBlockAsm10B: + LEAQ (AX)(R9*1), SI + + // genMemMoveShort + CMPQ R9, $0x08 + JLE emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm10B_memmove_move_8 + CMPQ R9, $0x10 + JBE emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm10B_memmove_move_8through16 + CMPQ R9, $0x20 + JBE emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm10B_memmove_move_17through32 + JMP 
emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm10B_memmove_move_33through64 + +emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm10B_memmove_move_8: + MOVQ (R10), R11 + MOVQ R11, (AX) + JMP memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm10B + +emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm10B_memmove_move_8through16: + MOVQ (R10), R11 + MOVQ -8(R10)(R9*1), R10 + MOVQ R11, (AX) + MOVQ R10, -8(AX)(R9*1) + JMP memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm10B + +emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm10B_memmove_move_17through32: + MOVOU (R10), X0 + MOVOU -16(R10)(R9*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(R9*1) + JMP memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm10B + +emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm10B_memmove_move_33through64: + MOVOU (R10), X0 + MOVOU 16(R10), X1 + MOVOU -32(R10)(R9*1), X2 + MOVOU -16(R10)(R9*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R9*1) + MOVOU X3, -16(AX)(R9*1) + +memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm10B: + MOVQ SI, AX + JMP emit_literal_done_match_emit_encodeSnappyBetterBlockAsm10B + +memmove_long_match_emit_encodeSnappyBetterBlockAsm10B: + LEAQ (AX)(R9*1), SI + + // genMemMoveLong + MOVOU (R10), X0 + MOVOU 16(R10), X1 + MOVOU -32(R10)(R9*1), X2 + MOVOU -16(R10)(R9*1), X3 + MOVQ R9, R13 + SHRQ $0x05, R13 + MOVQ AX, R11 + ANDL $0x0000001f, R11 + MOVQ $0x00000040, R14 + SUBQ R11, R14 + DECQ R13 + JA emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm10Blarge_forward_sse_loop_32 + LEAQ -32(R10)(R14*1), R11 + LEAQ -32(AX)(R14*1), R15 + +emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm10Blarge_big_loop_back: + MOVOU (R11), X4 + MOVOU 16(R11), X5 + MOVOA X4, (R15) + MOVOA X5, 16(R15) + ADDQ $0x20, R15 + ADDQ $0x20, R11 + ADDQ $0x20, R14 + DECQ R13 + JNA emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm10Blarge_big_loop_back + +emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm10Blarge_forward_sse_loop_32: + MOVOU -32(R10)(R14*1), X4 + MOVOU -16(R10)(R14*1), X5 + MOVOA X4, -32(AX)(R14*1) + MOVOA X5, -16(AX)(R14*1) + ADDQ $0x20, R14 + CMPQ R9, R14 + JAE emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm10Blarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R9*1) + MOVOU X3, -16(AX)(R9*1) + MOVQ SI, AX + +emit_literal_done_match_emit_encodeSnappyBetterBlockAsm10B: + ADDL R12, CX + ADDL $0x04, R12 + MOVL CX, 12(SP) + + // emitCopy +two_byte_offset_match_nolit_encodeSnappyBetterBlockAsm10B: + CMPL R12, $0x40 + JLE two_byte_offset_short_match_nolit_encodeSnappyBetterBlockAsm10B + MOVB $0xee, (AX) + MOVW R8, 1(AX) + LEAL -60(R12), R12 + ADDQ $0x03, AX + JMP two_byte_offset_match_nolit_encodeSnappyBetterBlockAsm10B + +two_byte_offset_short_match_nolit_encodeSnappyBetterBlockAsm10B: + CMPL R12, $0x0c + JGE emit_copy_three_match_nolit_encodeSnappyBetterBlockAsm10B + CMPL R8, $0x00000800 + JGE emit_copy_three_match_nolit_encodeSnappyBetterBlockAsm10B + MOVB $0x01, BL + LEAL -16(BX)(R12*4), R12 + MOVB R8, 1(AX) + SHRL $0x08, R8 + SHLL $0x05, R8 + ORL R8, R12 + MOVB R12, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeSnappyBetterBlockAsm10B + +emit_copy_three_match_nolit_encodeSnappyBetterBlockAsm10B: + MOVB $0x02, BL + LEAL -4(BX)(R12*4), R12 + MOVB R12, (AX) + MOVW R8, 1(AX) + ADDQ $0x03, AX + +match_nolit_emitcopy_end_encodeSnappyBetterBlockAsm10B: + CMPL CX, 8(SP) + JGE emit_remainder_encodeSnappyBetterBlockAsm10B + CMPQ AX, (SP) + JL 
match_nolit_dst_ok_encodeSnappyBetterBlockAsm10B + MOVQ $0x00000000, ret+48(FP) + RET + +match_nolit_dst_ok_encodeSnappyBetterBlockAsm10B: + MOVQ $0x0000cf1bbcdcbf9b, SI + MOVQ $0x9e3779b1, R8 + INCL DI + MOVQ (DX)(DI*1), R9 + MOVQ R9, R10 + MOVQ R9, R11 + MOVQ R9, R12 + SHRQ $0x08, R11 + MOVQ R11, R13 + SHRQ $0x10, R12 + LEAL 1(DI), R14 + LEAL 2(DI), R15 + MOVQ -2(DX)(CX*1), R9 + SHLQ $0x10, R10 + IMULQ SI, R10 + SHRQ $0x34, R10 + SHLQ $0x10, R13 + IMULQ SI, R13 + SHRQ $0x34, R13 + SHLQ $0x20, R11 + IMULQ R8, R11 + SHRQ $0x36, R11 + SHLQ $0x20, R12 + IMULQ R8, R12 + SHRQ $0x36, R12 + MOVL DI, 24(SP)(R10*4) + MOVL R14, 24(SP)(R13*4) + MOVL R14, 16408(SP)(R11*4) + MOVL R15, 16408(SP)(R12*4) + MOVQ R9, R10 + MOVQ R9, R11 + SHRQ $0x08, R11 + MOVQ R11, R13 + LEAL -2(CX), R9 + LEAL -1(CX), DI + SHLQ $0x10, R10 + IMULQ SI, R10 + SHRQ $0x34, R10 + SHLQ $0x20, R11 + IMULQ R8, R11 + SHRQ $0x36, R11 + SHLQ $0x10, R13 + IMULQ SI, R13 + SHRQ $0x34, R13 + MOVL R9, 24(SP)(R10*4) + MOVL DI, 16408(SP)(R11*4) + MOVL DI, 24(SP)(R13*4) + JMP search_loop_encodeSnappyBetterBlockAsm10B + +emit_remainder_encodeSnappyBetterBlockAsm10B: + MOVQ src_len+32(FP), CX + SUBL 12(SP), CX + LEAQ 3(AX)(CX*1), CX + CMPQ CX, (SP) + JL emit_remainder_ok_encodeSnappyBetterBlockAsm10B + MOVQ $0x00000000, ret+48(FP) + RET + +emit_remainder_ok_encodeSnappyBetterBlockAsm10B: + MOVQ src_len+32(FP), CX + MOVL 12(SP), BX + CMPL BX, CX + JEQ emit_literal_done_emit_remainder_encodeSnappyBetterBlockAsm10B + MOVL CX, SI + MOVL CX, 12(SP) + LEAQ (DX)(BX*1), CX + SUBL BX, SI + LEAL -1(SI), DX + CMPL DX, $0x3c + JLT one_byte_emit_remainder_encodeSnappyBetterBlockAsm10B + CMPL DX, $0x00000100 + JLT two_bytes_emit_remainder_encodeSnappyBetterBlockAsm10B + MOVB $0xf4, (AX) + MOVW DX, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_emit_remainder_encodeSnappyBetterBlockAsm10B + +two_bytes_emit_remainder_encodeSnappyBetterBlockAsm10B: + MOVB $0xf0, (AX) + MOVB DL, 1(AX) + ADDQ $0x02, AX + CMPL DX, $0x40 + JL memmove_emit_remainder_encodeSnappyBetterBlockAsm10B + JMP memmove_long_emit_remainder_encodeSnappyBetterBlockAsm10B + +one_byte_emit_remainder_encodeSnappyBetterBlockAsm10B: + SHLB $0x02, DL + MOVB DL, (AX) + ADDQ $0x01, AX + +memmove_emit_remainder_encodeSnappyBetterBlockAsm10B: + LEAQ (AX)(SI*1), DX + MOVL SI, BX + + // genMemMoveShort + CMPQ BX, $0x08 + JLE emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm10B_memmove_move_8 + CMPQ BX, $0x10 + JBE emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm10B_memmove_move_8through16 + CMPQ BX, $0x20 + JBE emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm10B_memmove_move_17through32 + JMP emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm10B_memmove_move_33through64 + +emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm10B_memmove_move_8: + MOVQ (CX), SI + MOVQ SI, (AX) + JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm10B + +emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm10B_memmove_move_8through16: + MOVQ (CX), SI + MOVQ -8(CX)(BX*1), CX + MOVQ SI, (AX) + MOVQ CX, -8(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm10B + +emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm10B_memmove_move_17through32: + MOVOU (CX), X0 + MOVOU -16(CX)(BX*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm10B + +emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm10B_memmove_move_33through64: + MOVOU (CX), X0 + MOVOU 16(CX), X1 + MOVOU -32(CX)(BX*1), X2 + 
MOVOU -16(CX)(BX*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(BX*1) + MOVOU X3, -16(AX)(BX*1) + +memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm10B: + MOVQ DX, AX + JMP emit_literal_done_emit_remainder_encodeSnappyBetterBlockAsm10B + +memmove_long_emit_remainder_encodeSnappyBetterBlockAsm10B: + LEAQ (AX)(SI*1), DX + MOVL SI, BX + + // genMemMoveLong + MOVOU (CX), X0 + MOVOU 16(CX), X1 + MOVOU -32(CX)(BX*1), X2 + MOVOU -16(CX)(BX*1), X3 + MOVQ BX, DI + SHRQ $0x05, DI + MOVQ AX, SI + ANDL $0x0000001f, SI + MOVQ $0x00000040, R8 + SUBQ SI, R8 + DECQ DI + JA emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm10Blarge_forward_sse_loop_32 + LEAQ -32(CX)(R8*1), SI + LEAQ -32(AX)(R8*1), R9 + +emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm10Blarge_big_loop_back: + MOVOU (SI), X4 + MOVOU 16(SI), X5 + MOVOA X4, (R9) + MOVOA X5, 16(R9) + ADDQ $0x20, R9 + ADDQ $0x20, SI + ADDQ $0x20, R8 + DECQ DI + JNA emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm10Blarge_big_loop_back + +emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm10Blarge_forward_sse_loop_32: + MOVOU -32(CX)(R8*1), X4 + MOVOU -16(CX)(R8*1), X5 + MOVOA X4, -32(AX)(R8*1) + MOVOA X5, -16(AX)(R8*1) + ADDQ $0x20, R8 + CMPQ BX, R8 + JAE emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm10Blarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(BX*1) + MOVOU X3, -16(AX)(BX*1) + MOVQ DX, AX + +emit_literal_done_emit_remainder_encodeSnappyBetterBlockAsm10B: + MOVQ dst_base+0(FP), CX + SUBQ CX, AX + MOVQ AX, ret+48(FP) + RET + +// func encodeSnappyBetterBlockAsm8B(dst []byte, src []byte) int +// Requires: SSE2 +TEXT ·encodeSnappyBetterBlockAsm8B(SB), $5144-56 + MOVQ dst_base+0(FP), AX + MOVQ $0x00000028, CX + LEAQ 24(SP), DX + PXOR X0, X0 + +zero_loop_encodeSnappyBetterBlockAsm8B: + MOVOU X0, (DX) + MOVOU X0, 16(DX) + MOVOU X0, 32(DX) + MOVOU X0, 48(DX) + MOVOU X0, 64(DX) + MOVOU X0, 80(DX) + MOVOU X0, 96(DX) + MOVOU X0, 112(DX) + ADDQ $0x80, DX + DECQ CX + JNZ zero_loop_encodeSnappyBetterBlockAsm8B + MOVL $0x00000000, 12(SP) + MOVQ src_len+32(FP), CX + LEAQ -9(CX), DX + LEAQ -8(CX), SI + MOVL SI, 8(SP) + SHRQ $0x05, CX + SUBL CX, DX + LEAQ (AX)(DX*1), DX + MOVQ DX, (SP) + MOVL $0x00000001, CX + MOVL $0x00000000, 16(SP) + MOVQ src_base+24(FP), DX + +search_loop_encodeSnappyBetterBlockAsm8B: + MOVL CX, SI + SUBL 12(SP), SI + SHRL $0x04, SI + LEAL 1(CX)(SI*1), SI + CMPL SI, 8(SP) + JGE emit_remainder_encodeSnappyBetterBlockAsm8B + MOVQ (DX)(CX*1), DI + MOVL SI, 20(SP) + MOVQ $0x0000cf1bbcdcbf9b, R9 + MOVQ $0x9e3779b1, SI + MOVQ DI, R10 + MOVQ DI, R11 + SHLQ $0x10, R10 + IMULQ R9, R10 + SHRQ $0x36, R10 + SHLQ $0x20, R11 + IMULQ SI, R11 + SHRQ $0x38, R11 + MOVL 24(SP)(R10*4), SI + MOVL 4120(SP)(R11*4), R8 + MOVL CX, 24(SP)(R10*4) + MOVL CX, 4120(SP)(R11*4) + CMPL (DX)(SI*1), DI + JEQ candidate_match_encodeSnappyBetterBlockAsm8B + CMPL (DX)(R8*1), DI + JEQ candidateS_match_encodeSnappyBetterBlockAsm8B + MOVL 20(SP), CX + JMP search_loop_encodeSnappyBetterBlockAsm8B + +candidateS_match_encodeSnappyBetterBlockAsm8B: + SHRQ $0x08, DI + MOVQ DI, R10 + SHLQ $0x10, R10 + IMULQ R9, R10 + SHRQ $0x36, R10 + MOVL 24(SP)(R10*4), SI + INCL CX + MOVL CX, 24(SP)(R10*4) + CMPL (DX)(SI*1), DI + JEQ candidate_match_encodeSnappyBetterBlockAsm8B + DECL CX + MOVL R8, SI + +candidate_match_encodeSnappyBetterBlockAsm8B: + MOVL 12(SP), DI + TESTL SI, SI + JZ match_extend_back_end_encodeSnappyBetterBlockAsm8B + 
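+	// Extend the match backwards one byte at a time while the preceding
+	// bytes agree, stopping at the last emitted literal boundary (12(SP)).
+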
+match_extend_back_loop_encodeSnappyBetterBlockAsm8B: + CMPL CX, DI + JLE match_extend_back_end_encodeSnappyBetterBlockAsm8B + MOVB -1(DX)(SI*1), BL + MOVB -1(DX)(CX*1), R8 + CMPB BL, R8 + JNE match_extend_back_end_encodeSnappyBetterBlockAsm8B + LEAL -1(CX), CX + DECL SI + JZ match_extend_back_end_encodeSnappyBetterBlockAsm8B + JMP match_extend_back_loop_encodeSnappyBetterBlockAsm8B + +match_extend_back_end_encodeSnappyBetterBlockAsm8B: + MOVL CX, DI + SUBL 12(SP), DI + LEAQ 3(AX)(DI*1), DI + CMPQ DI, (SP) + JL match_dst_size_check_encodeSnappyBetterBlockAsm8B + MOVQ $0x00000000, ret+48(FP) + RET + +match_dst_size_check_encodeSnappyBetterBlockAsm8B: + MOVL CX, DI + ADDL $0x04, CX + ADDL $0x04, SI + MOVQ src_len+32(FP), R8 + SUBL CX, R8 + LEAQ (DX)(CX*1), R9 + LEAQ (DX)(SI*1), R10 + + // matchLen + XORL R12, R12 + CMPL R8, $0x08 + JL matchlen_single_match_nolit_encodeSnappyBetterBlockAsm8B + +matchlen_loopback_match_nolit_encodeSnappyBetterBlockAsm8B: + MOVQ (R9)(R12*1), R11 + XORQ (R10)(R12*1), R11 + TESTQ R11, R11 + JZ matchlen_loop_match_nolit_encodeSnappyBetterBlockAsm8B + BSFQ R11, R11 + SARQ $0x03, R11 + LEAL (R12)(R11*1), R12 + JMP match_nolit_end_encodeSnappyBetterBlockAsm8B + +matchlen_loop_match_nolit_encodeSnappyBetterBlockAsm8B: + LEAL -8(R8), R8 + LEAL 8(R12), R12 + CMPL R8, $0x08 + JGE matchlen_loopback_match_nolit_encodeSnappyBetterBlockAsm8B + +matchlen_single_match_nolit_encodeSnappyBetterBlockAsm8B: + TESTL R8, R8 + JZ match_nolit_end_encodeSnappyBetterBlockAsm8B + +matchlen_single_loopback_match_nolit_encodeSnappyBetterBlockAsm8B: + MOVB (R9)(R12*1), R11 + CMPB (R10)(R12*1), R11 + JNE match_nolit_end_encodeSnappyBetterBlockAsm8B + LEAL 1(R12), R12 + DECL R8 + JNZ matchlen_single_loopback_match_nolit_encodeSnappyBetterBlockAsm8B + +match_nolit_end_encodeSnappyBetterBlockAsm8B: + MOVL CX, R8 + SUBL SI, R8 + + // Check if repeat + MOVL R8, 16(SP) + MOVL 12(SP), SI + CMPL SI, DI + JEQ emit_literal_done_match_emit_encodeSnappyBetterBlockAsm8B + MOVL DI, R9 + MOVL DI, 12(SP) + LEAQ (DX)(SI*1), R10 + SUBL SI, R9 + LEAL -1(R9), SI + CMPL SI, $0x3c + JLT one_byte_match_emit_encodeSnappyBetterBlockAsm8B + CMPL SI, $0x00000100 + JLT two_bytes_match_emit_encodeSnappyBetterBlockAsm8B + MOVB $0xf4, (AX) + MOVW SI, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_match_emit_encodeSnappyBetterBlockAsm8B + +two_bytes_match_emit_encodeSnappyBetterBlockAsm8B: + MOVB $0xf0, (AX) + MOVB SI, 1(AX) + ADDQ $0x02, AX + CMPL SI, $0x40 + JL memmove_match_emit_encodeSnappyBetterBlockAsm8B + JMP memmove_long_match_emit_encodeSnappyBetterBlockAsm8B + +one_byte_match_emit_encodeSnappyBetterBlockAsm8B: + SHLB $0x02, SI + MOVB SI, (AX) + ADDQ $0x01, AX + +memmove_match_emit_encodeSnappyBetterBlockAsm8B: + LEAQ (AX)(R9*1), SI + + // genMemMoveShort + CMPQ R9, $0x08 + JLE emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm8B_memmove_move_8 + CMPQ R9, $0x10 + JBE emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm8B_memmove_move_8through16 + CMPQ R9, $0x20 + JBE emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm8B_memmove_move_17through32 + JMP emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm8B_memmove_move_33through64 + +emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm8B_memmove_move_8: + MOVQ (R10), R11 + MOVQ R11, (AX) + JMP memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm8B + +emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm8B_memmove_move_8through16: + MOVQ (R10), R11 + MOVQ -8(R10)(R9*1), R10 + MOVQ R11, (AX) + MOVQ R10, -8(AX)(R9*1) + JMP 
memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm8B + +emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm8B_memmove_move_17through32: + MOVOU (R10), X0 + MOVOU -16(R10)(R9*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(R9*1) + JMP memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm8B + +emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm8B_memmove_move_33through64: + MOVOU (R10), X0 + MOVOU 16(R10), X1 + MOVOU -32(R10)(R9*1), X2 + MOVOU -16(R10)(R9*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R9*1) + MOVOU X3, -16(AX)(R9*1) + +memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm8B: + MOVQ SI, AX + JMP emit_literal_done_match_emit_encodeSnappyBetterBlockAsm8B + +memmove_long_match_emit_encodeSnappyBetterBlockAsm8B: + LEAQ (AX)(R9*1), SI + + // genMemMoveLong + MOVOU (R10), X0 + MOVOU 16(R10), X1 + MOVOU -32(R10)(R9*1), X2 + MOVOU -16(R10)(R9*1), X3 + MOVQ R9, R13 + SHRQ $0x05, R13 + MOVQ AX, R11 + ANDL $0x0000001f, R11 + MOVQ $0x00000040, R14 + SUBQ R11, R14 + DECQ R13 + JA emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm8Blarge_forward_sse_loop_32 + LEAQ -32(R10)(R14*1), R11 + LEAQ -32(AX)(R14*1), R15 + +emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm8Blarge_big_loop_back: + MOVOU (R11), X4 + MOVOU 16(R11), X5 + MOVOA X4, (R15) + MOVOA X5, 16(R15) + ADDQ $0x20, R15 + ADDQ $0x20, R11 + ADDQ $0x20, R14 + DECQ R13 + JNA emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm8Blarge_big_loop_back + +emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm8Blarge_forward_sse_loop_32: + MOVOU -32(R10)(R14*1), X4 + MOVOU -16(R10)(R14*1), X5 + MOVOA X4, -32(AX)(R14*1) + MOVOA X5, -16(AX)(R14*1) + ADDQ $0x20, R14 + CMPQ R9, R14 + JAE emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm8Blarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R9*1) + MOVOU X3, -16(AX)(R9*1) + MOVQ SI, AX + +emit_literal_done_match_emit_encodeSnappyBetterBlockAsm8B: + ADDL R12, CX + ADDL $0x04, R12 + MOVL CX, 12(SP) + + // emitCopy +two_byte_offset_match_nolit_encodeSnappyBetterBlockAsm8B: + CMPL R12, $0x40 + JLE two_byte_offset_short_match_nolit_encodeSnappyBetterBlockAsm8B + MOVB $0xee, (AX) + MOVW R8, 1(AX) + LEAL -60(R12), R12 + ADDQ $0x03, AX + JMP two_byte_offset_match_nolit_encodeSnappyBetterBlockAsm8B + +two_byte_offset_short_match_nolit_encodeSnappyBetterBlockAsm8B: + CMPL R12, $0x0c + JGE emit_copy_three_match_nolit_encodeSnappyBetterBlockAsm8B + MOVB $0x01, BL + LEAL -16(BX)(R12*4), R12 + MOVB R8, 1(AX) + SHRL $0x08, R8 + SHLL $0x05, R8 + ORL R8, R12 + MOVB R12, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeSnappyBetterBlockAsm8B + +emit_copy_three_match_nolit_encodeSnappyBetterBlockAsm8B: + MOVB $0x02, BL + LEAL -4(BX)(R12*4), R12 + MOVB R12, (AX) + MOVW R8, 1(AX) + ADDQ $0x03, AX + +match_nolit_emitcopy_end_encodeSnappyBetterBlockAsm8B: + CMPL CX, 8(SP) + JGE emit_remainder_encodeSnappyBetterBlockAsm8B + CMPQ AX, (SP) + JL match_nolit_dst_ok_encodeSnappyBetterBlockAsm8B + MOVQ $0x00000000, ret+48(FP) + RET + +match_nolit_dst_ok_encodeSnappyBetterBlockAsm8B: + MOVQ $0x0000cf1bbcdcbf9b, SI + MOVQ $0x9e3779b1, R8 + INCL DI + MOVQ (DX)(DI*1), R9 + MOVQ R9, R10 + MOVQ R9, R11 + MOVQ R9, R12 + SHRQ $0x08, R11 + MOVQ R11, R13 + SHRQ $0x10, R12 + LEAL 1(DI), R14 + LEAL 2(DI), R15 + MOVQ -2(DX)(CX*1), R9 + SHLQ $0x10, R10 + IMULQ SI, R10 + SHRQ $0x36, R10 + SHLQ $0x10, R13 + IMULQ SI, R13 + SHRQ $0x36, R13 + SHLQ $0x20, R11 + IMULQ R8, R11 + SHRQ $0x38, R11 + SHLQ $0x20, R12 + IMULQ R8, R12 + SHRQ $0x38, 
R12 + MOVL DI, 24(SP)(R10*4) + MOVL R14, 24(SP)(R13*4) + MOVL R14, 4120(SP)(R11*4) + MOVL R15, 4120(SP)(R12*4) + MOVQ R9, R10 + MOVQ R9, R11 + SHRQ $0x08, R11 + MOVQ R11, R13 + LEAL -2(CX), R9 + LEAL -1(CX), DI + SHLQ $0x10, R10 + IMULQ SI, R10 + SHRQ $0x36, R10 + SHLQ $0x20, R11 + IMULQ R8, R11 + SHRQ $0x38, R11 + SHLQ $0x10, R13 + IMULQ SI, R13 + SHRQ $0x36, R13 + MOVL R9, 24(SP)(R10*4) + MOVL DI, 4120(SP)(R11*4) + MOVL DI, 24(SP)(R13*4) + JMP search_loop_encodeSnappyBetterBlockAsm8B + +emit_remainder_encodeSnappyBetterBlockAsm8B: + MOVQ src_len+32(FP), CX + SUBL 12(SP), CX + LEAQ 3(AX)(CX*1), CX + CMPQ CX, (SP) + JL emit_remainder_ok_encodeSnappyBetterBlockAsm8B + MOVQ $0x00000000, ret+48(FP) + RET + +emit_remainder_ok_encodeSnappyBetterBlockAsm8B: + MOVQ src_len+32(FP), CX + MOVL 12(SP), BX + CMPL BX, CX + JEQ emit_literal_done_emit_remainder_encodeSnappyBetterBlockAsm8B + MOVL CX, SI + MOVL CX, 12(SP) + LEAQ (DX)(BX*1), CX + SUBL BX, SI + LEAL -1(SI), DX + CMPL DX, $0x3c + JLT one_byte_emit_remainder_encodeSnappyBetterBlockAsm8B + CMPL DX, $0x00000100 + JLT two_bytes_emit_remainder_encodeSnappyBetterBlockAsm8B + MOVB $0xf4, (AX) + MOVW DX, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_emit_remainder_encodeSnappyBetterBlockAsm8B + +two_bytes_emit_remainder_encodeSnappyBetterBlockAsm8B: + MOVB $0xf0, (AX) + MOVB DL, 1(AX) + ADDQ $0x02, AX + CMPL DX, $0x40 + JL memmove_emit_remainder_encodeSnappyBetterBlockAsm8B + JMP memmove_long_emit_remainder_encodeSnappyBetterBlockAsm8B + +one_byte_emit_remainder_encodeSnappyBetterBlockAsm8B: + SHLB $0x02, DL + MOVB DL, (AX) + ADDQ $0x01, AX + +memmove_emit_remainder_encodeSnappyBetterBlockAsm8B: + LEAQ (AX)(SI*1), DX + MOVL SI, BX + + // genMemMoveShort + CMPQ BX, $0x08 + JLE emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm8B_memmove_move_8 + CMPQ BX, $0x10 + JBE emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm8B_memmove_move_8through16 + CMPQ BX, $0x20 + JBE emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm8B_memmove_move_17through32 + JMP emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm8B_memmove_move_33through64 + +emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm8B_memmove_move_8: + MOVQ (CX), SI + MOVQ SI, (AX) + JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm8B + +emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm8B_memmove_move_8through16: + MOVQ (CX), SI + MOVQ -8(CX)(BX*1), CX + MOVQ SI, (AX) + MOVQ CX, -8(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm8B + +emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm8B_memmove_move_17through32: + MOVOU (CX), X0 + MOVOU -16(CX)(BX*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm8B + +emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm8B_memmove_move_33through64: + MOVOU (CX), X0 + MOVOU 16(CX), X1 + MOVOU -32(CX)(BX*1), X2 + MOVOU -16(CX)(BX*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(BX*1) + MOVOU X3, -16(AX)(BX*1) + +memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm8B: + MOVQ DX, AX + JMP emit_literal_done_emit_remainder_encodeSnappyBetterBlockAsm8B + +memmove_long_emit_remainder_encodeSnappyBetterBlockAsm8B: + LEAQ (AX)(SI*1), DX + MOVL SI, BX + + // genMemMoveLong + MOVOU (CX), X0 + MOVOU 16(CX), X1 + MOVOU -32(CX)(BX*1), X2 + MOVOU -16(CX)(BX*1), X3 + MOVQ BX, DI + SHRQ $0x05, DI + MOVQ AX, SI + ANDL $0x0000001f, SI + MOVQ $0x00000040, R8 + SUBQ SI, R8 + DECQ DI + JA 
emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm8Blarge_forward_sse_loop_32 + LEAQ -32(CX)(R8*1), SI + LEAQ -32(AX)(R8*1), R9 + +emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm8Blarge_big_loop_back: + MOVOU (SI), X4 + MOVOU 16(SI), X5 + MOVOA X4, (R9) + MOVOA X5, 16(R9) + ADDQ $0x20, R9 + ADDQ $0x20, SI + ADDQ $0x20, R8 + DECQ DI + JNA emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm8Blarge_big_loop_back + +emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm8Blarge_forward_sse_loop_32: + MOVOU -32(CX)(R8*1), X4 + MOVOU -16(CX)(R8*1), X5 + MOVOA X4, -32(AX)(R8*1) + MOVOA X5, -16(AX)(R8*1) + ADDQ $0x20, R8 + CMPQ BX, R8 + JAE emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm8Blarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(BX*1) + MOVOU X3, -16(AX)(BX*1) + MOVQ DX, AX + +emit_literal_done_emit_remainder_encodeSnappyBetterBlockAsm8B: + MOVQ dst_base+0(FP), CX + SUBQ CX, AX + MOVQ AX, ret+48(FP) + RET + +// func emitLiteral(dst []byte, lit []byte) int +// Requires: SSE2 +TEXT ·emitLiteral(SB), NOSPLIT, $0-56 + MOVQ lit_len+32(FP), DX + MOVQ dst_base+0(FP), AX + MOVQ lit_base+24(FP), CX + TESTQ DX, DX + JZ emit_literal_end_standalone_skip + MOVL DX, BX + LEAL -1(DX), SI + CMPL SI, $0x3c + JLT one_byte_standalone + CMPL SI, $0x00000100 + JLT two_bytes_standalone + CMPL SI, $0x00010000 + JLT three_bytes_standalone + CMPL SI, $0x01000000 + JLT four_bytes_standalone + MOVB $0xfc, (AX) + MOVL SI, 1(AX) + ADDQ $0x05, BX + ADDQ $0x05, AX + JMP memmove_long_standalone + +four_bytes_standalone: + MOVL SI, DI + SHRL $0x10, DI + MOVB $0xf8, (AX) + MOVW SI, 1(AX) + MOVB DI, 3(AX) + ADDQ $0x04, BX + ADDQ $0x04, AX + JMP memmove_long_standalone + +three_bytes_standalone: + MOVB $0xf4, (AX) + MOVW SI, 1(AX) + ADDQ $0x03, BX + ADDQ $0x03, AX + JMP memmove_long_standalone + +two_bytes_standalone: + MOVB $0xf0, (AX) + MOVB SI, 1(AX) + ADDQ $0x02, BX + ADDQ $0x02, AX + CMPL SI, $0x40 + JL memmove_standalone + JMP memmove_long_standalone + +one_byte_standalone: + SHLB $0x02, SI + MOVB SI, (AX) + ADDQ $0x01, BX + ADDQ $0x01, AX + +memmove_standalone: + // genMemMoveShort + CMPQ DX, $0x03 + JB emit_lit_memmove_standalone_memmove_move_1or2 + JE emit_lit_memmove_standalone_memmove_move_3 + CMPQ DX, $0x08 + JB emit_lit_memmove_standalone_memmove_move_4through7 + CMPQ DX, $0x10 + JBE emit_lit_memmove_standalone_memmove_move_8through16 + CMPQ DX, $0x20 + JBE emit_lit_memmove_standalone_memmove_move_17through32 + JMP emit_lit_memmove_standalone_memmove_move_33through64 + +emit_lit_memmove_standalone_memmove_move_1or2: + MOVB (CX), SI + MOVB -1(CX)(DX*1), CL + MOVB SI, (AX) + MOVB CL, -1(AX)(DX*1) + JMP emit_literal_end_standalone + +emit_lit_memmove_standalone_memmove_move_3: + MOVW (CX), SI + MOVB 2(CX), CL + MOVW SI, (AX) + MOVB CL, 2(AX) + JMP emit_literal_end_standalone + +emit_lit_memmove_standalone_memmove_move_4through7: + MOVL (CX), SI + MOVL -4(CX)(DX*1), CX + MOVL SI, (AX) + MOVL CX, -4(AX)(DX*1) + JMP emit_literal_end_standalone + +emit_lit_memmove_standalone_memmove_move_8through16: + MOVQ (CX), SI + MOVQ -8(CX)(DX*1), CX + MOVQ SI, (AX) + MOVQ CX, -8(AX)(DX*1) + JMP emit_literal_end_standalone + +emit_lit_memmove_standalone_memmove_move_17through32: + MOVOU (CX), X0 + MOVOU -16(CX)(DX*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(DX*1) + JMP emit_literal_end_standalone + +emit_lit_memmove_standalone_memmove_move_33through64: + MOVOU (CX), X0 + MOVOU 16(CX), X1 + MOVOU -32(CX)(DX*1), X2 + MOVOU 
-16(CX)(DX*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(DX*1) + MOVOU X3, -16(AX)(DX*1) + JMP emit_literal_end_standalone + JMP emit_literal_end_standalone + +memmove_long_standalone: + // genMemMoveLong + MOVOU (CX), X0 + MOVOU 16(CX), X1 + MOVOU -32(CX)(DX*1), X2 + MOVOU -16(CX)(DX*1), X3 + MOVQ DX, DI + SHRQ $0x05, DI + MOVQ AX, SI + ANDL $0x0000001f, SI + MOVQ $0x00000040, R8 + SUBQ SI, R8 + DECQ DI + JA emit_lit_memmove_long_standalonelarge_forward_sse_loop_32 + LEAQ -32(CX)(R8*1), SI + LEAQ -32(AX)(R8*1), R9 + +emit_lit_memmove_long_standalonelarge_big_loop_back: + MOVOU (SI), X4 + MOVOU 16(SI), X5 + MOVOA X4, (R9) + MOVOA X5, 16(R9) + ADDQ $0x20, R9 + ADDQ $0x20, SI + ADDQ $0x20, R8 + DECQ DI + JNA emit_lit_memmove_long_standalonelarge_big_loop_back + +emit_lit_memmove_long_standalonelarge_forward_sse_loop_32: + MOVOU -32(CX)(R8*1), X4 + MOVOU -16(CX)(R8*1), X5 + MOVOA X4, -32(AX)(R8*1) + MOVOA X5, -16(AX)(R8*1) + ADDQ $0x20, R8 + CMPQ DX, R8 + JAE emit_lit_memmove_long_standalonelarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(DX*1) + MOVOU X3, -16(AX)(DX*1) + JMP emit_literal_end_standalone + JMP emit_literal_end_standalone + +emit_literal_end_standalone_skip: + XORQ BX, BX + +emit_literal_end_standalone: + MOVQ BX, ret+48(FP) + RET + +// func emitRepeat(dst []byte, offset int, length int) int +TEXT ·emitRepeat(SB), NOSPLIT, $0-48 + XORQ BX, BX + MOVQ dst_base+0(FP), AX + MOVQ offset+24(FP), CX + MOVQ length+32(FP), DX + + // emitRepeat +emit_repeat_again_standalone: + MOVL DX, SI + LEAL -4(DX), DX + CMPL SI, $0x08 + JLE repeat_two_standalone + CMPL SI, $0x0c + JGE cant_repeat_two_offset_standalone + CMPL CX, $0x00000800 + JLT repeat_two_offset_standalone + +cant_repeat_two_offset_standalone: + CMPL DX, $0x00000104 + JLT repeat_three_standalone + CMPL DX, $0x00010100 + JLT repeat_four_standalone + CMPL DX, $0x0100ffff + JLT repeat_five_standalone + LEAL -16842747(DX), DX + MOVW $0x001d, (AX) + MOVW $0xfffb, 2(AX) + MOVB $0xff, 4(AX) + ADDQ $0x05, AX + ADDQ $0x05, BX + JMP emit_repeat_again_standalone + +repeat_five_standalone: + LEAL -65536(DX), DX + MOVL DX, CX + MOVW $0x001d, (AX) + MOVW DX, 2(AX) + SARL $0x10, CX + MOVB CL, 4(AX) + ADDQ $0x05, BX + ADDQ $0x05, AX + JMP gen_emit_repeat_end + +repeat_four_standalone: + LEAL -256(DX), DX + MOVW $0x0019, (AX) + MOVW DX, 2(AX) + ADDQ $0x04, BX + ADDQ $0x04, AX + JMP gen_emit_repeat_end + +repeat_three_standalone: + LEAL -4(DX), DX + MOVW $0x0015, (AX) + MOVB DL, 2(AX) + ADDQ $0x03, BX + ADDQ $0x03, AX + JMP gen_emit_repeat_end + +repeat_two_standalone: + SHLL $0x02, DX + ORL $0x01, DX + MOVW DX, (AX) + ADDQ $0x02, BX + ADDQ $0x02, AX + JMP gen_emit_repeat_end + +repeat_two_offset_standalone: + XORQ SI, SI + LEAL 1(SI)(DX*4), DX + MOVB CL, 1(AX) + SARL $0x08, CX + SHLL $0x05, CX + ORL CX, DX + MOVB DL, (AX) + ADDQ $0x02, BX + ADDQ $0x02, AX + +gen_emit_repeat_end: + MOVQ BX, ret+40(FP) + RET + +// func emitCopy(dst []byte, offset int, length int) int +TEXT ·emitCopy(SB), NOSPLIT, $0-48 + XORQ BX, BX + MOVQ dst_base+0(FP), AX + MOVQ offset+24(FP), CX + MOVQ length+32(FP), DX + + // emitCopy + CMPL CX, $0x00010000 + JL two_byte_offset_standalone + +four_bytes_loop_back_standalone: + CMPL DX, $0x40 + JLE four_bytes_remain_standalone + MOVB $0xff, (AX) + MOVL CX, 1(AX) + LEAL -64(DX), DX + ADDQ $0x05, BX + ADDQ $0x05, AX + CMPL DX, $0x04 + JL four_bytes_remain_standalone + + // emitRepeat +emit_repeat_again_standalone_emit_copy: + MOVL DX, SI + LEAL -4(DX), DX + CMPL SI, $0x08 + JLE 
repeat_two_standalone_emit_copy + CMPL SI, $0x0c + JGE cant_repeat_two_offset_standalone_emit_copy + CMPL CX, $0x00000800 + JLT repeat_two_offset_standalone_emit_copy + +cant_repeat_two_offset_standalone_emit_copy: + CMPL DX, $0x00000104 + JLT repeat_three_standalone_emit_copy + CMPL DX, $0x00010100 + JLT repeat_four_standalone_emit_copy + CMPL DX, $0x0100ffff + JLT repeat_five_standalone_emit_copy + LEAL -16842747(DX), DX + MOVW $0x001d, (AX) + MOVW $0xfffb, 2(AX) + MOVB $0xff, 4(AX) + ADDQ $0x05, AX + ADDQ $0x05, BX + JMP emit_repeat_again_standalone_emit_copy + +repeat_five_standalone_emit_copy: + LEAL -65536(DX), DX + MOVL DX, CX + MOVW $0x001d, (AX) + MOVW DX, 2(AX) + SARL $0x10, CX + MOVB CL, 4(AX) + ADDQ $0x05, BX + ADDQ $0x05, AX + JMP gen_emit_copy_end + +repeat_four_standalone_emit_copy: + LEAL -256(DX), DX + MOVW $0x0019, (AX) + MOVW DX, 2(AX) + ADDQ $0x04, BX + ADDQ $0x04, AX + JMP gen_emit_copy_end + +repeat_three_standalone_emit_copy: + LEAL -4(DX), DX + MOVW $0x0015, (AX) + MOVB DL, 2(AX) + ADDQ $0x03, BX + ADDQ $0x03, AX + JMP gen_emit_copy_end + +repeat_two_standalone_emit_copy: + SHLL $0x02, DX + ORL $0x01, DX + MOVW DX, (AX) + ADDQ $0x02, BX + ADDQ $0x02, AX + JMP gen_emit_copy_end + +repeat_two_offset_standalone_emit_copy: + XORQ SI, SI + LEAL 1(SI)(DX*4), DX + MOVB CL, 1(AX) + SARL $0x08, CX + SHLL $0x05, CX + ORL CX, DX + MOVB DL, (AX) + ADDQ $0x02, BX + ADDQ $0x02, AX + JMP gen_emit_copy_end + JMP four_bytes_loop_back_standalone + +four_bytes_remain_standalone: + TESTL DX, DX + JZ gen_emit_copy_end + MOVB $0x03, SI + LEAL -4(SI)(DX*4), DX + MOVB DL, (AX) + MOVL CX, 1(AX) + ADDQ $0x05, BX + ADDQ $0x05, AX + JMP gen_emit_copy_end + +two_byte_offset_standalone: + CMPL DX, $0x40 + JLE two_byte_offset_short_standalone + MOVB $0xee, (AX) + MOVW CX, 1(AX) + LEAL -60(DX), DX + ADDQ $0x03, AX + ADDQ $0x03, BX + + // emitRepeat +emit_repeat_again_standalone_emit_copy_short: + MOVL DX, SI + LEAL -4(DX), DX + CMPL SI, $0x08 + JLE repeat_two_standalone_emit_copy_short + CMPL SI, $0x0c + JGE cant_repeat_two_offset_standalone_emit_copy_short + CMPL CX, $0x00000800 + JLT repeat_two_offset_standalone_emit_copy_short + +cant_repeat_two_offset_standalone_emit_copy_short: + CMPL DX, $0x00000104 + JLT repeat_three_standalone_emit_copy_short + CMPL DX, $0x00010100 + JLT repeat_four_standalone_emit_copy_short + CMPL DX, $0x0100ffff + JLT repeat_five_standalone_emit_copy_short + LEAL -16842747(DX), DX + MOVW $0x001d, (AX) + MOVW $0xfffb, 2(AX) + MOVB $0xff, 4(AX) + ADDQ $0x05, AX + ADDQ $0x05, BX + JMP emit_repeat_again_standalone_emit_copy_short + +repeat_five_standalone_emit_copy_short: + LEAL -65536(DX), DX + MOVL DX, CX + MOVW $0x001d, (AX) + MOVW DX, 2(AX) + SARL $0x10, CX + MOVB CL, 4(AX) + ADDQ $0x05, BX + ADDQ $0x05, AX + JMP gen_emit_copy_end + +repeat_four_standalone_emit_copy_short: + LEAL -256(DX), DX + MOVW $0x0019, (AX) + MOVW DX, 2(AX) + ADDQ $0x04, BX + ADDQ $0x04, AX + JMP gen_emit_copy_end + +repeat_three_standalone_emit_copy_short: + LEAL -4(DX), DX + MOVW $0x0015, (AX) + MOVB DL, 2(AX) + ADDQ $0x03, BX + ADDQ $0x03, AX + JMP gen_emit_copy_end + +repeat_two_standalone_emit_copy_short: + SHLL $0x02, DX + ORL $0x01, DX + MOVW DX, (AX) + ADDQ $0x02, BX + ADDQ $0x02, AX + JMP gen_emit_copy_end + +repeat_two_offset_standalone_emit_copy_short: + XORQ SI, SI + LEAL 1(SI)(DX*4), DX + MOVB CL, 1(AX) + SARL $0x08, CX + SHLL $0x05, CX + ORL CX, DX + MOVB DL, (AX) + ADDQ $0x02, BX + ADDQ $0x02, AX + JMP gen_emit_copy_end + JMP two_byte_offset_standalone + 
+two_byte_offset_short_standalone: + CMPL DX, $0x0c + JGE emit_copy_three_standalone + CMPL CX, $0x00000800 + JGE emit_copy_three_standalone + MOVB $0x01, SI + LEAL -16(SI)(DX*4), DX + MOVB CL, 1(AX) + SHRL $0x08, CX + SHLL $0x05, CX + ORL CX, DX + MOVB DL, (AX) + ADDQ $0x02, BX + ADDQ $0x02, AX + JMP gen_emit_copy_end + +emit_copy_three_standalone: + MOVB $0x02, SI + LEAL -4(SI)(DX*4), DX + MOVB DL, (AX) + MOVW CX, 1(AX) + ADDQ $0x03, BX + ADDQ $0x03, AX + +gen_emit_copy_end: + MOVQ BX, ret+40(FP) + RET + +// func emitCopyNoRepeat(dst []byte, offset int, length int) int +TEXT ·emitCopyNoRepeat(SB), NOSPLIT, $0-48 + XORQ BX, BX + MOVQ dst_base+0(FP), AX + MOVQ offset+24(FP), CX + MOVQ length+32(FP), DX + + // emitCopy + CMPL CX, $0x00010000 + JL two_byte_offset_standalone_snappy + +four_bytes_loop_back_standalone_snappy: + CMPL DX, $0x40 + JLE four_bytes_remain_standalone_snappy + MOVB $0xff, (AX) + MOVL CX, 1(AX) + LEAL -64(DX), DX + ADDQ $0x05, BX + ADDQ $0x05, AX + CMPL DX, $0x04 + JL four_bytes_remain_standalone_snappy + JMP four_bytes_loop_back_standalone_snappy + +four_bytes_remain_standalone_snappy: + TESTL DX, DX + JZ gen_emit_copy_end_snappy + MOVB $0x03, SI + LEAL -4(SI)(DX*4), DX + MOVB DL, (AX) + MOVL CX, 1(AX) + ADDQ $0x05, BX + ADDQ $0x05, AX + JMP gen_emit_copy_end_snappy + +two_byte_offset_standalone_snappy: + CMPL DX, $0x40 + JLE two_byte_offset_short_standalone_snappy + MOVB $0xee, (AX) + MOVW CX, 1(AX) + LEAL -60(DX), DX + ADDQ $0x03, AX + ADDQ $0x03, BX + JMP two_byte_offset_standalone_snappy + +two_byte_offset_short_standalone_snappy: + CMPL DX, $0x0c + JGE emit_copy_three_standalone_snappy + CMPL CX, $0x00000800 + JGE emit_copy_three_standalone_snappy + MOVB $0x01, SI + LEAL -16(SI)(DX*4), DX + MOVB CL, 1(AX) + SHRL $0x08, CX + SHLL $0x05, CX + ORL CX, DX + MOVB DL, (AX) + ADDQ $0x02, BX + ADDQ $0x02, AX + JMP gen_emit_copy_end_snappy + +emit_copy_three_standalone_snappy: + MOVB $0x02, SI + LEAL -4(SI)(DX*4), DX + MOVB DL, (AX) + MOVW CX, 1(AX) + ADDQ $0x03, BX + ADDQ $0x03, AX + +gen_emit_copy_end_snappy: + MOVQ BX, ret+40(FP) + RET + +// func matchLen(a []byte, b []byte) int +TEXT ·matchLen(SB), NOSPLIT, $0-56 + MOVQ a_base+0(FP), AX + MOVQ b_base+24(FP), CX + MOVQ a_len+8(FP), DX + + // matchLen + XORL SI, SI + CMPL DX, $0x08 + JL matchlen_single_standalone + +matchlen_loopback_standalone: + MOVQ (AX)(SI*1), BX + XORQ (CX)(SI*1), BX + TESTQ BX, BX + JZ matchlen_loop_standalone + BSFQ BX, BX + SARQ $0x03, BX + LEAL (SI)(BX*1), SI + JMP gen_match_len_end + +matchlen_loop_standalone: + LEAL -8(DX), DX + LEAL 8(SI), SI + CMPL DX, $0x08 + JGE matchlen_loopback_standalone + +matchlen_single_standalone: + TESTL DX, DX + JZ gen_match_len_end + +matchlen_single_loopback_standalone: + MOVB (AX)(SI*1), BL + CMPB (CX)(SI*1), BL + JNE gen_match_len_end + LEAL 1(SI), SI + DECL DX + JNZ matchlen_single_loopback_standalone + +gen_match_len_end: + MOVQ SI, ret+48(FP) + RET diff --git a/vendor/github.com/klauspost/compress/s2/index.go b/vendor/github.com/klauspost/compress/s2/index.go new file mode 100644 index 0000000000000..fd857682e46d6 --- /dev/null +++ b/vendor/github.com/klauspost/compress/s2/index.go @@ -0,0 +1,525 @@ +// Copyright (c) 2022+ Klaus Post. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+
+package s2
+
+import (
+	"bytes"
+	"encoding/binary"
+	"encoding/json"
+	"fmt"
+	"io"
+)
+
+const (
+	S2IndexHeader   = "s2idx\x00"
+	S2IndexTrailer  = "\x00xdi2s"
+	maxIndexEntries = 1 << 16
+)
+
+// Index represents an S2/Snappy index.
+type Index struct {
+	TotalUncompressed int64 // Total Uncompressed size if known. Will be -1 if unknown.
+	TotalCompressed   int64 // Total Compressed size if known. Will be -1 if unknown.
+	info              []struct {
+		compressedOffset   int64
+		uncompressedOffset int64
+	}
+	estBlockUncomp int64
+}
+
+func (i *Index) reset(maxBlock int) {
+	i.estBlockUncomp = int64(maxBlock)
+	i.TotalCompressed = -1
+	i.TotalUncompressed = -1
+	if len(i.info) > 0 {
+		i.info = i.info[:0]
+	}
+}
+
+// allocInfos will allocate an empty slice of infos.
+func (i *Index) allocInfos(n int) {
+	if n > maxIndexEntries {
+		panic("n > maxIndexEntries")
+	}
+	i.info = make([]struct {
+		compressedOffset   int64
+		uncompressedOffset int64
+	}, 0, n)
+}
+
+// add an uncompressed and compressed pair.
+// Entries must be sent in order.
+func (i *Index) add(compressedOffset, uncompressedOffset int64) error {
+	if i == nil {
+		return nil
+	}
+	lastIdx := len(i.info) - 1
+	if lastIdx >= 0 {
+		latest := i.info[lastIdx]
+		if latest.uncompressedOffset == uncompressedOffset {
+			// Uncompressed didn't change, don't add entry,
+			// but update start index.
+			latest.compressedOffset = compressedOffset
+			i.info[lastIdx] = latest
+			return nil
+		}
+		if latest.uncompressedOffset > uncompressedOffset {
+			return fmt.Errorf("internal error: Earlier uncompressed received (%d > %d)", latest.uncompressedOffset, uncompressedOffset)
+		}
+		if latest.compressedOffset > compressedOffset {
+			return fmt.Errorf("internal error: Earlier compressed received (%d > %d)", latest.compressedOffset, compressedOffset)
+		}
+	}
+	i.info = append(i.info, struct {
+		compressedOffset   int64
+		uncompressedOffset int64
+	}{compressedOffset: compressedOffset, uncompressedOffset: uncompressedOffset})
+	return nil
+}
+
+// Find the offset at or before the wanted (uncompressed) offset.
+// If offset is 0 or positive it is the offset from the beginning of the file.
+// If the uncompressed size is known, the offset must be within the file.
+// If an offset outside the file is requested io.ErrUnexpectedEOF is returned.
+// If the offset is negative, it is interpreted as the distance from the end of the file,
+// where -1 represents the last byte.
+// If offset from the end of the file is requested, but size is unknown,
+// ErrUnsupported will be returned.
+func (i *Index) Find(offset int64) (compressedOff, uncompressedOff int64, err error) {
+	if i.TotalUncompressed < 0 {
+		return 0, 0, ErrCorrupt
+	}
+	if offset < 0 {
+		offset = i.TotalUncompressed + offset
+		if offset < 0 {
+			return 0, 0, io.ErrUnexpectedEOF
+		}
+	}
+	if offset > i.TotalUncompressed {
+		return 0, 0, io.ErrUnexpectedEOF
+	}
+	for _, info := range i.info {
+		if info.uncompressedOffset > offset {
+			break
+		}
+		compressedOff = info.compressedOffset
+		uncompressedOff = info.uncompressedOffset
+	}
+	return compressedOff, uncompressedOff, nil
+}
+
+// reduce to stay below maxIndexEntries
+func (i *Index) reduce() {
+	if len(i.info) < maxIndexEntries && i.estBlockUncomp >= 1<<20 {
+		return
+	}
+
+	// Algorithm, keep 1, remove removeN entries...
+	removeN := (len(i.info) + 1) / maxIndexEntries
+	src := i.info
+	j := 0
+
+	// Each block should be at least 1MB, but don't reduce below 1000 entries.
+ for i.estBlockUncomp*(int64(removeN)+1) < 1<<20 && len(i.info)/(removeN+1) > 1000 { + removeN++ + } + for idx := 0; idx < len(src); idx++ { + i.info[j] = src[idx] + j++ + idx += removeN + } + i.info = i.info[:j] + // Update maxblock estimate. + i.estBlockUncomp += i.estBlockUncomp * int64(removeN) +} + +func (i *Index) appendTo(b []byte, uncompTotal, compTotal int64) []byte { + i.reduce() + var tmp [binary.MaxVarintLen64]byte + + initSize := len(b) + // We make the start a skippable header+size. + b = append(b, ChunkTypeIndex, 0, 0, 0) + b = append(b, []byte(S2IndexHeader)...) + // Total Uncompressed size + n := binary.PutVarint(tmp[:], uncompTotal) + b = append(b, tmp[:n]...) + // Total Compressed size + n = binary.PutVarint(tmp[:], compTotal) + b = append(b, tmp[:n]...) + // Put EstBlockUncomp size + n = binary.PutVarint(tmp[:], i.estBlockUncomp) + b = append(b, tmp[:n]...) + // Put length + n = binary.PutVarint(tmp[:], int64(len(i.info))) + b = append(b, tmp[:n]...) + + // Check if we should add uncompressed offsets + var hasUncompressed byte + for idx, info := range i.info { + if idx == 0 { + if info.uncompressedOffset != 0 { + hasUncompressed = 1 + break + } + continue + } + if info.uncompressedOffset != i.info[idx-1].uncompressedOffset+i.estBlockUncomp { + hasUncompressed = 1 + break + } + } + b = append(b, hasUncompressed) + + // Add each entry + if hasUncompressed == 1 { + for idx, info := range i.info { + uOff := info.uncompressedOffset + if idx > 0 { + prev := i.info[idx-1] + uOff -= prev.uncompressedOffset + (i.estBlockUncomp) + } + n = binary.PutVarint(tmp[:], uOff) + b = append(b, tmp[:n]...) + } + } + + // Initial compressed size estimate. + cPredict := i.estBlockUncomp / 2 + + for idx, info := range i.info { + cOff := info.compressedOffset + if idx > 0 { + prev := i.info[idx-1] + cOff -= prev.compressedOffset + cPredict + // Update compressed size prediction, with half the error. + cPredict += cOff / 2 + } + n = binary.PutVarint(tmp[:], cOff) + b = append(b, tmp[:n]...) + } + + // Add Total Size. + // Stored as fixed size for easier reading. + binary.LittleEndian.PutUint32(tmp[:], uint32(len(b)-initSize+4+len(S2IndexTrailer))) + b = append(b, tmp[:4]...) + // Trailer + b = append(b, []byte(S2IndexTrailer)...) + + // Update size + chunkLen := len(b) - initSize - skippableFrameHeader + b[initSize+1] = uint8(chunkLen >> 0) + b[initSize+2] = uint8(chunkLen >> 8) + b[initSize+3] = uint8(chunkLen >> 16) + //fmt.Printf("chunklen: 0x%x Uncomp:%d, Comp:%d\n", chunkLen, uncompTotal, compTotal) + return b +} + +// Load a binary index. +// A zero value Index can be used or a previous one can be reused. +func (i *Index) Load(b []byte) ([]byte, error) { + if len(b) <= 4+len(S2IndexHeader)+len(S2IndexTrailer) { + return b, io.ErrUnexpectedEOF + } + if b[0] != ChunkTypeIndex { + return b, ErrCorrupt + } + chunkLen := int(b[1]) | int(b[2])<<8 | int(b[3])<<16 + b = b[4:] + + // Validate we have enough... 
+ if len(b) < chunkLen { + return b, io.ErrUnexpectedEOF + } + if !bytes.Equal(b[:len(S2IndexHeader)], []byte(S2IndexHeader)) { + return b, ErrUnsupported + } + b = b[len(S2IndexHeader):] + + // Total Uncompressed + if v, n := binary.Varint(b); n <= 0 || v < 0 { + return b, ErrCorrupt + } else { + i.TotalUncompressed = v + b = b[n:] + } + + // Total Compressed + if v, n := binary.Varint(b); n <= 0 { + return b, ErrCorrupt + } else { + i.TotalCompressed = v + b = b[n:] + } + + // Read EstBlockUncomp + if v, n := binary.Varint(b); n <= 0 { + return b, ErrCorrupt + } else { + if v < 0 { + return b, ErrCorrupt + } + i.estBlockUncomp = v + b = b[n:] + } + + var entries int + if v, n := binary.Varint(b); n <= 0 { + return b, ErrCorrupt + } else { + if v < 0 || v > maxIndexEntries { + return b, ErrCorrupt + } + entries = int(v) + b = b[n:] + } + if cap(i.info) < entries { + i.allocInfos(entries) + } + i.info = i.info[:entries] + + if len(b) < 1 { + return b, io.ErrUnexpectedEOF + } + hasUncompressed := b[0] + b = b[1:] + if hasUncompressed&1 != hasUncompressed { + return b, ErrCorrupt + } + + // Add each uncompressed entry + for idx := range i.info { + var uOff int64 + if hasUncompressed != 0 { + // Load delta + if v, n := binary.Varint(b); n <= 0 { + return b, ErrCorrupt + } else { + uOff = v + b = b[n:] + } + } + + if idx > 0 { + prev := i.info[idx-1].uncompressedOffset + uOff += prev + (i.estBlockUncomp) + if uOff <= prev { + return b, ErrCorrupt + } + } + if uOff < 0 { + return b, ErrCorrupt + } + i.info[idx].uncompressedOffset = uOff + } + + // Initial compressed size estimate. + cPredict := i.estBlockUncomp / 2 + + // Add each compressed entry + for idx := range i.info { + var cOff int64 + if v, n := binary.Varint(b); n <= 0 { + return b, ErrCorrupt + } else { + cOff = v + b = b[n:] + } + + if idx > 0 { + // Update compressed size prediction, with half the error. + cPredictNew := cPredict + cOff/2 + + prev := i.info[idx-1].compressedOffset + cOff += prev + cPredict + if cOff <= prev { + return b, ErrCorrupt + } + cPredict = cPredictNew + } + if cOff < 0 { + return b, ErrCorrupt + } + i.info[idx].compressedOffset = cOff + } + if len(b) < 4+len(S2IndexTrailer) { + return b, io.ErrUnexpectedEOF + } + // Skip size... + b = b[4:] + + // Check trailer... + if !bytes.Equal(b[:len(S2IndexTrailer)], []byte(S2IndexTrailer)) { + return b, ErrCorrupt + } + return b[len(S2IndexTrailer):], nil +} + +// LoadStream will load an index from the end of the supplied stream. +// ErrUnsupported will be returned if the signature cannot be found. +// ErrCorrupt will be returned if unexpected values are found. +// io.ErrUnexpectedEOF is returned if there are too few bytes. +// IO errors are returned as-is. +func (i *Index) LoadStream(rs io.ReadSeeker) error { + // Go to end. + _, err := rs.Seek(-10, io.SeekEnd) + if err != nil { + return err + } + var tmp [10]byte + _, err = io.ReadFull(rs, tmp[:]) + if err != nil { + return err + } + // Check trailer... + if !bytes.Equal(tmp[4:4+len(S2IndexTrailer)], []byte(S2IndexTrailer)) { + return ErrUnsupported + } + sz := binary.LittleEndian.Uint32(tmp[:4]) + if sz > maxChunkSize+skippableFrameHeader { + return ErrCorrupt + } + _, err = rs.Seek(-int64(sz), io.SeekEnd) + if err != nil { + return err + } + + // Read index. + buf := make([]byte, sz) + _, err = io.ReadFull(rs, buf) + if err != nil { + return err + } + _, err = i.Load(buf) + return err +} + +// IndexStream will return an index for a stream. 
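Taken together, Load, LoadStream and Find give cheap random access into an indexed stream. Below is a minimal sketch of the caller's side, assuming a seekable S2 stream that was written with an appended index; the file name and target offset are made up for illustration:

```go
package main

import (
	"fmt"
	"log"
	"os"

	"github.com/klauspost/compress/s2"
)

func main() {
	// "indexed.s2" is a placeholder for a seekable S2 stream that has an
	// index appended to its end.
	f, err := os.Open("indexed.s2")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	var idx s2.Index
	// LoadStream seeks to the end of the stream, validates the trailer and
	// loads the offset pairs.
	if err := idx.LoadStream(f); err != nil {
		log.Fatal(err)
	}

	// Find returns the nearest block boundary at or before the wanted
	// uncompressed offset (1 MiB here, chosen arbitrarily).
	want := int64(1 << 20)
	compOff, uncompOff, err := idx.Find(want)
	if err != nil {
		log.Fatal(err)
	}
	// A decoder would seek to compOff and then discard want-uncompOff
	// uncompressed bytes to land on the requested position.
	fmt.Printf("seek to %d, then skip %d uncompressed bytes\n", compOff, want-uncompOff)
}
```

Because the index only records block boundaries, Find deliberately returns the entry at or before the requested offset; the remainder is skipped once decompression resumes.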
+// The stream structure will be checked, but +// data within blocks is not verified. +// The returned index can either be appended to the end of the stream +// or stored separately. +func IndexStream(r io.Reader) ([]byte, error) { + var i Index + var buf [maxChunkSize]byte + var readHeader bool + for { + _, err := io.ReadFull(r, buf[:4]) + if err != nil { + if err == io.EOF { + return i.appendTo(nil, i.TotalUncompressed, i.TotalCompressed), nil + } + return nil, err + } + // Start of this chunk. + startChunk := i.TotalCompressed + i.TotalCompressed += 4 + + chunkType := buf[0] + if !readHeader { + if chunkType != chunkTypeStreamIdentifier { + return nil, ErrCorrupt + } + readHeader = true + } + chunkLen := int(buf[1]) | int(buf[2])<<8 | int(buf[3])<<16 + if chunkLen < checksumSize { + return nil, ErrCorrupt + } + + i.TotalCompressed += int64(chunkLen) + _, err = io.ReadFull(r, buf[:chunkLen]) + if err != nil { + return nil, io.ErrUnexpectedEOF + } + // The chunk types are specified at + // https://github.com/google/snappy/blob/master/framing_format.txt + switch chunkType { + case chunkTypeCompressedData: + // Section 4.2. Compressed data (chunk type 0x00). + // Skip checksum. + dLen, err := DecodedLen(buf[checksumSize:]) + if err != nil { + return nil, err + } + if dLen > maxBlockSize { + return nil, ErrCorrupt + } + if i.estBlockUncomp == 0 { + // Use first block for estimate... + i.estBlockUncomp = int64(dLen) + } + err = i.add(startChunk, i.TotalUncompressed) + if err != nil { + return nil, err + } + i.TotalUncompressed += int64(dLen) + continue + case chunkTypeUncompressedData: + n2 := chunkLen - checksumSize + if n2 > maxBlockSize { + return nil, ErrCorrupt + } + if i.estBlockUncomp == 0 { + // Use first block for estimate... + i.estBlockUncomp = int64(n2) + } + err = i.add(startChunk, i.TotalUncompressed) + if err != nil { + return nil, err + } + i.TotalUncompressed += int64(n2) + continue + case chunkTypeStreamIdentifier: + // Section 4.1. Stream identifier (chunk type 0xff). + if chunkLen != len(magicBody) { + return nil, ErrCorrupt + } + + if string(buf[:len(magicBody)]) != magicBody { + if string(buf[:len(magicBody)]) != magicBodySnappy { + return nil, ErrCorrupt + } + } + + continue + } + + if chunkType <= 0x7f { + // Section 4.5. Reserved unskippable chunks (chunk types 0x02-0x7f). + return nil, ErrUnsupported + } + if chunkLen > maxChunkSize { + return nil, ErrUnsupported + } + // Section 4.4 Padding (chunk type 0xfe). + // Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd). + } +} + +// JSON returns the index as JSON text. +func (i *Index) JSON() []byte { + x := struct { + TotalUncompressed int64 `json:"total_uncompressed"` // Total Uncompressed size if known. Will be -1 if unknown. + TotalCompressed int64 `json:"total_compressed"` // Total Compressed size if known. Will be -1 if unknown. 
+		Offsets           []struct {
+			CompressedOffset   int64 `json:"compressed"`
+			UncompressedOffset int64 `json:"uncompressed"`
+		} `json:"offsets"`
+		EstBlockUncomp int64 `json:"est_block_uncompressed"`
+	}{
+		TotalUncompressed: i.TotalUncompressed,
+		TotalCompressed:   i.TotalCompressed,
+		EstBlockUncomp:    i.estBlockUncomp,
+	}
+	for _, v := range i.info {
+		x.Offsets = append(x.Offsets, struct {
+			CompressedOffset   int64 `json:"compressed"`
+			UncompressedOffset int64 `json:"uncompressed"`
+		}{CompressedOffset: v.compressedOffset, UncompressedOffset: v.uncompressedOffset})
+	}
+	b, _ := json.MarshalIndent(x, "", "  ")
+	return b
+}
diff --git a/vendor/github.com/klauspost/compress/s2/s2.go b/vendor/github.com/klauspost/compress/s2/s2.go
new file mode 100644
index 0000000000000..dae3f731fabec
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/s2/s2.go
@@ -0,0 +1,143 @@
+// Copyright 2011 The Snappy-Go Authors. All rights reserved.
+// Copyright (c) 2019 Klaus Post. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package s2 implements the S2 compression format.
+//
+// S2 is an extension of Snappy. Similar to Snappy, S2 is aimed at high throughput,
+// which is why it features concurrent compression for bigger payloads.
+//
+// Decoding is compatible with Snappy compressed content,
+// but content compressed with S2 cannot be decompressed by Snappy.
+//
+// For more information on Snappy/S2 differences see README in: https://github.com/klauspost/compress/tree/master/s2
+//
+// There are actually two S2 formats: block and stream. They are related,
+// but different: trying to decompress block-compressed data as a S2 stream
+// will fail, and vice versa. The block format is the Decode and Encode
+// functions and the stream format is the Reader and Writer types.
+//
+// A "better" compression option is available. This will trade some compression
+// speed for a better compression ratio.
+//
+// The block format, the more common case, is used when the complete size (the
+// number of bytes) of the original data is known upfront, at the time
+// compression starts. The stream format, also known as the framing format, is
+// for when that isn't always true.
+//
+// Blocks do not offer much data protection, so it is up to you to
+// add data validation of decompressed blocks.
+//
+// Streams perform CRC validation of the decompressed data.
+// Stream compression will also be performed on multiple CPU cores concurrently,
+// significantly improving throughput.
+package s2
+
+import (
+	"bytes"
+	"hash/crc32"
+)
+
+/*
+Each encoded block begins with the varint-encoded length of the decoded data,
+followed by a sequence of chunks. Chunks begin and end on byte boundaries. The
+first byte of each chunk is broken into its 2 least and 6 most significant bits
+called l and m: l ranges in [0, 4) and m ranges in [0, 64). l is the chunk tag.
+Zero means a literal tag. All other values mean a copy tag.
+
+For literal tags:
+  - If m < 60, the next 1 + m bytes are literal bytes.
+  - Otherwise, let n be the little-endian unsigned integer denoted by the next
+    m - 59 bytes. The next 1 + n bytes after that are literal bytes.
+
+For copy tags, length bytes are copied from offset bytes ago, in the style of
+Lempel-Ziv compression algorithms. In particular:
+  - For l == 1, the offset ranges in [0, 1<<11) and the length in [4, 12).
+    The length is 4 + the low 3 bits of m. The high 3 bits of m form bits 8-10
+    of the offset. The next byte is bits 0-7 of the offset.
+ - For l == 2, the offset ranges in [0, 1<<16) and the length in [1, 65). + The length is 1 + m. The offset is the little-endian unsigned integer + denoted by the next 2 bytes. + - For l == 3, the offset ranges in [0, 1<<32) and the length in + [1, 65). The length is 1 + m. The offset is the little-endian unsigned + integer denoted by the next 4 bytes. +*/ +const ( + tagLiteral = 0x00 + tagCopy1 = 0x01 + tagCopy2 = 0x02 + tagCopy4 = 0x03 +) + +const ( + checksumSize = 4 + chunkHeaderSize = 4 + magicChunk = "\xff\x06\x00\x00" + magicBody + magicChunkSnappy = "\xff\x06\x00\x00" + magicBodySnappy + magicBodySnappy = "sNaPpY" + magicBody = "S2sTwO" + + // maxBlockSize is the maximum size of the input to encodeBlock. + // + // For the framing format (Writer type instead of Encode function), + // this is the maximum uncompressed size of a block. + maxBlockSize = 4 << 20 + + // minBlockSize is the minimum size of block setting when creating a writer. + minBlockSize = 4 << 10 + + skippableFrameHeader = 4 + maxChunkSize = 1<<24 - 1 // 16777215 + + // Default block size + defaultBlockSize = 1 << 20 + + // maxSnappyBlockSize is the maximum snappy block size. + maxSnappyBlockSize = 1 << 16 + + obufHeaderLen = checksumSize + chunkHeaderSize +) + +const ( + chunkTypeCompressedData = 0x00 + chunkTypeUncompressedData = 0x01 + ChunkTypeIndex = 0x99 + chunkTypePadding = 0xfe + chunkTypeStreamIdentifier = 0xff +) + +var crcTable = crc32.MakeTable(crc32.Castagnoli) + +// crc implements the checksum specified in section 3 of +// https://github.com/google/snappy/blob/master/framing_format.txt +func crc(b []byte) uint32 { + c := crc32.Update(0, crcTable, b) + return c>>15 | c<<17 + 0xa282ead8 +} + +// literalExtraSize returns the extra size of encoding n literals. +// n should be >= 0 and <= math.MaxUint32. +func literalExtraSize(n int64) int64 { + if n == 0 { + return 0 + } + switch { + case n < 60: + return 1 + case n < 1<<8: + return 2 + case n < 1<<16: + return 3 + case n < 1<<24: + return 4 + default: + return 5 + } +} + +type byter interface { + Bytes() []byte +} + +var _ byter = &bytes.Buffer{} diff --git a/vendor/github.com/minio/minio-go/v7/.golangci.yml b/vendor/github.com/minio/minio-go/v7/.golangci.yml index 7d1dd33527568..dfc0c2d537738 100644 --- a/vendor/github.com/minio/minio-go/v7/.golangci.yml +++ b/vendor/github.com/minio/minio-go/v7/.golangci.yml @@ -8,9 +8,20 @@ linters: - typecheck - goimports - misspell + - revive - govet - - golint - ineffassign - gosimple - deadcode - structcheck + - gocritic + +issues: + exclude-use-default: false + exclude: + # todo fix these when we get enough time. + - "singleCaseSwitch: should rewrite switch statement to if statement" + - "unlambda: replace" + - "captLocal:" + - "ifElseChain:" + - "elseif:" diff --git a/vendor/github.com/minio/minio-go/v7/CONTRIBUTING.md b/vendor/github.com/minio/minio-go/v7/CONTRIBUTING.md index 8b1ee86c6d098..24522ef75a1ae 100644 --- a/vendor/github.com/minio/minio-go/v7/CONTRIBUTING.md +++ b/vendor/github.com/minio/minio-go/v7/CONTRIBUTING.md @@ -1,4 +1,3 @@ - ### Developer Guidelines ``minio-go`` welcomes your contribution. 
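Returning to the block-format comment in s2.go above: the first byte of every chunk packs the chunk tag into its two low bits and the 6-bit m value into the rest. A small illustrative sketch of splitting that byte, not code from this patch (the sample bytes are arbitrary):

```go
package main

import "fmt"

// decodeTag splits an S2/Snappy chunk's first byte into its 2-bit tag (l)
// and 6-bit payload (m), as described in the s2.go format comment.
func decodeTag(b byte) (l, m byte) {
	return b & 0x03, b >> 2
}

func main() {
	for _, b := range []byte{0x08, 0x21, 0x0a, 0x03} {
		l, m := decodeTag(b)
		switch l {
		case 0: // tagLiteral; for m >= 60 the length is stored in the next m-59 bytes
			fmt.Printf("0x%02x: literal, m=%d\n", b, m)
		case 1: // tagCopy1: length is 4 + low 3 bits of m; high 3 bits of m are offset bits 8-10
			fmt.Printf("0x%02x: copy1, length=%d\n", b, 4+m&0x07)
		case 2: // tagCopy2: length is 1 + m; a 2-byte little-endian offset follows
			fmt.Printf("0x%02x: copy2, length=%d\n", b, 1+m)
		case 3: // tagCopy4: length is 1 + m; a 4-byte little-endian offset follows
			fmt.Printf("0x%02x: copy4, length=%d\n", b, 1+m)
		}
	}
}
```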
To make the process as seamless as possible, we ask for the following: diff --git a/vendor/github.com/minio/minio-go/v7/Makefile b/vendor/github.com/minio/minio-go/v7/Makefile index a6f0b9bcfcda5..ac4a328f0e3ce 100644 --- a/vendor/github.com/minio/minio-go/v7/Makefile +++ b/vendor/github.com/minio/minio-go/v7/Makefile @@ -9,7 +9,7 @@ checks: lint vet test examples functional-test lint: @mkdir -p ${GOPATH}/bin - @which golangci-lint 1>/dev/null || (echo "Installing golangci-lint" && curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(GOPATH)/bin v1.27.0) + @echo "Installing golangci-lint" && curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(GOPATH)/bin v1.45.2 @echo "Running $@ check" @GO111MODULE=on ${GOPATH}/bin/golangci-lint cache clean @GO111MODULE=on ${GOPATH}/bin/golangci-lint run --timeout=5m --config ./.golangci.yml @@ -21,7 +21,10 @@ test: @GO111MODULE=on SERVER_ENDPOINT=localhost:9000 ACCESS_KEY=minio SECRET_KEY=minio123 ENABLE_HTTPS=1 MINT_MODE=full go test -race -v ./... examples: - @$(foreach v,$(wildcard examples/s3/*), go build -o ${TMPDIR}/$(basename $(v)) $(v) || exit 1;) + @echo "Building s3 examples" + @cd ./examples/s3 && $(foreach v,$(wildcard examples/s3/*.go),go build -mod=mod -o ${TMPDIR}/$(basename $(v)) $(notdir $(v)) || exit 1;) + @echo "Building minio examples" + @cd ./examples/minio && $(foreach v,$(wildcard examples/minio/*.go),go build -mod=mod -o ${TMPDIR}/$(basename $(v)) $(notdir $(v)) || exit 1;) functional-test: @GO111MODULE=on SERVER_ENDPOINT=localhost:9000 ACCESS_KEY=minio SECRET_KEY=minio123 ENABLE_HTTPS=1 MINT_MODE=full go run functional_tests.go diff --git a/vendor/github.com/minio/minio-go/v7/README.md b/vendor/github.com/minio/minio-go/v7/README.md index ab5bcb53f331d..4e3abf71d2d57 100644 --- a/vendor/github.com/minio/minio-go/v7/README.md +++ b/vendor/github.com/minio/minio-go/v7/README.md @@ -1,4 +1,4 @@ -# MinIO Go Client SDK for Amazon S3 Compatible Cloud Storage [![Slack](https://slack.min.io/slack?type=svg)](https://slack.min.io) [![Sourcegraph](https://sourcegraph.com/github.com/minio/minio-go/-/badge.svg)](https://sourcegraph.com/github.com/minio/minio-go?badge) [![Apache V2 License](http://img.shields.io/badge/license-Apache%20V2-blue.svg)](https://github.com/minio/minio-go/blob/master/LICENSE) +# MinIO Go Client SDK for Amazon S3 Compatible Cloud Storage [![Slack](https://slack.min.io/slack?type=svg)](https://slack.min.io) [![Sourcegraph](https://sourcegraph.com/github.com/minio/minio-go/-/badge.svg)](https://sourcegraph.com/github.com/minio/minio-go?badge) [![Apache V2 License](https://img.shields.io/badge/license-Apache%20V2-blue.svg)](https://github.com/minio/minio-go/blob/master/LICENSE) The MinIO Go Client SDK provides simple APIs to access any Amazon S3 compatible object storage. 
@@ -8,7 +8,7 @@ This document assumes that you have a working [Go development environment](https ## Download from Github ```sh -GO111MODULE=on go get github.com/minio/minio-go/v7 +go get github.com/minio/minio-go/v7 ``` ## Initialize MinIO Client @@ -104,18 +104,17 @@ func main() { contentType := "application/zip" // Upload the zip file with FPutObject - n, err := minioClient.FPutObject(ctx, bucketName, objectName, filePath, minio.PutObjectOptions{ContentType: contentType}) + info, err := minioClient.FPutObject(ctx, bucketName, objectName, filePath, minio.PutObjectOptions{ContentType: contentType}) if err != nil { log.Fatalln(err) } - log.Printf("Successfully uploaded %s of size %d\n", objectName, n) + log.Printf("Successfully uploaded %s of size %d\n", objectName, info.Size) } ``` ### Run FileUploader ```sh -export GO111MODULE=on go run file-uploader.go 2016/08/13 17:03:28 Successfully created mymusic 2016/08/13 17:03:40 Successfully uploaded golden-oldies.zip of size 16253413 @@ -171,9 +170,9 @@ The full API Reference is available here. * [`PresignedPostPolicy`](https://docs.min.io/docs/golang-client-api-reference#PresignedPostPolicy) ### API Reference : Client custom settings -* [`SetAppInfo`](http://docs.min.io/docs/golang-client-api-reference#SetAppInfo) -* [`TraceOn`](http://docs.min.io/docs/golang-client-api-reference#TraceOn) -* [`TraceOff`](http://docs.min.io/docs/golang-client-api-reference#TraceOff) +* [`SetAppInfo`](https://docs.min.io/docs/golang-client-api-reference#SetAppInfo) +* [`TraceOn`](https://docs.min.io/docs/golang-client-api-reference#TraceOn) +* [`TraceOff`](https://docs.min.io/docs/golang-client-api-reference#TraceOff) ## Full Examples @@ -215,14 +214,10 @@ The full API Reference is available here. ### Full Examples : File Object Operations * [fputobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/fputobject.go) * [fgetobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/fgetobject.go) -* [fputobject-context.go](https://github.com/minio/minio-go/blob/master/examples/s3/fputobject-context.go) -* [fgetobject-context.go](https://github.com/minio/minio-go/blob/master/examples/s3/fgetobject-context.go) ### Full Examples : Object Operations * [putobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/putobject.go) * [getobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/getobject.go) -* [putobject-context.go](https://github.com/minio/minio-go/blob/master/examples/s3/putobject-context.go) -* [getobject-context.go](https://github.com/minio/minio-go/blob/master/examples/s3/getobject-context.go) * [statobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/statobject.go) * [copyobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/copyobject.go) * [removeobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/removeobject.go) @@ -248,4 +243,4 @@ The full API Reference is available here. [Contributors Guide](https://github.com/minio/minio-go/blob/master/CONTRIBUTING.md) ## License -This SDK is distributed under the [Apache License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0), see [LICENSE](https://github.com/minio/minio-go/blob/master/LICENSE) and [NOTICE](https://github.com/minio/minio-go/blob/master/NOTICE) for more information. 
+This SDK is distributed under the [Apache License, Version 2.0](https://www.apache.org/licenses/LICENSE-2.0), see [LICENSE](https://github.com/minio/minio-go/blob/master/LICENSE) and [NOTICE](https://github.com/minio/minio-go/blob/master/NOTICE) for more information. diff --git a/vendor/github.com/minio/minio-go/v7/api-bucket-encryption.go b/vendor/github.com/minio/minio-go/v7/api-bucket-encryption.go index e02ab84afa3d1..24f94e03440ca 100644 --- a/vendor/github.com/minio/minio-go/v7/api-bucket-encryption.go +++ b/vendor/github.com/minio/minio-go/v7/api-bucket-encryption.go @@ -28,7 +28,7 @@ import ( ) // SetBucketEncryption sets the default encryption configuration on an existing bucket. -func (c Client) SetBucketEncryption(ctx context.Context, bucketName string, config *sse.Configuration) error { +func (c *Client) SetBucketEncryption(ctx context.Context, bucketName string, config *sse.Configuration) error { // Input validation. if err := s3utils.CheckValidBucketName(bucketName); err != nil { return err @@ -70,7 +70,7 @@ func (c Client) SetBucketEncryption(ctx context.Context, bucketName string, conf } // RemoveBucketEncryption removes the default encryption configuration on a bucket with a context to control cancellations and timeouts. -func (c Client) RemoveBucketEncryption(ctx context.Context, bucketName string) error { +func (c *Client) RemoveBucketEncryption(ctx context.Context, bucketName string) error { // Input validation. if err := s3utils.CheckValidBucketName(bucketName); err != nil { return err @@ -99,7 +99,7 @@ func (c Client) RemoveBucketEncryption(ctx context.Context, bucketName string) e // GetBucketEncryption gets the default encryption configuration // on an existing bucket with a context to control cancellations and timeouts. -func (c Client) GetBucketEncryption(ctx context.Context, bucketName string) (*sse.Configuration, error) { +func (c *Client) GetBucketEncryption(ctx context.Context, bucketName string) (*sse.Configuration, error) { // Input validation. if err := s3utils.CheckValidBucketName(bucketName); err != nil { return nil, err diff --git a/vendor/github.com/minio/minio-go/v7/api-bucket-lifecycle.go b/vendor/github.com/minio/minio-go/v7/api-bucket-lifecycle.go index e1fac813cc6de..7e2199732ee33 100644 --- a/vendor/github.com/minio/minio-go/v7/api-bucket-lifecycle.go +++ b/vendor/github.com/minio/minio-go/v7/api-bucket-lifecycle.go @@ -30,7 +30,7 @@ import ( ) // SetBucketLifecycle set the lifecycle on an existing bucket. -func (c Client) SetBucketLifecycle(ctx context.Context, bucketName string, config *lifecycle.Configuration) error { +func (c *Client) SetBucketLifecycle(ctx context.Context, bucketName string, config *lifecycle.Configuration) error { // Input validation. if err := s3utils.CheckValidBucketName(bucketName); err != nil { return err @@ -51,7 +51,7 @@ func (c Client) SetBucketLifecycle(ctx context.Context, bucketName string, confi } // Saves a new bucket lifecycle. -func (c Client) putBucketLifecycle(ctx context.Context, bucketName string, buf []byte) error { +func (c *Client) putBucketLifecycle(ctx context.Context, bucketName string, buf []byte) error { // Get resources properly escaped and lined up before // using them in http request. urlValues := make(url.Values) @@ -81,7 +81,7 @@ func (c Client) putBucketLifecycle(ctx context.Context, bucketName string, buf [ } // Remove lifecycle from a bucket. 
-func (c Client) removeBucketLifecycle(ctx context.Context, bucketName string) error { +func (c *Client) removeBucketLifecycle(ctx context.Context, bucketName string) error { // Get resources properly escaped and lined up before // using them in http request. urlValues := make(url.Values) @@ -101,7 +101,7 @@ func (c Client) removeBucketLifecycle(ctx context.Context, bucketName string) er } // GetBucketLifecycle fetch bucket lifecycle configuration -func (c Client) GetBucketLifecycle(ctx context.Context, bucketName string) (*lifecycle.Configuration, error) { +func (c *Client) GetBucketLifecycle(ctx context.Context, bucketName string) (*lifecycle.Configuration, error) { // Input validation. if err := s3utils.CheckValidBucketName(bucketName); err != nil { return nil, err @@ -120,7 +120,7 @@ func (c Client) GetBucketLifecycle(ctx context.Context, bucketName string) (*lif } // Request server for current bucket lifecycle. -func (c Client) getBucketLifecycle(ctx context.Context, bucketName string) ([]byte, error) { +func (c *Client) getBucketLifecycle(ctx context.Context, bucketName string) ([]byte, error) { // Get resources properly escaped and lined up before // using them in http request. urlValues := make(url.Values) diff --git a/vendor/github.com/minio/minio-go/v7/api-bucket-notification.go b/vendor/github.com/minio/minio-go/v7/api-bucket-notification.go index 76787ecabc13d..dc37b0c078e69 100644 --- a/vendor/github.com/minio/minio-go/v7/api-bucket-notification.go +++ b/vendor/github.com/minio/minio-go/v7/api-bucket-notification.go @@ -32,7 +32,7 @@ import ( ) // SetBucketNotification saves a new bucket notification with a context to control cancellations and timeouts. -func (c Client) SetBucketNotification(ctx context.Context, bucketName string, config notification.Configuration) error { +func (c *Client) SetBucketNotification(ctx context.Context, bucketName string, config notification.Configuration) error { // Input validation. if err := s3utils.CheckValidBucketName(bucketName); err != nil { return err @@ -73,12 +73,12 @@ func (c Client) SetBucketNotification(ctx context.Context, bucketName string, co } // RemoveAllBucketNotification - Remove bucket notification clears all previously specified config -func (c Client) RemoveAllBucketNotification(ctx context.Context, bucketName string) error { +func (c *Client) RemoveAllBucketNotification(ctx context.Context, bucketName string) error { return c.SetBucketNotification(ctx, bucketName, notification.Configuration{}) } // GetBucketNotification returns current bucket notification configuration -func (c Client) GetBucketNotification(ctx context.Context, bucketName string) (bucketNotification notification.Configuration, err error) { +func (c *Client) GetBucketNotification(ctx context.Context, bucketName string) (bucketNotification notification.Configuration, err error) { // Input validation. if err := s3utils.CheckValidBucketName(bucketName); err != nil { return notification.Configuration{}, err @@ -87,7 +87,7 @@ func (c Client) GetBucketNotification(ctx context.Context, bucketName string) (b } // Request server for notification rules. 
-func (c Client) getBucketNotification(ctx context.Context, bucketName string) (notification.Configuration, error) { +func (c *Client) getBucketNotification(ctx context.Context, bucketName string) (notification.Configuration, error) { urlValues := make(url.Values) urlValues.Set("notification", "") @@ -103,7 +103,6 @@ func (c Client) getBucketNotification(ctx context.Context, bucketName string) (n return notification.Configuration{}, err } return processBucketNotificationResponse(bucketName, resp) - } // processes the GetNotification http response from the server. @@ -121,12 +120,12 @@ func processBucketNotificationResponse(bucketName string, resp *http.Response) ( } // ListenNotification listen for all events, this is a MinIO specific API -func (c Client) ListenNotification(ctx context.Context, prefix, suffix string, events []string) <-chan notification.Info { +func (c *Client) ListenNotification(ctx context.Context, prefix, suffix string, events []string) <-chan notification.Info { return c.ListenBucketNotification(ctx, "", prefix, suffix, events) } // ListenBucketNotification listen for bucket events, this is a MinIO specific API -func (c Client) ListenBucketNotification(ctx context.Context, bucketName, prefix, suffix string, events []string) <-chan notification.Info { +func (c *Client) ListenBucketNotification(ctx context.Context, bucketName, prefix, suffix string, events []string) <-chan notification.Info { notificationInfoCh := make(chan notification.Info, 1) const notificationCapacity = 4 * 1024 * 1024 notificationEventBuffer := make([]byte, notificationCapacity) @@ -207,7 +206,7 @@ func (c Client) ListenBucketNotification(ctx context.Context, bucketName, prefix // Use a higher buffer to support unexpected // caching done by proxies bio.Buffer(notificationEventBuffer, notificationCapacity) - var json = jsoniter.ConfigCompatibleWithStandardLibrary + json := jsoniter.ConfigCompatibleWithStandardLibrary // Unmarshal each line, returns marshaled values. for bio.Scan() { diff --git a/vendor/github.com/minio/minio-go/v7/api-bucket-policy.go b/vendor/github.com/minio/minio-go/v7/api-bucket-policy.go index 72676f34470b6..e7edf9c971261 100644 --- a/vendor/github.com/minio/minio-go/v7/api-bucket-policy.go +++ b/vendor/github.com/minio/minio-go/v7/api-bucket-policy.go @@ -27,7 +27,7 @@ import ( ) // SetBucketPolicy sets the access permissions on an existing bucket. -func (c Client) SetBucketPolicy(ctx context.Context, bucketName, policy string) error { +func (c *Client) SetBucketPolicy(ctx context.Context, bucketName, policy string) error { // Input validation. if err := s3utils.CheckValidBucketName(bucketName); err != nil { return err @@ -43,7 +43,7 @@ func (c Client) SetBucketPolicy(ctx context.Context, bucketName, policy string) } // Saves a new bucket policy. -func (c Client) putBucketPolicy(ctx context.Context, bucketName, policy string) error { +func (c *Client) putBucketPolicy(ctx context.Context, bucketName, policy string) error { // Get resources properly escaped and lined up before // using them in http request. urlValues := make(url.Values) @@ -63,7 +63,7 @@ func (c Client) putBucketPolicy(ctx context.Context, bucketName, policy string) return err } if resp != nil { - if resp.StatusCode != http.StatusNoContent { + if resp.StatusCode != http.StatusNoContent && resp.StatusCode != http.StatusOK { return httpRespToErrorResponse(resp, bucketName, "") } } @@ -71,7 +71,7 @@ func (c Client) putBucketPolicy(ctx context.Context, bucketName, policy string) } // Removes all policies on a bucket. 
-func (c Client) removeBucketPolicy(ctx context.Context, bucketName string) error { +func (c *Client) removeBucketPolicy(ctx context.Context, bucketName string) error { // Get resources properly escaped and lined up before // using them in http request. urlValues := make(url.Values) @@ -87,11 +87,16 @@ func (c Client) removeBucketPolicy(ctx context.Context, bucketName string) error if err != nil { return err } + + if resp.StatusCode != http.StatusNoContent { + return httpRespToErrorResponse(resp, bucketName, "") + } + return nil } // GetBucketPolicy returns the current policy -func (c Client) GetBucketPolicy(ctx context.Context, bucketName string) (string, error) { +func (c *Client) GetBucketPolicy(ctx context.Context, bucketName string) (string, error) { // Input validation. if err := s3utils.CheckValidBucketName(bucketName); err != nil { return "", err @@ -108,7 +113,7 @@ func (c Client) GetBucketPolicy(ctx context.Context, bucketName string) (string, } // Request server for current bucket policy. -func (c Client) getBucketPolicy(ctx context.Context, bucketName string) (string, error) { +func (c *Client) getBucketPolicy(ctx context.Context, bucketName string) (string, error) { // Get resources properly escaped and lined up before // using them in http request. urlValues := make(url.Values) diff --git a/vendor/github.com/minio/minio-go/v7/api-bucket-replication.go b/vendor/github.com/minio/minio-go/v7/api-bucket-replication.go index bfd5ea4360dd2..461984e342af6 100644 --- a/vendor/github.com/minio/minio-go/v7/api-bucket-replication.go +++ b/vendor/github.com/minio/minio-go/v7/api-bucket-replication.go @@ -20,21 +20,25 @@ package minio import ( "bytes" "context" + "encoding/json" "encoding/xml" + "io/ioutil" "net/http" "net/url" + "time" + "github.com/google/uuid" "github.com/minio/minio-go/v7/pkg/replication" "github.com/minio/minio-go/v7/pkg/s3utils" ) // RemoveBucketReplication removes a replication config on an existing bucket. -func (c Client) RemoveBucketReplication(ctx context.Context, bucketName string) error { +func (c *Client) RemoveBucketReplication(ctx context.Context, bucketName string) error { return c.removeBucketReplication(ctx, bucketName) } // SetBucketReplication sets a replication config on an existing bucket. -func (c Client) SetBucketReplication(ctx context.Context, bucketName string, cfg replication.Config) error { +func (c *Client) SetBucketReplication(ctx context.Context, bucketName string, cfg replication.Config) error { // Input validation. if err := s3utils.CheckValidBucketName(bucketName); err != nil { return err @@ -49,7 +53,7 @@ func (c Client) SetBucketReplication(ctx context.Context, bucketName string, cfg } // Saves a new bucket replication. -func (c Client) putBucketReplication(ctx context.Context, bucketName string, cfg replication.Config) error { +func (c *Client) putBucketReplication(ctx context.Context, bucketName string, cfg replication.Config) error { // Get resources properly escaped and lined up before // using them in http request. urlValues := make(url.Values) @@ -82,7 +86,7 @@ func (c Client) putBucketReplication(ctx context.Context, bucketName string, cfg } // Remove replication from a bucket. -func (c Client) removeBucketReplication(ctx context.Context, bucketName string) error { +func (c *Client) removeBucketReplication(ctx context.Context, bucketName string) error { // Get resources properly escaped and lined up before // using them in http request. 
 	urlValues := make(url.Values)
@@ -103,7 +107,7 @@ func (c Client) removeBucketReplication(ctx context.Context, bucketName string)
 
 // GetBucketReplication fetches bucket replication configuration. If config is not
 // found, returns empty config with nil error.
-func (c Client) GetBucketReplication(ctx context.Context, bucketName string) (cfg replication.Config, err error) {
+func (c *Client) GetBucketReplication(ctx context.Context, bucketName string) (cfg replication.Config, err error) {
 	// Input validation.
 	if err := s3utils.CheckValidBucketName(bucketName); err != nil {
 		return cfg, err
@@ -120,7 +124,7 @@ func (c Client) GetBucketReplication(ctx context.Context, bucketName string) (cf
 }
 
 // Request server for current bucket replication config.
-func (c Client) getBucketReplication(ctx context.Context, bucketName string) (cfg replication.Config, err error) {
+func (c *Client) getBucketReplication(ctx context.Context, bucketName string) (cfg replication.Config, err error) {
 	// Get resources properly escaped and lined up before
 	// using them in http request.
 	urlValues := make(url.Values)
@@ -147,3 +151,138 @@ func (c Client) getBucketReplication(ctx context.Context, bucketName string) (cf
 
 	return cfg, nil
 }
+
+// GetBucketReplicationMetrics fetches bucket replication status metrics
+func (c *Client) GetBucketReplicationMetrics(ctx context.Context, bucketName string) (s replication.Metrics, err error) {
+	// Input validation.
+	if err := s3utils.CheckValidBucketName(bucketName); err != nil {
+		return s, err
+	}
+	// Get resources properly escaped and lined up before
+	// using them in http request.
+	urlValues := make(url.Values)
+	urlValues.Set("replication-metrics", "")
+
+	// Execute GET on bucket to get replication metrics.
+	resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{
+		bucketName:  bucketName,
+		queryValues: urlValues,
+	})
+
+	defer closeResponse(resp)
+	if err != nil {
+		return s, err
+	}
+
+	if resp.StatusCode != http.StatusOK {
+		return s, httpRespToErrorResponse(resp, bucketName, "")
+	}
+	respBytes, err := ioutil.ReadAll(resp.Body)
+	if err != nil {
+		return s, err
+	}
+
+	if err := json.Unmarshal(respBytes, &s); err != nil {
+		return s, err
+	}
+	return s, nil
+}
+
+// mustGetUUID - get a random UUID.
+func mustGetUUID() string {
+	u, err := uuid.NewRandom()
+	if err != nil {
+		return ""
+	}
+	return u.String()
+}
+
+// ResetBucketReplication kicks off replication of previously replicated objects if ExistingObjectReplication
+// is enabled in the replication config
+func (c *Client) ResetBucketReplication(ctx context.Context, bucketName string, olderThan time.Duration) (rID string, err error) {
+	rID = mustGetUUID()
+	_, err = c.resetBucketReplicationOnTarget(ctx, bucketName, olderThan, "", rID)
+	if err != nil {
+		return rID, err
+	}
+	return rID, nil
+}
+
+// ResetBucketReplicationOnTarget kicks off replication of previously replicated objects if
+// ExistingObjectReplication is enabled in the replication config
+func (c *Client) ResetBucketReplicationOnTarget(ctx context.Context, bucketName string, olderThan time.Duration, tgtArn string) (replication.ResyncTargetsInfo, error) {
+	return c.resetBucketReplicationOnTarget(ctx, bucketName, olderThan, tgtArn, mustGetUUID())
+}
+
+// resetBucketReplicationOnTarget kicks off replication of previously replicated objects if
+// ExistingObjectReplication is enabled in the replication config
+func (c *Client) resetBucketReplicationOnTarget(ctx context.Context, bucketName string, olderThan time.Duration, tgtArn string, resetID string) (rinfo replication.ResyncTargetsInfo, err error) {
+	// Input validation.
+	if err = s3utils.CheckValidBucketName(bucketName); err != nil {
+		return
+	}
+	// Get resources properly escaped and lined up before
+	// using them in http request.
+	urlValues := make(url.Values)
+	urlValues.Set("replication-reset", "")
+	if olderThan > 0 {
+		urlValues.Set("older-than", olderThan.String())
+	}
+	if tgtArn != "" {
+		urlValues.Set("arn", tgtArn)
+	}
+	urlValues.Set("reset-id", resetID)
+	// Execute PUT on bucket to reset replication.
+	resp, err := c.executeMethod(ctx, http.MethodPut, requestMetadata{
+		bucketName:  bucketName,
+		queryValues: urlValues,
+	})
+
+	defer closeResponse(resp)
+	if err != nil {
+		return rinfo, err
+	}
+
+	if resp.StatusCode != http.StatusOK {
+		return rinfo, httpRespToErrorResponse(resp, bucketName, "")
+	}
+
+	if err = json.NewDecoder(resp.Body).Decode(&rinfo); err != nil {
+		return rinfo, err
+	}
+	return rinfo, nil
+}
+
+// GetBucketReplicationResyncStatus gets the status of replication resync
+func (c *Client) GetBucketReplicationResyncStatus(ctx context.Context, bucketName, arn string) (rinfo replication.ResyncTargetsInfo, err error) {
+	// Input validation.
+	if err := s3utils.CheckValidBucketName(bucketName); err != nil {
+		return rinfo, err
+	}
+	// Get resources properly escaped and lined up before
+	// using them in http request.
+	urlValues := make(url.Values)
+	urlValues.Set("replication-reset-status", "")
+	if arn != "" {
+		urlValues.Set("arn", arn)
+	}
+	// Execute GET on bucket to get replication resync status.
+ resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{ + bucketName: bucketName, + queryValues: urlValues, + }) + + defer closeResponse(resp) + if err != nil { + return rinfo, err + } + + if resp.StatusCode != http.StatusOK { + return rinfo, httpRespToErrorResponse(resp, bucketName, "") + } + + if err = json.NewDecoder(resp.Body).Decode(&rinfo); err != nil { + return rinfo, err + } + return rinfo, nil +} diff --git a/vendor/github.com/minio/minio-go/v7/api-bucket-tagging.go b/vendor/github.com/minio/minio-go/v7/api-bucket-tagging.go index fcb966e636934..1615f8f87391b 100644 --- a/vendor/github.com/minio/minio-go/v7/api-bucket-tagging.go +++ b/vendor/github.com/minio/minio-go/v7/api-bucket-tagging.go @@ -32,7 +32,7 @@ import ( // GetBucketTagging fetch tagging configuration for a bucket with a // context to control cancellations and timeouts. -func (c Client) GetBucketTagging(ctx context.Context, bucketName string) (*tags.Tags, error) { +func (c *Client) GetBucketTagging(ctx context.Context, bucketName string) (*tags.Tags, error) { // Input validation. if err := s3utils.CheckValidBucketName(bucketName); err != nil { return nil, err @@ -64,7 +64,7 @@ func (c Client) GetBucketTagging(ctx context.Context, bucketName string) (*tags. // SetBucketTagging sets tagging configuration for a bucket // with a context to control cancellations and timeouts. -func (c Client) SetBucketTagging(ctx context.Context, bucketName string, tags *tags.Tags) error { +func (c *Client) SetBucketTagging(ctx context.Context, bucketName string, tags *tags.Tags) error { // Input validation. if err := s3utils.CheckValidBucketName(bucketName); err != nil { return err @@ -107,7 +107,7 @@ func (c Client) SetBucketTagging(ctx context.Context, bucketName string, tags *t // RemoveBucketTagging removes tagging configuration for a // bucket with a context to control cancellations and timeouts. -func (c Client) RemoveBucketTagging(ctx context.Context, bucketName string) error { +func (c *Client) RemoveBucketTagging(ctx context.Context, bucketName string) error { // Input validation. if err := s3utils.CheckValidBucketName(bucketName); err != nil { return err diff --git a/vendor/github.com/minio/minio-go/v7/api-bucket-versioning.go b/vendor/github.com/minio/minio-go/v7/api-bucket-versioning.go index e3ceeb336d460..930b1b93a2fc4 100644 --- a/vendor/github.com/minio/minio-go/v7/api-bucket-versioning.go +++ b/vendor/github.com/minio/minio-go/v7/api-bucket-versioning.go @@ -27,7 +27,7 @@ import ( ) // SetBucketVersioning sets a bucket versioning configuration -func (c Client) SetBucketVersioning(ctx context.Context, bucketName string, config BucketVersioningConfiguration) error { +func (c *Client) SetBucketVersioning(ctx context.Context, bucketName string, config BucketVersioningConfiguration) error { // Input validation. if err := s3utils.CheckValidBucketName(bucketName); err != nil { return err @@ -67,12 +67,12 @@ func (c Client) SetBucketVersioning(ctx context.Context, bucketName string, conf } // EnableVersioning - enable object versioning in given bucket. -func (c Client) EnableVersioning(ctx context.Context, bucketName string) error { +func (c *Client) EnableVersioning(ctx context.Context, bucketName string) error { return c.SetBucketVersioning(ctx, bucketName, BucketVersioningConfiguration{Status: "Enabled"}) } // SuspendVersioning - suspend object versioning in given bucket. 
-func (c Client) SuspendVersioning(ctx context.Context, bucketName string) error { +func (c *Client) SuspendVersioning(ctx context.Context, bucketName string) error { return c.SetBucketVersioning(ctx, bucketName, BucketVersioningConfiguration{Status: "Suspended"}) } @@ -102,7 +102,7 @@ func (b BucketVersioningConfiguration) Suspended() bool { // GetBucketVersioning gets the versioning configuration on // an existing bucket with a context to control cancellations and timeouts. -func (c Client) GetBucketVersioning(ctx context.Context, bucketName string) (BucketVersioningConfiguration, error) { +func (c *Client) GetBucketVersioning(ctx context.Context, bucketName string) (BucketVersioningConfiguration, error) { // Input validation. if err := s3utils.CheckValidBucketName(bucketName); err != nil { return BucketVersioningConfiguration{}, err diff --git a/vendor/github.com/minio/minio-go/v7/api-compose-object.go b/vendor/github.com/minio/minio-go/v7/api-compose-object.go index 40238de35ffdd..b59924a3d514f 100644 --- a/vendor/github.com/minio/minio-go/v7/api-compose-object.go +++ b/vendor/github.com/minio/minio-go/v7/api-compose-object.go @@ -201,9 +201,9 @@ func (opts CopySrcOptions) validate() (err error) { } // Low level implementation of CopyObject API, supports only upto 5GiB worth of copy. -func (c Client) copyObjectDo(ctx context.Context, srcBucket, srcObject, destBucket, destObject string, - metadata map[string]string, srcOpts CopySrcOptions, dstOpts PutObjectOptions) (ObjectInfo, error) { - +func (c *Client) copyObjectDo(ctx context.Context, srcBucket, srcObject, destBucket, destObject string, + metadata map[string]string, srcOpts CopySrcOptions, dstOpts PutObjectOptions, +) (ObjectInfo, error) { // Build headers. headers := make(http.Header) @@ -220,6 +220,19 @@ func (c Client) copyObjectDo(ctx context.Context, srcBucket, srcObject, destBuck if dstOpts.Internal.SourceETag != "" { headers.Set(minIOBucketSourceETag, dstOpts.Internal.SourceETag) } + if dstOpts.Internal.ReplicationRequest { + headers.Set(minIOBucketReplicationRequest, "") + } + if !dstOpts.Internal.LegalholdTimestamp.IsZero() { + headers.Set(minIOBucketReplicationObjectLegalHoldTimestamp, dstOpts.Internal.LegalholdTimestamp.Format(time.RFC3339Nano)) + } + if !dstOpts.Internal.RetentionTimestamp.IsZero() { + headers.Set(minIOBucketReplicationObjectRetentionTimestamp, dstOpts.Internal.RetentionTimestamp.Format(time.RFC3339Nano)) + } + if !dstOpts.Internal.TaggingTimestamp.IsZero() { + headers.Set(minIOBucketReplicationTaggingTimestamp, dstOpts.Internal.TaggingTimestamp.Format(time.RFC3339Nano)) + } + if len(dstOpts.UserTags) != 0 { headers.Set(amzTaggingHeader, s3utils.TagEncode(dstOpts.UserTags)) } @@ -230,8 +243,10 @@ func (c Client) copyObjectDo(ctx context.Context, srcBucket, srcObject, destBuck customHeader: headers, } if dstOpts.Internal.SourceVersionID != "" { - if _, err := uuid.Parse(dstOpts.Internal.SourceVersionID); err != nil { - return ObjectInfo{}, errInvalidArgument(err.Error()) + if dstOpts.Internal.SourceVersionID != nullVersionID { + if _, err := uuid.Parse(dstOpts.Internal.SourceVersionID); err != nil { + return ObjectInfo{}, errInvalidArgument(err.Error()) + } } urlValues := make(url.Values) urlValues.Set("versionId", dstOpts.Internal.SourceVersionID) @@ -269,9 +284,9 @@ func (c Client) copyObjectDo(ctx context.Context, srcBucket, srcObject, destBuck return objInfo, nil } -func (c Client) copyObjectPartDo(ctx context.Context, srcBucket, srcObject, destBucket, destObject string, uploadID string, - partID 
int, startOffset int64, length int64, metadata map[string]string) (p CompletePart, err error) { - +func (c *Client) copyObjectPartDo(ctx context.Context, srcBucket, srcObject, destBucket, destObject string, uploadID string, + partID int, startOffset int64, length int64, metadata map[string]string, +) (p CompletePart, err error) { headers := make(http.Header) // Set source @@ -322,9 +337,9 @@ func (c Client) copyObjectPartDo(ctx context.Context, srcBucket, srcObject, dest // uploadPartCopy - helper function to create a part in a multipart // upload via an upload-part-copy request // https://docs.aws.amazon.com/AmazonS3/latest/API/mpUploadUploadPartCopy.html -func (c Client) uploadPartCopy(ctx context.Context, bucket, object, uploadID string, partNumber int, - headers http.Header) (p CompletePart, err error) { - +func (c *Client) uploadPartCopy(ctx context.Context, bucket, object, uploadID string, partNumber int, + headers http.Header, +) (p CompletePart, err error) { // Build query parameters urlValues := make(url.Values) urlValues.Set("partNumber", strconv.Itoa(partNumber)) @@ -362,7 +377,7 @@ func (c Client) uploadPartCopy(ctx context.Context, bucket, object, uploadID str // and concatenates them into a new object using only server-side copying // operations. Optionally takes progress reader hook for applications to // look at current progress. -func (c Client) ComposeObject(ctx context.Context, dst CopyDestOptions, srcs ...CopySrcOptions) (UploadInfo, error) { +func (c *Client) ComposeObject(ctx context.Context, dst CopyDestOptions, srcs ...CopySrcOptions) (UploadInfo, error) { if len(srcs) < 1 || len(srcs) > maxPartsCount { return UploadInfo{}, errInvalidArgument("There must be as least one and up to 10000 source objects.") } @@ -383,7 +398,7 @@ func (c Client) ComposeObject(ctx context.Context, dst CopyDestOptions, srcs ... var err error for i, src := range srcs { opts := StatObjectOptions{ServerSideEncryption: encrypt.SSE(src.Encryption), VersionID: src.VersionID} - srcObjectInfos[i], err = c.statObject(context.Background(), src.Bucket, src.Object, opts) + srcObjectInfos[i], err = c.StatObject(context.Background(), src.Bucket, src.Object, opts) if err != nil { return UploadInfo{}, err } @@ -477,7 +492,7 @@ func (c Client) ComposeObject(ctx context.Context, dst CopyDestOptions, srcs ... objParts := []CompletePart{} partIndex := 1 for i, src := range srcs { - var h = make(http.Header) + h := make(http.Header) src.Marshal(h) if dst.Encryption != nil && dst.Encryption.Type() == encrypt.SSEC { dst.Encryption.Marshal(h) @@ -510,7 +525,7 @@ func (c Client) ComposeObject(ctx context.Context, dst CopyDestOptions, srcs ... // 4. Make final complete-multipart request. 
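	// The empty PutObjectOptions{} passed below satisfies completeMultipartUpload's
	// widened signature (see api-put-object-multipart.go); ComposeObject has no
	// extra headers of its own to forward.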
 	uploadInfo, err := c.completeMultipartUpload(ctx, dst.Bucket, dst.Object, uploadID,
-		completeMultipartUpload{Parts: objParts})
+		completeMultipartUpload{Parts: objParts}, PutObjectOptions{})
 	if err != nil {
 		return UploadInfo{}, err
 	}
diff --git a/vendor/github.com/minio/minio-go/v7/api-copy-object.go b/vendor/github.com/minio/minio-go/v7/api-copy-object.go
index 9af036ec0569d..1c0ad2be4c0f7 100644
--- a/vendor/github.com/minio/minio-go/v7/api-copy-object.go
+++ b/vendor/github.com/minio/minio-go/v7/api-copy-object.go
@@ -25,7 +25,7 @@ import (
 )
 
 // CopyObject - copy a source object into a new object
-func (c Client) CopyObject(ctx context.Context, dst CopyDestOptions, src CopySrcOptions) (UploadInfo, error) {
+func (c *Client) CopyObject(ctx context.Context, dst CopyDestOptions, src CopySrcOptions) (UploadInfo, error) {
 	if err := src.validate(); err != nil {
 		return UploadInfo{}, err
 	}
diff --git a/vendor/github.com/minio/minio-go/v7/api-datatypes.go b/vendor/github.com/minio/minio-go/v7/api-datatypes.go
index 970e1fa5e3ccf..2f5912f378bf3 100644
--- a/vendor/github.com/minio/minio-go/v7/api-datatypes.go
+++ b/vendor/github.com/minio/minio-go/v7/api-datatypes.go
@@ -64,8 +64,9 @@ func (m *StringMap) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
 // Owner name.
 type Owner struct {
-	DisplayName string `json:"name"`
-	ID          string `json:"id"`
+	XMLName     xml.Name `xml:"Owner" json:"owner"`
+	DisplayName string   `xml:"DisplayName" json:"name"`
+	ID          string   `xml:"ID" json:"id"`
 }
 
 // UploadInfo contains information about the
@@ -85,6 +86,14 @@ type UploadInfo struct {
 	ExpirationRuleID string
 }
 
+// RestoreInfo contains information about the restore operation of an archived object
+type RestoreInfo struct {
+	// Whether the restore operation is still ongoing
+	OngoingRestore bool
+	// When the restored copy of the archived object will be removed
+	ExpiryTime time.Time
+}
+
 // ObjectInfo container for object metadata.
 type ObjectInfo struct {
 	// An ETag is optionally set to md5sum of an object. In case of multipart objects,
@@ -115,14 +124,7 @@ type ObjectInfo struct {
 	Owner Owner
 
 	// ACL grant.
-	Grant []struct {
-		Grantee struct {
-			ID          string `xml:"ID"`
-			DisplayName string `xml:"DisplayName"`
-			URI         string `xml:"URI"`
-		} `xml:"Grantee"`
-		Permission string `xml:"Permission"`
-	} `xml:"Grant"`
+	Grant []Grant
 
 	// The class of storage used to store the object.
 	StorageClass string `json:"storageClass"`
@@ -144,6 +146,8 @@ type ObjectInfo struct {
 	Expiration       time.Time
 	ExpirationRuleID string
 
+	Restore *RestoreInfo
+
 	// Error
 	Err error `json:"-"`
 }
diff --git a/vendor/github.com/minio/minio-go/v7/api-error-response.go b/vendor/github.com/minio/minio-go/v7/api-error-response.go
index c45c4fdcb2434..dd781cae33b10 100644
--- a/vendor/github.com/minio/minio-go/v7/api-error-response.go
+++ b/vendor/github.com/minio/minio-go/v7/api-error-response.go
@@ -18,8 +18,11 @@ package minio
 
 import (
+	"bytes"
 	"encoding/xml"
 	"fmt"
+	"io"
+	"io/ioutil"
 	"net/http"
 )
 
@@ -44,6 +47,7 @@ type ErrorResponse struct {
 	Message    string
 	BucketName string
 	Key        string
+	Resource   string
 	RequestID  string `xml:"RequestId"`
 	HostID     string `xml:"HostId"`
 
@@ -98,6 +102,19 @@ const (
 	reportIssue = "Please report this issue at https://github.com/minio/minio-go/issues."
 )
 
+// xmlDecodeAndBody reads the whole body up to 1MB and
+// tries to XML decode it into v.
+// The body that was read and any error from reading or decoding is returned.
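+// Capping the read at 1MB keeps a misbehaving server from making the client
+// buffer an arbitrarily large error body.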
+func xmlDecodeAndBody(bodyReader io.Reader, v interface{}) ([]byte, error) { + // read the whole body (up to 1MB) + const maxBodyLength = 1 << 20 + body, err := ioutil.ReadAll(io.LimitReader(bodyReader, maxBodyLength)) + if err != nil { + return nil, err + } + return bytes.TrimSpace(body), xmlDecoder(bytes.NewReader(body), v) +} + // httpRespToErrorResponse returns a new encoded ErrorResponse // structure as error. func httpRespToErrorResponse(resp *http.Response, bucketName, objectName string) error { @@ -111,7 +128,7 @@ func httpRespToErrorResponse(resp *http.Response, bucketName, objectName string) Server: resp.Header.Get("Server"), } - err := xmlDecoder(resp.Body, &errResp) + errBody, err := xmlDecodeAndBody(resp.Body, &errResp) // Xml decoding failed with no body, fall back to HTTP headers. if err != nil { switch resp.StatusCode { @@ -156,10 +173,17 @@ func httpRespToErrorResponse(resp *http.Response, bucketName, objectName string) Key: objectName, } default: + msg := resp.Status + if len(errBody) > 0 { + msg = string(errBody) + if len(msg) > 1024 { + msg = msg[:1024] + "..." + } + } errResp = ErrorResponse{ StatusCode: resp.StatusCode, Code: resp.Status, - Message: resp.Status, + Message: msg, BucketName: bucketName, } } diff --git a/vendor/github.com/minio/minio-go/v7/api-get-object-acl.go b/vendor/github.com/minio/minio-go/v7/api-get-object-acl.go index afa53079d3e92..9041d99e937bb 100644 --- a/vendor/github.com/minio/minio-go/v7/api-get-object-acl.go +++ b/vendor/github.com/minio/minio-go/v7/api-get-object-acl.go @@ -19,29 +19,41 @@ package minio import ( "context" + "encoding/xml" "net/http" "net/url" ) +// Grantee represents the person being granted permissions. +type Grantee struct { + XMLName xml.Name `xml:"Grantee"` + ID string `xml:"ID"` + DisplayName string `xml:"DisplayName"` + URI string `xml:"URI"` +} + +// Grant holds grant information +type Grant struct { + XMLName xml.Name `xml:"Grant"` + Grantee Grantee + Permission string `xml:"Permission"` +} + +// AccessControlList contains the set of grantees and the permissions assigned to each grantee. 
+type AccessControlList struct { + XMLName xml.Name `xml:"AccessControlList"` + Grant []Grant + Permission string `xml:"Permission"` +} + type accessControlPolicy struct { - Owner struct { - ID string `xml:"ID"` - DisplayName string `xml:"DisplayName"` - } `xml:"Owner"` - AccessControlList struct { - Grant []struct { - Grantee struct { - ID string `xml:"ID"` - DisplayName string `xml:"DisplayName"` - URI string `xml:"URI"` - } `xml:"Grantee"` - Permission string `xml:"Permission"` - } `xml:"Grant"` - } `xml:"AccessControlList"` + XMLName xml.Name `xml:"AccessControlPolicy"` + Owner Owner + AccessControlList AccessControlList } // GetObjectACL get object ACLs -func (c Client) GetObjectACL(ctx context.Context, bucketName, objectName string) (*ObjectInfo, error) { +func (c *Client) GetObjectACL(ctx context.Context, bucketName, objectName string) (*ObjectInfo, error) { resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{ bucketName: bucketName, objectName: objectName, @@ -64,7 +76,7 @@ func (c Client) GetObjectACL(ctx context.Context, bucketName, objectName string) return nil, err } - objInfo, err := c.statObject(ctx, bucketName, objectName, StatObjectOptions{}) + objInfo, err := c.StatObject(ctx, bucketName, objectName, StatObjectOptions{}) if err != nil { return nil, err } diff --git a/vendor/github.com/minio/minio-go/v7/api-get-object-file.go b/vendor/github.com/minio/minio-go/v7/api-get-object-file.go index bccff45787210..2332dbf101fa1 100644 --- a/vendor/github.com/minio/minio-go/v7/api-get-object-file.go +++ b/vendor/github.com/minio/minio-go/v7/api-get-object-file.go @@ -28,7 +28,7 @@ import ( // FGetObject - download contents of an object to a local file. // The options can be used to specify the GET request further. -func (c Client) FGetObject(ctx context.Context, bucketName, objectName, filePath string, opts GetObjectOptions) error { +func (c *Client) FGetObject(ctx context.Context, bucketName, objectName, filePath string, opts GetObjectOptions) error { // Input validation. if err := s3utils.CheckValidBucketName(bucketName); err != nil { return err @@ -57,7 +57,7 @@ func (c Client) FGetObject(ctx context.Context, bucketName, objectName, filePath objectDir, _ := filepath.Split(filePath) if objectDir != "" { // Create any missing top level directories. - if err := os.MkdirAll(objectDir, 0700); err != nil { + if err := os.MkdirAll(objectDir, 0o700); err != nil { return err } } @@ -72,7 +72,7 @@ func (c Client) FGetObject(ctx context.Context, bucketName, objectName, filePath filePartPath := filePath + objectStat.ETag + ".part.minio" // If exists, open in append mode. If not create it as a part file. 
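	// NOTE: the 0o600 literal below (and 0o700 above) is Go's 0o-prefixed octal
	// syntax; the permission bits themselves are unchanged from 0600/0700.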
- filePart, err := os.OpenFile(filePartPath, os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0600) + filePart, err := os.OpenFile(filePartPath, os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0o600) if err != nil { return err } diff --git a/vendor/github.com/minio/minio-go/v7/api-get-object.go b/vendor/github.com/minio/minio-go/v7/api-get-object.go index ef9dd45d4c49d..2ce4b260fc64d 100644 --- a/vendor/github.com/minio/minio-go/v7/api-get-object.go +++ b/vendor/github.com/minio/minio-go/v7/api-get-object.go @@ -24,13 +24,14 @@ import ( "io" "net/http" "net/url" + "strconv" "sync" "github.com/minio/minio-go/v7/pkg/s3utils" ) // GetObject wrapper function that accepts a request context -func (c Client) GetObject(ctx context.Context, bucketName, objectName string, opts GetObjectOptions) (*Object, error) { +func (c *Client) GetObject(ctx context.Context, bucketName, objectName string, opts GetObjectOptions) (*Object, error) { // Input validation. if err := s3utils.CheckValidBucketName(bucketName); err != nil { return nil, err @@ -139,7 +140,7 @@ func (c Client) GetObject(ctx context.Context, bucketName, objectName string, op // Remove range header if already set, for stat Operations to get original file size. delete(opts.headers, "Range") - objectInfo, err = c.statObject(ctx, bucketName, objectName, StatObjectOptions(opts)) + objectInfo, err = c.StatObject(ctx, bucketName, objectName, StatObjectOptions(opts)) if err != nil { resCh <- getResponse{ Error: err, @@ -162,7 +163,7 @@ func (c Client) GetObject(ctx context.Context, bucketName, objectName string, op if etag != "" && !snowball { opts.SetMatchETag(etag) } - objectInfo, err := c.statObject(ctx, bucketName, objectName, StatObjectOptions(opts)) + objectInfo, err := c.StatObject(ctx, bucketName, objectName, StatObjectOptions(opts)) if err != nil { resCh <- getResponse{ Error: err, @@ -639,7 +640,7 @@ func newObject(reqCh chan<- getRequest, resCh <-chan getResponse, doneCh chan<- // // For more information about the HTTP Range header. // go to http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35. -func (c Client) getObject(ctx context.Context, bucketName, objectName string, opts GetObjectOptions) (io.ReadCloser, ObjectInfo, http.Header, error) { +func (c *Client) getObject(ctx context.Context, bucketName, objectName string, opts GetObjectOptions) (io.ReadCloser, ObjectInfo, http.Header, error) { // Validate input arguments. if err := s3utils.CheckValidBucketName(bucketName); err != nil { return nil, ObjectInfo{}, nil, err @@ -652,6 +653,9 @@ func (c Client) getObject(ctx context.Context, bucketName, objectName string, op if opts.VersionID != "" { urlValues.Set("versionId", opts.VersionID) } + if opts.PartNumber > 0 { + urlValues.Set("partNumber", strconv.Itoa(opts.PartNumber)) + } // Execute GET on objectName. resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{ diff --git a/vendor/github.com/minio/minio-go/v7/api-get-options.go b/vendor/github.com/minio/minio-go/v7/api-get-options.go index 9e0cb21479ab6..184ef9f868176 100644 --- a/vendor/github.com/minio/minio-go/v7/api-get-options.go +++ b/vendor/github.com/minio/minio-go/v7/api-get-options.go @@ -25,7 +25,7 @@ import ( "github.com/minio/minio-go/v7/pkg/encrypt" ) -//AdvancedGetOptions for internal use by MinIO server - not intended for client use. +// AdvancedGetOptions for internal use by MinIO server - not intended for client use. 
type AdvancedGetOptions struct { ReplicationDeleteMarker bool ReplicationProxyRequest string @@ -37,6 +37,7 @@ type GetObjectOptions struct { headers map[string]string ServerSideEncryption encrypt.ServerSide VersionID string + PartNumber int // To be not used by external applications Internal AdvancedGetOptions } diff --git a/vendor/github.com/minio/minio-go/v7/api-list.go b/vendor/github.com/minio/minio-go/v7/api-list.go index 7996c11e9db37..9b2b00ae3c0fe 100644 --- a/vendor/github.com/minio/minio-go/v7/api-list.go +++ b/vendor/github.com/minio/minio-go/v7/api-list.go @@ -36,7 +36,7 @@ import ( // fmt.Println(message) // } // -func (c Client) ListBuckets(ctx context.Context) ([]BucketInfo, error) { +func (c *Client) ListBuckets(ctx context.Context) ([]BucketInfo, error) { // Execute GET on service. resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{contentSHA256Hex: emptySHA256Hex}) defer closeResponse(resp) @@ -56,14 +56,13 @@ func (c Client) ListBuckets(ctx context.Context) ([]BucketInfo, error) { return listAllMyBucketsResult.Buckets.Bucket, nil } -/// Bucket Read Operations. - -func (c Client) listObjectsV2(ctx context.Context, bucketName, objectPrefix string, recursive, metadata bool, maxKeys int) <-chan ObjectInfo { +// Bucket List Operations. +func (c *Client) listObjectsV2(ctx context.Context, bucketName string, opts ListObjectsOptions) <-chan ObjectInfo { // Allocate new list objects channel. objectStatCh := make(chan ObjectInfo, 1) // Default listing is delimited at "/" delimiter := "/" - if recursive { + if opts.Recursive { // If recursive we do not delimit. delimiter = "" } @@ -81,7 +80,7 @@ func (c Client) listObjectsV2(ctx context.Context, bucketName, objectPrefix stri } // Validate incoming object prefix. - if err := s3utils.CheckValidObjectNamePrefix(objectPrefix); err != nil { + if err := s3utils.CheckValidObjectNamePrefix(opts.Prefix); err != nil { defer close(objectStatCh) objectStatCh <- ObjectInfo{ Err: err, @@ -96,8 +95,8 @@ func (c Client) listObjectsV2(ctx context.Context, bucketName, objectPrefix stri var continuationToken string for { // Get list of objects a maximum of 1000 per request. - result, err := c.listObjectsV2Query(ctx, bucketName, objectPrefix, continuationToken, - fetchOwner, metadata, delimiter, maxKeys) + result, err := c.listObjectsV2Query(ctx, bucketName, opts.Prefix, continuationToken, + fetchOwner, opts.WithMetadata, delimiter, opts.StartAfter, opts.MaxKeys, opts.headers) if err != nil { objectStatCh <- ObjectInfo{ Err: err, @@ -148,12 +147,13 @@ func (c Client) listObjectsV2(ctx context.Context, bucketName, objectPrefix stri // You can use the request parameters as selection criteria to return a subset of the objects in a bucket. // request parameters :- // --------- +// ?prefix - Limits the response to keys that begin with the specified prefix. // ?continuation-token - Used to continue iterating over a set of objects +// ?metadata - Specifies if we want metadata for the objects as part of list operation. // ?delimiter - A delimiter is a character you use to group keys. -// ?prefix - Limits the response to keys that begin with the specified prefix. +// ?start-after - Sets a marker to start listing lexically at this key onwards. // ?max-keys - Sets the maximum number of keys returned in the response body. -// ?metadata - Specifies if we want metadata for the objects as part of list operation. 
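// A caller-level sketch of how these parameters surface through the public
// ListObjects API (assumes an initialized *Client named api, matching the
// examples elsewhere in this package; bucket and prefix names are illustrative):
//
//	opts := ListObjectsOptions{
//		Prefix:       "photos/",
//		Recursive:    true,
//		StartAfter:   "photos/2021-12-31",
//		WithMetadata: true,
//	}
//	for object := range api.ListObjects(context.Background(), "mybucket", opts) {
//		if object.Err != nil {
//			fmt.Println(object.Err)
//			return
//		}
//		fmt.Println(object.Key)
//	}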
-func (c Client) listObjectsV2Query(ctx context.Context, bucketName, objectPrefix, continuationToken string, fetchOwner, metadata bool, delimiter string, maxkeys int) (ListBucketV2Result, error) { +func (c *Client) listObjectsV2Query(ctx context.Context, bucketName, objectPrefix, continuationToken string, fetchOwner, metadata bool, delimiter string, startAfter string, maxkeys int, headers http.Header) (ListBucketV2Result, error) { // Validate bucket name. if err := s3utils.CheckValidBucketName(bucketName); err != nil { return ListBucketV2Result{}, err @@ -173,6 +173,11 @@ func (c Client) listObjectsV2Query(ctx context.Context, bucketName, objectPrefix urlValues.Set("metadata", "true") } + // Set this conditionally if asked + if startAfter != "" { + urlValues.Set("start-after", startAfter) + } + // Always set encoding-type in ListObjects V2 urlValues.Set("encoding-type", "url") @@ -202,6 +207,7 @@ func (c Client) listObjectsV2Query(ctx context.Context, bucketName, objectPrefix bucketName: bucketName, queryValues: urlValues, contentSHA256Hex: emptySHA256Hex, + customHeader: headers, }) defer closeResponse(resp) if err != nil { @@ -246,12 +252,12 @@ func (c Client) listObjectsV2Query(ctx context.Context, bucketName, objectPrefix return listBucketResult, nil } -func (c Client) listObjects(ctx context.Context, bucketName, objectPrefix string, recursive bool, maxKeys int) <-chan ObjectInfo { +func (c *Client) listObjects(ctx context.Context, bucketName string, opts ListObjectsOptions) <-chan ObjectInfo { // Allocate new list objects channel. objectStatCh := make(chan ObjectInfo, 1) // Default listing is delimited at "/" delimiter := "/" - if recursive { + if opts.Recursive { // If recursive we do not delimit. delimiter = "" } @@ -264,7 +270,7 @@ func (c Client) listObjects(ctx context.Context, bucketName, objectPrefix string return objectStatCh } // Validate incoming object prefix. - if err := s3utils.CheckValidObjectNamePrefix(objectPrefix); err != nil { + if err := s3utils.CheckValidObjectNamePrefix(opts.Prefix); err != nil { defer close(objectStatCh) objectStatCh <- ObjectInfo{ Err: err, @@ -276,10 +282,10 @@ func (c Client) listObjects(ctx context.Context, bucketName, objectPrefix string go func(objectStatCh chan<- ObjectInfo) { defer close(objectStatCh) - marker := "" + marker := opts.StartAfter for { // Get list of objects a maximum of 1000 per request. - result, err := c.listObjectsQuery(ctx, bucketName, objectPrefix, marker, delimiter, maxKeys) + result, err := c.listObjectsQuery(ctx, bucketName, opts.Prefix, marker, delimiter, opts.MaxKeys, opts.headers) if err != nil { objectStatCh <- ObjectInfo{ Err: err, @@ -326,12 +332,12 @@ func (c Client) listObjects(ctx context.Context, bucketName, objectPrefix string return objectStatCh } -func (c Client) listObjectVersions(ctx context.Context, bucketName, prefix string, recursive bool, maxKeys int) <-chan ObjectInfo { +func (c *Client) listObjectVersions(ctx context.Context, bucketName string, opts ListObjectsOptions) <-chan ObjectInfo { // Allocate new list objects channel. resultCh := make(chan ObjectInfo, 1) // Default listing is delimited at "/" delimiter := "/" - if recursive { + if opts.Recursive { // If recursive we do not delimit. delimiter = "" } @@ -346,7 +352,7 @@ func (c Client) listObjectVersions(ctx context.Context, bucketName, prefix strin } // Validate incoming object prefix. 
- if err := s3utils.CheckValidObjectNamePrefix(prefix); err != nil { + if err := s3utils.CheckValidObjectNamePrefix(opts.Prefix); err != nil { defer close(resultCh) resultCh <- ObjectInfo{ Err: err, @@ -365,7 +371,7 @@ func (c Client) listObjectVersions(ctx context.Context, bucketName, prefix strin for { // Get list of objects a maximum of 1000 per request. - result, err := c.listObjectVersionsQuery(ctx, bucketName, prefix, keyMarker, versionIDMarker, delimiter, maxKeys) + result, err := c.listObjectVersionsQuery(ctx, bucketName, opts.Prefix, keyMarker, versionIDMarker, delimiter, opts.MaxKeys, opts.headers) if err != nil { resultCh <- ObjectInfo{ Err: err, @@ -376,15 +382,14 @@ func (c Client) listObjectVersions(ctx context.Context, bucketName, prefix strin // If contents are available loop through and send over channel. for _, version := range result.Versions { info := ObjectInfo{ - ETag: trimEtag(version.ETag), - Key: version.Key, - LastModified: version.LastModified, - Size: version.Size, - Owner: version.Owner, - StorageClass: version.StorageClass, - IsLatest: version.IsLatest, - VersionID: version.VersionID, - + ETag: trimEtag(version.ETag), + Key: version.Key, + LastModified: version.LastModified, + Size: version.Size, + Owner: version.Owner, + StorageClass: version.StorageClass, + IsLatest: version.IsLatest, + VersionID: version.VersionID, IsDeleteMarker: version.isDeleteMarker, } select { @@ -438,7 +443,7 @@ func (c Client) listObjectVersions(ctx context.Context, bucketName, prefix strin // ?delimiter - A delimiter is a character you use to group keys. // ?prefix - Limits the response to keys that begin with the specified prefix. // ?max-keys - Sets the maximum number of keys returned in the response body. -func (c Client) listObjectVersionsQuery(ctx context.Context, bucketName, prefix, keyMarker, versionIDMarker, delimiter string, maxkeys int) (ListVersionsResult, error) { +func (c *Client) listObjectVersionsQuery(ctx context.Context, bucketName, prefix, keyMarker, versionIDMarker, delimiter string, maxkeys int, headers http.Header) (ListVersionsResult, error) { // Validate bucket name. if err := s3utils.CheckValidBucketName(bucketName); err != nil { return ListVersionsResult{}, err @@ -483,6 +488,7 @@ func (c Client) listObjectVersionsQuery(ctx context.Context, bucketName, prefix, bucketName: bucketName, queryValues: urlValues, contentSHA256Hex: emptySHA256Hex, + customHeader: headers, }) defer closeResponse(resp) if err != nil { @@ -534,7 +540,7 @@ func (c Client) listObjectVersionsQuery(ctx context.Context, bucketName, prefix, // ?delimiter - A delimiter is a character you use to group keys. // ?prefix - Limits the response to keys that begin with the specified prefix. // ?max-keys - Sets the maximum number of keys returned in the response body. -func (c Client) listObjectsQuery(ctx context.Context, bucketName, objectPrefix, objectMarker, delimiter string, maxkeys int) (ListBucketResult, error) { +func (c *Client) listObjectsQuery(ctx context.Context, bucketName, objectPrefix, objectMarker, delimiter string, maxkeys int, headers http.Header) (ListBucketResult, error) { // Validate bucket name. 
 	if err := s3utils.CheckValidBucketName(bucketName); err != nil {
 		return ListBucketResult{}, err
@@ -571,6 +577,7 @@ func (c Client) listObjectsQuery(ctx context.Context, bucketName, objectPrefix,
 		bucketName:       bucketName,
 		queryValues:      urlValues,
 		contentSHA256Hex: emptySHA256Hex,
+		customHeader:     headers,
 	})
 	defer closeResponse(resp)
 	if err != nil {
@@ -626,9 +633,25 @@ type ListObjectsOptions struct {
 	// batch, advanced use-case not useful for most
 	// applications
 	MaxKeys int
+	// StartAfter starts listing lexically at this
+	// object onwards; this value can also be set
+	// for Marker when `UseV1` is set to true.
+	StartAfter string
 	// Use the deprecated list objects V1 API
 	UseV1 bool
+
+	headers http.Header
+}
+
+// Set adds a key-value pair to the options. The
+// key-value pair will be part of the HTTP GET request
+// headers.
+func (o *ListObjectsOptions) Set(key, value string) {
+	if o.headers == nil {
+		o.headers = make(http.Header)
+	}
+	o.headers.Set(key, value)
 }
 
 // ListObjects returns objects list after evaluating the passed options.
@@ -638,24 +661,24 @@ type ListObjectsOptions struct {
 // fmt.Println(object)
 // }
 //
-func (c Client) ListObjects(ctx context.Context, bucketName string, opts ListObjectsOptions) <-chan ObjectInfo {
+func (c *Client) ListObjects(ctx context.Context, bucketName string, opts ListObjectsOptions) <-chan ObjectInfo {
 	if opts.WithVersions {
-		return c.listObjectVersions(ctx, bucketName, opts.Prefix, opts.Recursive, opts.MaxKeys)
+		return c.listObjectVersions(ctx, bucketName, opts)
 	}
 
 	// Use legacy list objects v1 API
 	if opts.UseV1 {
-		return c.listObjects(ctx, bucketName, opts.Prefix, opts.Recursive, opts.MaxKeys)
+		return c.listObjects(ctx, bucketName, opts)
 	}
 
 	// Check whether this is snowball region, if yes ListObjectsV2 doesn't work, fallback to listObjectsV1.
 	if location, ok := c.bucketLocCache.Get(bucketName); ok {
 		if location == "snowball" {
-			return c.listObjects(ctx, bucketName, opts.Prefix, opts.Recursive, opts.MaxKeys)
+			return c.listObjects(ctx, bucketName, opts)
 		}
 	}
 
-	return c.listObjectsV2(ctx, bucketName, opts.Prefix, opts.Recursive, opts.WithMetadata, opts.MaxKeys)
+	return c.listObjectsV2(ctx, bucketName, opts)
 }
 
 // ListIncompleteUploads - List incompletely uploaded multipart objects.
@@ -674,12 +697,12 @@ func (c Client) ListObjects(ctx context.Context, bucketName string, opts ListObj
 // for message := range api.ListIncompleteUploads(context.Background(), "mytestbucket", "starthere", recursive) {
 // fmt.Println(message)
 // }
-func (c Client) ListIncompleteUploads(ctx context.Context, bucketName, objectPrefix string, recursive bool) <-chan ObjectMultipartInfo {
+func (c *Client) ListIncompleteUploads(ctx context.Context, bucketName, objectPrefix string, recursive bool) <-chan ObjectMultipartInfo {
 	return c.listIncompleteUploads(ctx, bucketName, objectPrefix, recursive)
 }
 
 // listIncompleteUploads lists all incomplete uploads.
-func (c Client) listIncompleteUploads(ctx context.Context, bucketName, objectPrefix string, recursive bool) <-chan ObjectMultipartInfo {
+func (c *Client) listIncompleteUploads(ctx context.Context, bucketName, objectPrefix string, recursive bool) <-chan ObjectMultipartInfo {
 	// Allocate channel for multipart uploads.
 	objectMultipartStatCh := make(chan ObjectMultipartInfo, 1)
 	// Delimiter is set to "/" by default.
@@ -751,7 +774,6 @@ func (c Client) listIncompleteUploads(ctx context.Context, bucketName, objectPre
 	}(objectMultipartStatCh)
 	// return.
return objectMultipartStatCh - } // listMultipartUploadsQuery - (List Multipart Uploads). @@ -765,7 +787,7 @@ func (c Client) listIncompleteUploads(ctx context.Context, bucketName, objectPre // ?delimiter - A delimiter is a character you use to group keys. // ?prefix - Limits the response to keys that begin with the specified prefix. // ?max-uploads - Sets the maximum number of multipart uploads returned in the response body. -func (c Client) listMultipartUploadsQuery(ctx context.Context, bucketName, keyMarker, uploadIDMarker, prefix, delimiter string, maxUploads int) (ListMultipartUploadsResult, error) { +func (c *Client) listMultipartUploadsQuery(ctx context.Context, bucketName, keyMarker, uploadIDMarker, prefix, delimiter string, maxUploads int) (ListMultipartUploadsResult, error) { // Get resources properly escaped and lined up before using them in http request. urlValues := make(url.Values) // Set uploads. @@ -844,7 +866,7 @@ func (c Client) listMultipartUploadsQuery(ctx context.Context, bucketName, keyMa } // listObjectParts list all object parts recursively. -func (c Client) listObjectParts(ctx context.Context, bucketName, objectName, uploadID string) (partsInfo map[int]ObjectPart, err error) { +func (c *Client) listObjectParts(ctx context.Context, bucketName, objectName, uploadID string) (partsInfo map[int]ObjectPart, err error) { // Part number marker for the next batch of request. var nextPartNumberMarker int partsInfo = make(map[int]ObjectPart) @@ -873,7 +895,7 @@ func (c Client) listObjectParts(ctx context.Context, bucketName, objectName, upl } // findUploadIDs lists all incomplete uploads and find the uploadIDs of the matching object name. -func (c Client) findUploadIDs(ctx context.Context, bucketName, objectName string) ([]string, error) { +func (c *Client) findUploadIDs(ctx context.Context, bucketName, objectName string) ([]string, error) { var uploadIDs []string // Make list incomplete uploads recursive. isRecursive := true @@ -900,7 +922,7 @@ func (c Client) findUploadIDs(ctx context.Context, bucketName, objectName string // ?part-number-marker - Specifies the part after which listing should // begin. // ?max-parts - Maximum parts to be listed per request. -func (c Client) listObjectPartsQuery(ctx context.Context, bucketName, objectName, uploadID string, partNumberMarker, maxParts int) (ListObjectPartsResult, error) { +func (c *Client) listObjectPartsQuery(ctx context.Context, bucketName, objectName, uploadID string, partNumberMarker, maxParts int) (ListObjectPartsResult, error) { // Get resources properly escaped and lined up before using them in http request. urlValues := make(url.Values) // Set part number marker. diff --git a/vendor/github.com/minio/minio-go/v7/api-object-legal-hold.go b/vendor/github.com/minio/minio-go/v7/api-object-legal-hold.go index b139c1687b52c..0c027d550f749 100644 --- a/vendor/github.com/minio/minio-go/v7/api-object-legal-hold.go +++ b/vendor/github.com/minio/minio-go/v7/api-object-legal-hold.go @@ -81,7 +81,7 @@ func newObjectLegalHold(status *LegalHoldStatus) (*objectLegalHold, error) { } // PutObjectLegalHold : sets object legal hold for a given object and versionID. -func (c Client) PutObjectLegalHold(ctx context.Context, bucketName, objectName string, opts PutObjectLegalHoldOptions) error { +func (c *Client) PutObjectLegalHold(ctx context.Context, bucketName, objectName string, opts PutObjectLegalHoldOptions) error { // Input validation. 
if err := s3utils.CheckValidBucketName(bucketName); err != nil { return err @@ -135,7 +135,7 @@ func (c Client) PutObjectLegalHold(ctx context.Context, bucketName, objectName s } // GetObjectLegalHold gets legal-hold status of given object. -func (c Client) GetObjectLegalHold(ctx context.Context, bucketName, objectName string, opts GetObjectLegalHoldOptions) (status *LegalHoldStatus, err error) { +func (c *Client) GetObjectLegalHold(ctx context.Context, bucketName, objectName string, opts GetObjectLegalHoldOptions) (status *LegalHoldStatus, err error) { // Input validation. if err := s3utils.CheckValidBucketName(bucketName); err != nil { return nil, err diff --git a/vendor/github.com/minio/minio-go/v7/api-object-lock.go b/vendor/github.com/minio/minio-go/v7/api-object-lock.go index 29f52b0546d7c..f0a439853f9cd 100644 --- a/vendor/github.com/minio/minio-go/v7/api-object-lock.go +++ b/vendor/github.com/minio/minio-go/v7/api-object-lock.go @@ -139,7 +139,7 @@ func newObjectLockConfig(mode *RetentionMode, validity *uint, unit *ValidityUnit } // SetBucketObjectLockConfig sets object lock configuration in given bucket. mode, validity and unit are either all set or all nil. -func (c Client) SetBucketObjectLockConfig(ctx context.Context, bucketName string, mode *RetentionMode, validity *uint, unit *ValidityUnit) error { +func (c *Client) SetBucketObjectLockConfig(ctx context.Context, bucketName string, mode *RetentionMode, validity *uint, unit *ValidityUnit) error { // Input validation. if err := s3utils.CheckValidBucketName(bucketName); err != nil { return err @@ -184,7 +184,7 @@ func (c Client) SetBucketObjectLockConfig(ctx context.Context, bucketName string } // GetObjectLockConfig gets object lock configuration of given bucket. -func (c Client) GetObjectLockConfig(ctx context.Context, bucketName string) (objectLock string, mode *RetentionMode, validity *uint, unit *ValidityUnit, err error) { +func (c *Client) GetObjectLockConfig(ctx context.Context, bucketName string) (objectLock string, mode *RetentionMode, validity *uint, unit *ValidityUnit, err error) { // Input validation. if err := s3utils.CheckValidBucketName(bucketName); err != nil { return "", nil, nil, nil, err @@ -230,12 +230,12 @@ func (c Client) GetObjectLockConfig(ctx context.Context, bucketName string) (obj } // GetBucketObjectLockConfig gets object lock configuration of given bucket. -func (c Client) GetBucketObjectLockConfig(ctx context.Context, bucketName string) (mode *RetentionMode, validity *uint, unit *ValidityUnit, err error) { +func (c *Client) GetBucketObjectLockConfig(ctx context.Context, bucketName string) (mode *RetentionMode, validity *uint, unit *ValidityUnit, err error) { _, mode, validity, unit, err = c.GetObjectLockConfig(ctx, bucketName) return mode, validity, unit, err } // SetObjectLockConfig sets object lock configuration in given bucket. mode, validity and unit are either all set or all nil. 
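// A sketch of a typical call (assumes an initialized *Client named api, as in
// this package's other examples; the Governance and Days constants are assumed
// to come from this package):
//
//	mode := Governance
//	validity := uint(30)
//	unit := Days
//	err := api.SetObjectLockConfig(context.Background(), "mybucket", &mode, &validity, &unit)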
-func (c Client) SetObjectLockConfig(ctx context.Context, bucketName string, mode *RetentionMode, validity *uint, unit *ValidityUnit) error { +func (c *Client) SetObjectLockConfig(ctx context.Context, bucketName string, mode *RetentionMode, validity *uint, unit *ValidityUnit) error { return c.SetBucketObjectLockConfig(ctx, bucketName, mode, validity, unit) } diff --git a/vendor/github.com/minio/minio-go/v7/api-object-retention.go b/vendor/github.com/minio/minio-go/v7/api-object-retention.go index 54f2762de9292..b29cb1f8dce6c 100644 --- a/vendor/github.com/minio/minio-go/v7/api-object-retention.go +++ b/vendor/github.com/minio/minio-go/v7/api-object-retention.go @@ -63,7 +63,7 @@ type PutObjectRetentionOptions struct { } // PutObjectRetention sets object retention for a given object and versionID. -func (c Client) PutObjectRetention(ctx context.Context, bucketName, objectName string, opts PutObjectRetentionOptions) error { +func (c *Client) PutObjectRetention(ctx context.Context, bucketName, objectName string, opts PutObjectRetentionOptions) error { // Input validation. if err := s3utils.CheckValidBucketName(bucketName); err != nil { return err @@ -126,7 +126,7 @@ func (c Client) PutObjectRetention(ctx context.Context, bucketName, objectName s } // GetObjectRetention gets retention of given object. -func (c Client) GetObjectRetention(ctx context.Context, bucketName, objectName, versionID string) (mode *RetentionMode, retainUntilDate *time.Time, err error) { +func (c *Client) GetObjectRetention(ctx context.Context, bucketName, objectName, versionID string) (mode *RetentionMode, retainUntilDate *time.Time, err error) { // Input validation. if err := s3utils.CheckValidBucketName(bucketName); err != nil { return nil, nil, err diff --git a/vendor/github.com/minio/minio-go/v7/api-object-tagging.go b/vendor/github.com/minio/minio-go/v7/api-object-tagging.go index 2709efcd1278f..305c36de85615 100644 --- a/vendor/github.com/minio/minio-go/v7/api-object-tagging.go +++ b/vendor/github.com/minio/minio-go/v7/api-object-tagging.go @@ -36,7 +36,7 @@ type PutObjectTaggingOptions struct { // PutObjectTagging replaces or creates object tag(s) and can target // a specific object version in a versioned bucket. -func (c Client) PutObjectTagging(ctx context.Context, bucketName, objectName string, otags *tags.Tags, opts PutObjectTaggingOptions) error { +func (c *Client) PutObjectTagging(ctx context.Context, bucketName, objectName string, otags *tags.Tags, opts PutObjectTaggingOptions) error { // Input validation. if err := s3utils.CheckValidBucketName(bucketName); err != nil { return err @@ -87,7 +87,7 @@ type GetObjectTaggingOptions struct { // GetObjectTagging fetches object tag(s) with options to target // a specific object version in a versioned bucket. -func (c Client) GetObjectTagging(ctx context.Context, bucketName, objectName string, opts GetObjectTaggingOptions) (*tags.Tags, error) { +func (c *Client) GetObjectTagging(ctx context.Context, bucketName, objectName string, opts GetObjectTaggingOptions) (*tags.Tags, error) { // Get resources properly escaped and lined up before // using them in http request. 
urlValues := make(url.Values) @@ -125,7 +125,7 @@ type RemoveObjectTaggingOptions struct { // RemoveObjectTagging removes object tag(s) with options to control a specific object // version in a versioned bucket -func (c Client) RemoveObjectTagging(ctx context.Context, bucketName, objectName string, opts RemoveObjectTaggingOptions) error { +func (c *Client) RemoveObjectTagging(ctx context.Context, bucketName, objectName string, opts RemoveObjectTaggingOptions) error { // Get resources properly escaped and lined up before // using them in http request. urlValues := make(url.Values) diff --git a/vendor/github.com/minio/minio-go/v7/api-presigned.go b/vendor/github.com/minio/minio-go/v7/api-presigned.go index 80c363da5b788..2e4bacf126319 100644 --- a/vendor/github.com/minio/minio-go/v7/api-presigned.go +++ b/vendor/github.com/minio/minio-go/v7/api-presigned.go @@ -30,7 +30,7 @@ import ( // presignURL - Returns a presigned URL for an input 'method'. // Expires maximum is 7days - ie. 604800 and minimum is 1. -func (c Client) presignURL(ctx context.Context, method string, bucketName string, objectName string, expires time.Duration, reqParams url.Values) (u *url.URL, err error) { +func (c *Client) presignURL(ctx context.Context, method string, bucketName string, objectName string, expires time.Duration, reqParams url.Values, extraHeaders http.Header) (u *url.URL, err error) { // Input validation. if method == "" { return nil, errInvalidArgument("method cannot be empty.") @@ -45,11 +45,12 @@ func (c Client) presignURL(ctx context.Context, method string, bucketName string // Convert expires into seconds. expireSeconds := int64(expires / time.Second) reqMetadata := requestMetadata{ - presignURL: true, - bucketName: bucketName, - objectName: objectName, - expires: expireSeconds, - queryValues: reqParams, + presignURL: true, + bucketName: bucketName, + objectName: objectName, + expires: expireSeconds, + queryValues: reqParams, + extraPresignHeader: extraHeaders, } // Instantiate a new request. @@ -65,43 +66,54 @@ func (c Client) presignURL(ctx context.Context, method string, bucketName string // data without credentials. URL can have a maximum expiry of // upto 7days or a minimum of 1sec. Additionally you can override // a set of response headers using the query parameters. -func (c Client) PresignedGetObject(ctx context.Context, bucketName string, objectName string, expires time.Duration, reqParams url.Values) (u *url.URL, err error) { +func (c *Client) PresignedGetObject(ctx context.Context, bucketName string, objectName string, expires time.Duration, reqParams url.Values) (u *url.URL, err error) { if err = s3utils.CheckValidObjectName(objectName); err != nil { return nil, err } - return c.presignURL(ctx, http.MethodGet, bucketName, objectName, expires, reqParams) + return c.presignURL(ctx, http.MethodGet, bucketName, objectName, expires, reqParams, nil) } // PresignedHeadObject - Returns a presigned URL to access // object metadata without credentials. URL can have a maximum expiry // of upto 7days or a minimum of 1sec. Additionally you can override // a set of response headers using the query parameters. 
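// A sketch (assumes an initialized *Client named api, matching this package's
// other examples):
//
//	reqParams := make(url.Values)
//	reqParams.Set("response-content-type", "application/octet-stream")
//	presignedURL, err := api.PresignedHeadObject(context.Background(), "mybucket", "myobject", time.Hour, reqParams)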
-func (c Client) PresignedHeadObject(ctx context.Context, bucketName string, objectName string, expires time.Duration, reqParams url.Values) (u *url.URL, err error) {
+func (c *Client) PresignedHeadObject(ctx context.Context, bucketName string, objectName string, expires time.Duration, reqParams url.Values) (u *url.URL, err error) {
 	if err = s3utils.CheckValidObjectName(objectName); err != nil {
 		return nil, err
 	}
-	return c.presignURL(ctx, http.MethodHead, bucketName, objectName, expires, reqParams)
+	return c.presignURL(ctx, http.MethodHead, bucketName, objectName, expires, reqParams, nil)
 }
 
 // PresignedPutObject - Returns a presigned URL to upload an object
 // without credentials. URL can have a maximum expiry of upto 7days
 // or a minimum of 1sec.
-func (c Client) PresignedPutObject(ctx context.Context, bucketName string, objectName string, expires time.Duration) (u *url.URL, err error) {
+func (c *Client) PresignedPutObject(ctx context.Context, bucketName string, objectName string, expires time.Duration) (u *url.URL, err error) {
 	if err = s3utils.CheckValidObjectName(objectName); err != nil {
 		return nil, err
 	}
-	return c.presignURL(ctx, http.MethodPut, bucketName, objectName, expires, nil)
+	return c.presignURL(ctx, http.MethodPut, bucketName, objectName, expires, nil, nil)
 }
 
-// Presign - returns a presigned URL for any http method of your choice
-// along with custom request params. URL can have a maximum expiry of
-// upto 7days or a minimum of 1sec.
-func (c Client) Presign(ctx context.Context, method string, bucketName string, objectName string, expires time.Duration, reqParams url.Values) (u *url.URL, err error) {
-	return c.presignURL(ctx, method, bucketName, objectName, expires, reqParams)
+// PresignHeader - similar to Presign() but allows including HTTP headers that
+// will be used to build the signature. The request using the resulting URL will
+// need to have the exact same headers to be added for signature validation to
+// pass.
+//
+// FIXME: The extra header parameter should be included in Presign() in the next
+// major version bump, and this function should then be deprecated.
+func (c *Client) PresignHeader(ctx context.Context, method string, bucketName string, objectName string, expires time.Duration, reqParams url.Values, extraHeaders http.Header) (u *url.URL, err error) {
+	return c.presignURL(ctx, method, bucketName, objectName, expires, reqParams, extraHeaders)
+}
+
+// Presign - returns a presigned URL for any http method of your choice along
+// with custom request params; for extra signed headers use PresignHeader. URL
+// can have a maximum expiry of up to 7 days or a minimum of 1 sec.
+func (c *Client) Presign(ctx context.Context, method string, bucketName string, objectName string, expires time.Duration, reqParams url.Values) (u *url.URL, err error) {
+	return c.presignURL(ctx, method, bucketName, objectName, expires, reqParams, nil)
 }
 
 // PresignedPostPolicy - Returns POST urlString, form data to upload an object.
-func (c Client) PresignedPostPolicy(ctx context.Context, p *PostPolicy) (u *url.URL, formData map[string]string, err error) {
+func (c *Client) PresignedPostPolicy(ctx context.Context, p *PostPolicy) (u *url.URL, formData map[string]string, err error) {
 	// Validate input arguments.
if p.expiration.IsZero() { return nil, nil, errors.New("Expiration time must be specified") diff --git a/vendor/github.com/minio/minio-go/v7/api-put-bucket.go b/vendor/github.com/minio/minio-go/v7/api-put-bucket.go index df9fe98afa896..1a6db3e10d57c 100644 --- a/vendor/github.com/minio/minio-go/v7/api-put-bucket.go +++ b/vendor/github.com/minio/minio-go/v7/api-put-bucket.go @@ -26,8 +26,8 @@ import ( "github.com/minio/minio-go/v7/pkg/s3utils" ) -/// Bucket operations -func (c Client) makeBucket(ctx context.Context, bucketName string, opts MakeBucketOptions) (err error) { +// Bucket operations +func (c *Client) makeBucket(ctx context.Context, bucketName string, opts MakeBucketOptions) (err error) { // Validate the input arguments. if err := s3utils.CheckValidBucketNameStrict(bucketName); err != nil { return err @@ -42,7 +42,7 @@ func (c Client) makeBucket(ctx context.Context, bucketName string, opts MakeBuck return err } -func (c Client) doMakeBucket(ctx context.Context, bucketName string, location string, objectLockEnabled bool) (err error) { +func (c *Client) doMakeBucket(ctx context.Context, bucketName string, location string, objectLockEnabled bool) (err error) { defer func() { // Save the location into cache on a successful makeBucket response. if err == nil { @@ -118,6 +118,6 @@ type MakeBucketOptions struct { // // For Amazon S3 for more supported regions - http://docs.aws.amazon.com/general/latest/gr/rande.html // For Google Cloud Storage for more supported regions - https://cloud.google.com/storage/docs/bucket-locations -func (c Client) MakeBucket(ctx context.Context, bucketName string, opts MakeBucketOptions) (err error) { +func (c *Client) MakeBucket(ctx context.Context, bucketName string, opts MakeBucketOptions) (err error) { return c.makeBucket(ctx, bucketName, opts) } diff --git a/vendor/github.com/minio/minio-go/v7/api-put-object-common.go b/vendor/github.com/minio/minio-go/v7/api-put-object-common.go index 3d0408e534fe7..149a536e42f00 100644 --- a/vendor/github.com/minio/minio-go/v7/api-put-object-common.go +++ b/vendor/github.com/minio/minio-go/v7/api-put-object-common.go @@ -26,6 +26,8 @@ import ( "github.com/minio/minio-go/v7/pkg/s3utils" ) +const nullVersionID = "null" + // Verify if reader is *minio.Object func isObject(reader io.Reader) (ok bool) { _, ok = reader.(*Object) @@ -57,17 +59,17 @@ func isReadAt(reader io.Reader) (ok bool) { return } -// optimalPartInfo - calculate the optimal part info for a given +// OptimalPartInfo - calculate the optimal part info for a given // object size. // // NOTE: Assumption here is that for any object to be uploaded to any S3 compatible // object storage it will have the following parameters as constants. // // maxPartsCount - 10000 -// minPartSize - 128MiB +// minPartSize - 16MiB // maxMultipartPutObjectSize - 5TiB // -func optimalPartInfo(objectSize int64, configuredPartSize uint64) (totalPartsCount int, partSize int64, lastPartSize int64, err error) { +func OptimalPartInfo(objectSize int64, configuredPartSize uint64) (totalPartsCount int, partSize int64, lastPartSize int64, err error) { // object size is '-1' set it to 5TiB. var unknownSize bool if objectSize == -1 { @@ -130,7 +132,7 @@ func optimalPartInfo(objectSize int64, configuredPartSize uint64) (totalPartsCou // getUploadID - fetch upload id if already present for an object name // or initiate a new request to fetch a new upload id. 
-func (c Client) newUploadID(ctx context.Context, bucketName, objectName string, opts PutObjectOptions) (uploadID string, err error) { +func (c *Client) newUploadID(ctx context.Context, bucketName, objectName string, opts PutObjectOptions) (uploadID string, err error) { // Input validation. if err := s3utils.CheckValidBucketName(bucketName); err != nil { return "", err diff --git a/vendor/github.com/minio/minio-go/v7/api-put-object-file-context.go b/vendor/github.com/minio/minio-go/v7/api-put-object-file-context.go index 6c0f20df37b42..4d29dfc18a449 100644 --- a/vendor/github.com/minio/minio-go/v7/api-put-object-file-context.go +++ b/vendor/github.com/minio/minio-go/v7/api-put-object-file-context.go @@ -27,7 +27,7 @@ import ( ) // FPutObject - Create an object in a bucket, with contents from file at filePath. Allows request cancellation. -func (c Client) FPutObject(ctx context.Context, bucketName, objectName, filePath string, opts PutObjectOptions) (info UploadInfo, err error) { +func (c *Client) FPutObject(ctx context.Context, bucketName, objectName, filePath string, opts PutObjectOptions) (info UploadInfo, err error) { // Input validation. if err := s3utils.CheckValidBucketName(bucketName); err != nil { return UploadInfo{}, err diff --git a/vendor/github.com/minio/minio-go/v7/api-put-object-multipart.go b/vendor/github.com/minio/minio-go/v7/api-put-object-multipart.go index 1c862ad968b1b..342a8dc2b8091 100644 --- a/vendor/github.com/minio/minio-go/v7/api-put-object-multipart.go +++ b/vendor/github.com/minio/minio-go/v7/api-put-object-multipart.go @@ -32,12 +32,14 @@ import ( "strconv" "strings" + "github.com/google/uuid" "github.com/minio/minio-go/v7/pkg/encrypt" "github.com/minio/minio-go/v7/pkg/s3utils" ) -func (c Client) putObjectMultipart(ctx context.Context, bucketName, objectName string, reader io.Reader, size int64, - opts PutObjectOptions) (info UploadInfo, err error) { +func (c *Client) putObjectMultipart(ctx context.Context, bucketName, objectName string, reader io.Reader, size int64, + opts PutObjectOptions, +) (info UploadInfo, err error) { info, err = c.putObjectMultipartNoStream(ctx, bucketName, objectName, reader, opts) if err != nil { errResp := ToErrorResponse(err) @@ -55,7 +57,7 @@ func (c Client) putObjectMultipart(ctx context.Context, bucketName, objectName s return info, err } -func (c Client) putObjectMultipartNoStream(ctx context.Context, bucketName, objectName string, reader io.Reader, opts PutObjectOptions) (info UploadInfo, err error) { +func (c *Client) putObjectMultipartNoStream(ctx context.Context, bucketName, objectName string, reader io.Reader, opts PutObjectOptions) (info UploadInfo, err error) { // Input validation. if err = s3utils.CheckValidBucketName(bucketName); err != nil { return UploadInfo{}, err @@ -72,7 +74,7 @@ func (c Client) putObjectMultipartNoStream(ctx context.Context, bucketName, obje var complMultipartUpload completeMultipartUpload // Calculate the optimal parts info for a given size. - totalPartsCount, partSize, _, err := optimalPartInfo(-1, opts.PartSize) + totalPartsCount, partSize, _, err := OptimalPartInfo(-1, opts.PartSize) if err != nil { return UploadInfo{}, err } @@ -175,7 +177,7 @@ func (c Client) putObjectMultipartNoStream(ctx context.Context, bucketName, obje // Sort all completed parts. 
sort.Sort(completedParts(complMultipartUpload.Parts)) - uploadInfo, err := c.completeMultipartUpload(ctx, bucketName, objectName, uploadID, complMultipartUpload) + uploadInfo, err := c.completeMultipartUpload(ctx, bucketName, objectName, uploadID, complMultipartUpload, PutObjectOptions{}) if err != nil { return UploadInfo{}, err } @@ -185,7 +187,7 @@ func (c Client) putObjectMultipartNoStream(ctx context.Context, bucketName, obje } // initiateMultipartUpload - Initiates a multipart upload and returns an upload ID. -func (c Client) initiateMultipartUpload(ctx context.Context, bucketName, objectName string, opts PutObjectOptions) (initiateMultipartUploadResult, error) { +func (c *Client) initiateMultipartUpload(ctx context.Context, bucketName, objectName string, opts PutObjectOptions) (initiateMultipartUploadResult, error) { // Input validation. if err := s3utils.CheckValidBucketName(bucketName); err != nil { return initiateMultipartUploadResult{}, err @@ -198,6 +200,15 @@ func (c Client) initiateMultipartUpload(ctx context.Context, bucketName, objectN urlValues := make(url.Values) urlValues.Set("uploads", "") + if opts.Internal.SourceVersionID != "" { + if opts.Internal.SourceVersionID != nullVersionID { + if _, err := uuid.Parse(opts.Internal.SourceVersionID); err != nil { + return initiateMultipartUploadResult{}, errInvalidArgument(err.Error()) + } + } + urlValues.Set("versionId", opts.Internal.SourceVersionID) + } + // Set ContentType header. customHeader := opts.Header() @@ -229,8 +240,9 @@ func (c Client) initiateMultipartUpload(ctx context.Context, bucketName, objectN } // uploadPart - Uploads a part in a multipart upload. -func (c Client) uploadPart(ctx context.Context, bucketName, objectName, uploadID string, reader io.Reader, - partNumber int, md5Base64, sha256Hex string, size int64, sse encrypt.ServerSide) (ObjectPart, error) { +func (c *Client) uploadPart(ctx context.Context, bucketName, objectName, uploadID string, reader io.Reader, + partNumber int, md5Base64, sha256Hex string, size int64, sse encrypt.ServerSide, +) (ObjectPart, error) { // Input validation. if err := s3utils.CheckValidBucketName(bucketName); err != nil { return ObjectPart{}, err @@ -300,8 +312,9 @@ func (c Client) uploadPart(ctx context.Context, bucketName, objectName, uploadID } // completeMultipartUpload - Completes a multipart upload by assembling previously uploaded parts. -func (c Client) completeMultipartUpload(ctx context.Context, bucketName, objectName, uploadID string, - complete completeMultipartUpload) (UploadInfo, error) { +func (c *Client) completeMultipartUpload(ctx context.Context, bucketName, objectName, uploadID string, + complete completeMultipartUpload, opts PutObjectOptions, +) (UploadInfo, error) { // Input validation. if err := s3utils.CheckValidBucketName(bucketName); err != nil { return UploadInfo{}, err @@ -328,6 +341,7 @@ func (c Client) completeMultipartUpload(ctx context.Context, bucketName, objectN contentBody: completeMultipartUploadBuffer, contentLength: int64(len(completeMultipartUploadBytes)), contentSHA256Hex: sum256Hex(completeMultipartUploadBytes), + customHeader: opts.Header(), } // Execute POST to complete multipart upload for an objectName. 
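	// The customHeader field set above forwards opts.Header(), so callers of
	// completeMultipartUpload can now attach extra headers to this completing POST.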
@@ -381,5 +395,4 @@ func (c Client) completeMultipartUpload(ctx context.Context, bucketName, objectN Expiration: expTime, ExpirationRuleID: ruleID, }, nil - } diff --git a/vendor/github.com/minio/minio-go/v7/api-put-object-streaming.go b/vendor/github.com/minio/minio-go/v7/api-put-object-streaming.go index f4694fc5525ee..2497aecf3df34 100644 --- a/vendor/github.com/minio/minio-go/v7/api-put-object-streaming.go +++ b/vendor/github.com/minio/minio-go/v7/api-put-object-streaming.go @@ -41,9 +41,9 @@ import ( // - *minio.Object // - Any reader which has a method 'ReadAt()' // -func (c Client) putObjectMultipartStream(ctx context.Context, bucketName, objectName string, - reader io.Reader, size int64, opts PutObjectOptions) (info UploadInfo, err error) { - +func (c *Client) putObjectMultipartStream(ctx context.Context, bucketName, objectName string, + reader io.Reader, size int64, opts PutObjectOptions, +) (info UploadInfo, err error) { if !isObject(reader) && isReadAt(reader) && !opts.SendContentMd5 { // Verify if the reader implements ReadAt and it is not a *minio.Object then we will use parallel uploader. info, err = c.putObjectMultipartStreamFromReadAt(ctx, bucketName, objectName, reader.(io.ReaderAt), size, opts) @@ -90,8 +90,9 @@ type uploadPartReq struct { // temporary files for staging all the data, these temporary files are // cleaned automatically when the caller i.e http client closes the // stream after uploading all the contents successfully. -func (c Client) putObjectMultipartStreamFromReadAt(ctx context.Context, bucketName, objectName string, - reader io.ReaderAt, size int64, opts PutObjectOptions) (info UploadInfo, err error) { +func (c *Client) putObjectMultipartStreamFromReadAt(ctx context.Context, bucketName, objectName string, + reader io.ReaderAt, size int64, opts PutObjectOptions, +) (info UploadInfo, err error) { // Input validation. if err = s3utils.CheckValidBucketName(bucketName); err != nil { return UploadInfo{}, err @@ -101,7 +102,7 @@ func (c Client) putObjectMultipartStreamFromReadAt(ctx context.Context, bucketNa } // Calculate the optimal parts info for a given size. - totalPartsCount, partSize, lastPartSize, err := optimalPartInfo(size, opts.PartSize) + totalPartsCount, partSize, lastPartSize, err := OptimalPartInfo(size, opts.PartSize) if err != nil { return UploadInfo{}, err } @@ -147,7 +148,7 @@ func (c Client) putObjectMultipartStreamFromReadAt(ctx context.Context, bucketNa } close(uploadPartsCh) - var partsBuf = make([][]byte, opts.getNumThreads()) + partsBuf := make([][]byte, opts.getNumThreads()) for i := range partsBuf { partsBuf[i] = make([]byte, 0, partSize) } @@ -171,7 +172,7 @@ func (c Client) putObjectMultipartStreamFromReadAt(ctx context.Context, bucketNa } n, rerr := readFull(io.NewSectionReader(reader, readOffset, partSize), partsBuf[w-1][:partSize]) - if rerr != nil && rerr != io.ErrUnexpectedEOF && err != io.EOF { + if rerr != nil && rerr != io.ErrUnexpectedEOF && rerr != io.EOF { uploadedPartsCh <- uploadedPartRes{ Error: rerr, } @@ -231,7 +232,7 @@ func (c Client) putObjectMultipartStreamFromReadAt(ctx context.Context, bucketNa // Sort all completed parts. 
sort.Sort(completedParts(complMultipartUpload.Parts)) - uploadInfo, err := c.completeMultipartUpload(ctx, bucketName, objectName, uploadID, complMultipartUpload) + uploadInfo, err := c.completeMultipartUpload(ctx, bucketName, objectName, uploadID, complMultipartUpload, PutObjectOptions{}) if err != nil { return UploadInfo{}, err } @@ -240,8 +241,9 @@ func (c Client) putObjectMultipartStreamFromReadAt(ctx context.Context, bucketNa return uploadInfo, nil } -func (c Client) putObjectMultipartStreamOptionalChecksum(ctx context.Context, bucketName, objectName string, - reader io.Reader, size int64, opts PutObjectOptions) (info UploadInfo, err error) { +func (c *Client) putObjectMultipartStreamOptionalChecksum(ctx context.Context, bucketName, objectName string, + reader io.Reader, size int64, opts PutObjectOptions, +) (info UploadInfo, err error) { // Input validation. if err = s3utils.CheckValidBucketName(bucketName); err != nil { return UploadInfo{}, err @@ -251,7 +253,7 @@ func (c Client) putObjectMultipartStreamOptionalChecksum(ctx context.Context, bu } // Calculate the optimal parts info for a given size. - totalPartsCount, partSize, lastPartSize, err := optimalPartInfo(size, opts.PartSize) + totalPartsCount, partSize, lastPartSize, err := OptimalPartInfo(size, opts.PartSize) if err != nil { return UploadInfo{}, err } @@ -358,7 +360,7 @@ func (c Client) putObjectMultipartStreamOptionalChecksum(ctx context.Context, bu // Sort all completed parts. sort.Sort(completedParts(complMultipartUpload.Parts)) - uploadInfo, err := c.completeMultipartUpload(ctx, bucketName, objectName, uploadID, complMultipartUpload) + uploadInfo, err := c.completeMultipartUpload(ctx, bucketName, objectName, uploadID, complMultipartUpload, PutObjectOptions{}) if err != nil { return UploadInfo{}, err } @@ -369,7 +371,7 @@ func (c Client) putObjectMultipartStreamOptionalChecksum(ctx context.Context, bu // putObject special function used Google Cloud Storage. This special function // is used for Google Cloud Storage since Google's multipart API is not S3 compatible. -func (c Client) putObject(ctx context.Context, bucketName, objectName string, reader io.Reader, size int64, opts PutObjectOptions) (info UploadInfo, err error) { +func (c *Client) putObject(ctx context.Context, bucketName, objectName string, reader io.Reader, size int64, opts PutObjectOptions) (info UploadInfo, err error) { // Input validation. if err := s3utils.CheckValidBucketName(bucketName); err != nil { return UploadInfo{}, err @@ -430,7 +432,7 @@ func (c Client) putObject(ctx context.Context, bucketName, objectName string, re // putObjectDo - executes the put object http operation. // NOTE: You must have WRITE permissions on a bucket to add an object to it. -func (c Client) putObjectDo(ctx context.Context, bucketName, objectName string, reader io.Reader, md5Base64, sha256Hex string, size int64, opts PutObjectOptions) (UploadInfo, error) { +func (c *Client) putObjectDo(ctx context.Context, bucketName, objectName string, reader io.Reader, md5Base64, sha256Hex string, size int64, opts PutObjectOptions) (UploadInfo, error) { // Input validation. 
	if err := s3utils.CheckValidBucketName(bucketName); err != nil {
		return UploadInfo{}, err
@@ -452,8 +454,10 @@ func (c Client) putObjectDo(ctx context.Context, bucketName, objectName string,
		contentSHA256Hex: sha256Hex,
	}
	if opts.Internal.SourceVersionID != "" {
-		if _, err := uuid.Parse(opts.Internal.SourceVersionID); err != nil {
-			return UploadInfo{}, errInvalidArgument(err.Error())
+		if opts.Internal.SourceVersionID != nullVersionID {
+			if _, err := uuid.Parse(opts.Internal.SourceVersionID); err != nil {
+				return UploadInfo{}, errInvalidArgument(err.Error())
+			}
		}
		urlValues := make(url.Values)
		urlValues.Set("versionId", opts.Internal.SourceVersionID)
diff --git a/vendor/github.com/minio/minio-go/v7/api-put-object.go b/vendor/github.com/minio/minio-go/v7/api-put-object.go
index 0cbb0a7a30f7d..0dc77e6c36281 100644
--- a/vendor/github.com/minio/minio-go/v7/api-put-object.go
+++ b/vendor/github.com/minio/minio-go/v7/api-put-object.go
@@ -55,10 +55,14 @@ func (r ReplicationStatus) Empty() bool {
// AdvancedPutOptions for internal use - to be utilized by replication, ILM transition
// implementation on MinIO server
type AdvancedPutOptions struct {
-	SourceVersionID   string
-	SourceETag        string
-	ReplicationStatus ReplicationStatus
-	SourceMTime       time.Time
+	SourceVersionID    string
+	SourceETag         string
+	ReplicationStatus  ReplicationStatus
+	SourceMTime        time.Time
+	ReplicationRequest bool
+	RetentionTimestamp time.Time
+	TaggingTimestamp   time.Time
+	LegalholdTimestamp time.Time
}

// PutObjectOptions represents options specified by user for PutObject call
@@ -152,6 +156,19 @@ func (opts PutObjectOptions) Header() (header http.Header) {
	if opts.Internal.SourceETag != "" {
		header.Set(minIOBucketSourceETag, opts.Internal.SourceETag)
	}
+	if opts.Internal.ReplicationRequest {
+		header.Set(minIOBucketReplicationRequest, "")
+	}
+	if !opts.Internal.LegalholdTimestamp.IsZero() {
+		header.Set(minIOBucketReplicationObjectLegalHoldTimestamp, opts.Internal.LegalholdTimestamp.Format(time.RFC3339Nano))
+	}
+	if !opts.Internal.RetentionTimestamp.IsZero() {
+		header.Set(minIOBucketReplicationObjectRetentionTimestamp, opts.Internal.RetentionTimestamp.Format(time.RFC3339Nano))
+	}
+	if !opts.Internal.TaggingTimestamp.IsZero() {
+		header.Set(minIOBucketReplicationTaggingTimestamp, opts.Internal.TaggingTimestamp.Format(time.RFC3339Nano))
+	}
+
	if len(opts.UserTags) != 0 {
		header.Set(amzTaggingHeader, s3utils.TagEncode(opts.UserTags))
	}
@@ -197,15 +214,23 @@ func (a completedParts) Less(i, j int) bool { return a[i].PartNumber < a[j].Part
//
// You must have WRITE permissions on a bucket to create an object.
//
-//  - For size smaller than 128MiB PutObject automatically does a
-//    single atomic Put operation.
-//  - For size larger than 128MiB PutObject automatically does a
-//    multipart Put operation.
+//  - For size smaller than 16MiB PutObject automatically does a
+//    single atomic PUT operation.
+//
+//  - For size larger than 16MiB PutObject automatically does a
+//    multipart upload operation.
+//
//  - For size input as -1 PutObject does a multipart Put operation
//    until input stream reaches EOF. Maximum object size that can
//    be uploaded through this operation will be 5TiB.
-func (c Client) PutObject(ctx context.Context, bucketName, objectName string, reader io.Reader, objectSize int64,
-	opts PutObjectOptions) (info UploadInfo, err error) {
+//
+// WARNING: Passing '-1' as the size buffers the stream in memory, and those
+// buffers cannot be reused; for best results with PutObject(), always pass the size.
+// +// NOTE: Upon errors during upload multipart operation is entirely aborted. +func (c *Client) PutObject(ctx context.Context, bucketName, objectName string, reader io.Reader, objectSize int64, + opts PutObjectOptions, +) (info UploadInfo, err error) { if objectSize < 0 && opts.DisableMultipart { return UploadInfo{}, errors.New("object size must be provided with disable multipart upload") } @@ -218,7 +243,7 @@ func (c Client) PutObject(ctx context.Context, bucketName, objectName string, re return c.putObjectCommon(ctx, bucketName, objectName, reader, objectSize, opts) } -func (c Client) putObjectCommon(ctx context.Context, bucketName, objectName string, reader io.Reader, size int64, opts PutObjectOptions) (info UploadInfo, err error) { +func (c *Client) putObjectCommon(ctx context.Context, bucketName, objectName string, reader io.Reader, size int64, opts PutObjectOptions) (info UploadInfo, err error) { // Check for largest object size allowed. if size > int64(maxMultipartPutObjectSize) { return UploadInfo{}, errEntityTooLarge(size, maxMultipartPutObjectSize, bucketName, objectName) @@ -252,7 +277,7 @@ func (c Client) putObjectCommon(ctx context.Context, bucketName, objectName stri return c.putObjectMultipartStream(ctx, bucketName, objectName, reader, size, opts) } -func (c Client) putObjectMultipartStreamNoLength(ctx context.Context, bucketName, objectName string, reader io.Reader, opts PutObjectOptions) (info UploadInfo, err error) { +func (c *Client) putObjectMultipartStreamNoLength(ctx context.Context, bucketName, objectName string, reader io.Reader, opts PutObjectOptions) (info UploadInfo, err error) { // Input validation. if err = s3utils.CheckValidBucketName(bucketName); err != nil { return UploadInfo{}, err @@ -269,7 +294,7 @@ func (c Client) putObjectMultipartStreamNoLength(ctx context.Context, bucketName var complMultipartUpload completeMultipartUpload // Calculate the optimal parts info for a given size. - totalPartsCount, partSize, _, err := optimalPartInfo(-1, opts.PartSize) + totalPartsCount, partSize, _, err := OptimalPartInfo(-1, opts.PartSize) if err != nil { return UploadInfo{}, err } @@ -356,7 +381,7 @@ func (c Client) putObjectMultipartStreamNoLength(ctx context.Context, bucketName // Sort all completed parts. sort.Sort(completedParts(complMultipartUpload.Parts)) - uploadInfo, err := c.completeMultipartUpload(ctx, bucketName, objectName, uploadID, complMultipartUpload) + uploadInfo, err := c.completeMultipartUpload(ctx, bucketName, objectName, uploadID, complMultipartUpload, PutObjectOptions{}) if err != nil { return UploadInfo{}, err } diff --git a/vendor/github.com/minio/minio-go/v7/api-putobject-snowball.go b/vendor/github.com/minio/minio-go/v7/api-putobject-snowball.go new file mode 100644 index 0000000000000..b7502e2d92700 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/api-putobject-snowball.go @@ -0,0 +1,215 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2021 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+	"archive/tar"
+	"bufio"
+	"bytes"
+	"context"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"os"
+	"strings"
+	"sync"
+	"time"
+
+	"github.com/klauspost/compress/s2"
+)
+
+// SnowballOptions contains options for PutObjectsSnowball calls.
+type SnowballOptions struct {
+	// Opts is options applied to all objects.
+	Opts PutObjectOptions
+
+	// Processing options:
+
+	// InMemory specifies that all objects should be collected in memory
+	// before they are uploaded.
+	// If false a temporary file will be created.
+	InMemory bool
+
+	// Compress enables content compression before upload.
+	// Compression will typically reduce memory and network usage.
+	// Compression can safely be enabled with MinIO hosts.
+	Compress bool
+}
+
+// SnowballObject contains information about a single object to be added to the snowball.
+type SnowballObject struct {
+	// Key is the destination key, including prefix.
+	Key string
+
+	// Size is the content size of this object.
+	Size int64
+
+	// Modtime to apply to the object.
+	ModTime time.Time
+
+	// Content of the object.
+	// Exactly 'Size' number of bytes must be provided.
+	Content io.Reader
+
+	// Close will be called when an object has finished processing.
+	// Note that if PutObjectsSnowball returns because of an error,
+	// objects not consumed from the input will NOT have been closed.
+	// Leave as nil for no callback.
+	Close func()
+}
+
+type nopReadSeekCloser struct {
+	io.ReadSeeker
+}
+
+func (n nopReadSeekCloser) Close() error {
+	return nil
+}
+
+// This is available as io.ReadSeekCloser from go1.16
+type readSeekCloser interface {
+	io.Reader
+	io.Closer
+	io.Seeker
+}
+
+// PutObjectsSnowball will put multiple objects with a single put call.
+// A (compressed) TAR file will be created which will contain multiple objects.
+// The key for each object will be used for the destination in the specified bucket.
+// Total size should be < 5TB.
+// This function blocks until 'objs' is closed and the content has been uploaded.
+func (c Client) PutObjectsSnowball(ctx context.Context, bucketName string, opts SnowballOptions, objs <-chan SnowballObject) (err error) {
+	err = opts.Opts.validate()
+	if err != nil {
+		return err
+	}
+	var tmpWriter io.Writer
+	var getTmpReader func() (rc readSeekCloser, sz int64, err error)
+	if opts.InMemory {
+		b := bytes.NewBuffer(nil)
+		tmpWriter = b
+		getTmpReader = func() (readSeekCloser, int64, error) {
+			return nopReadSeekCloser{bytes.NewReader(b.Bytes())}, int64(b.Len()), nil
+		}
+	} else {
+		f, err := ioutil.TempFile("", "s3-putsnowballobjects-*")
+		if err != nil {
+			return err
+		}
+		name := f.Name()
+		tmpWriter = f
+		var once sync.Once
+		defer once.Do(func() {
+			f.Close()
+		})
+		defer os.Remove(name)
+		getTmpReader = func() (readSeekCloser, int64, error) {
+			once.Do(func() {
+				f.Close()
+			})
+			f, err := os.Open(name)
+			if err != nil {
+				return nil, 0, err
+			}
+			st, err := f.Stat()
+			if err != nil {
+				return nil, 0, err
+			}
+			return f, st.Size(), nil
+		}
+	}
+	flush := func() error { return nil }
+	if !opts.Compress {
+		if !opts.InMemory {
+			// Insert buffer for writes.
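+			// A 1 MiB bufio.Writer batches the tar writer's many small
+			// writes before they reach the temporary file.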
+ buf := bufio.NewWriterSize(tmpWriter, 1<<20) + flush = buf.Flush + tmpWriter = buf + } + } else { + s2c := s2.NewWriter(tmpWriter, s2.WriterBetterCompression()) + flush = s2c.Close + defer s2c.Close() + tmpWriter = s2c + } + t := tar.NewWriter(tmpWriter) + +objectLoop: + for { + select { + case <-ctx.Done(): + return ctx.Err() + case obj, ok := <-objs: + if !ok { + break objectLoop + } + + closeObj := func() {} + if obj.Close != nil { + closeObj = obj.Close + } + + // Trim accidental slash prefix. + obj.Key = strings.TrimPrefix(obj.Key, "/") + header := tar.Header{ + Typeflag: tar.TypeReg, + Name: obj.Key, + Size: obj.Size, + ModTime: obj.ModTime, + Format: tar.FormatPAX, + } + if err := t.WriteHeader(&header); err != nil { + closeObj() + return err + } + n, err := io.Copy(t, obj.Content) + if err != nil { + closeObj() + return err + } + if n != obj.Size { + closeObj() + return io.ErrUnexpectedEOF + } + closeObj() + } + } + // Flush tar + err = t.Flush() + if err != nil { + return err + } + // Flush compression + err = flush() + if err != nil { + return err + } + if opts.Opts.UserMetadata == nil { + opts.Opts.UserMetadata = map[string]string{} + } + opts.Opts.UserMetadata["X-Amz-Meta-Snowball-Auto-Extract"] = "true" + opts.Opts.DisableMultipart = true + rc, sz, err := getTmpReader() + if err != nil { + return err + } + defer rc.Close() + rand := c.random.Uint64() + _, err = c.PutObject(ctx, bucketName, fmt.Sprintf("snowball-upload-%x.tar", rand), rc, sz, opts.Opts) + return err +} diff --git a/vendor/github.com/minio/minio-go/v7/api-remove.go b/vendor/github.com/minio/minio-go/v7/api-remove.go index 7ac3f26aa835c..0fee90226b686 100644 --- a/vendor/github.com/minio/minio-go/v7/api-remove.go +++ b/vendor/github.com/minio/minio-go/v7/api-remove.go @@ -29,11 +29,62 @@ import ( "github.com/minio/minio-go/v7/pkg/s3utils" ) +//revive:disable + +// Deprecated: BucketOptions will be renamed to RemoveBucketOptions in future versions. +type BucketOptions = RemoveBucketOptions + +//revive:enable + +// RemoveBucketOptions special headers to purge buckets, only +// useful when endpoint is MinIO +type RemoveBucketOptions struct { + ForceDelete bool +} + +// RemoveBucketWithOptions deletes the bucket name. +// +// All objects (including all object versions and delete markers) +// in the bucket will be deleted forcibly if bucket options set +// ForceDelete to 'true'. +func (c *Client) RemoveBucketWithOptions(ctx context.Context, bucketName string, opts RemoveBucketOptions) error { + // Input validation. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return err + } + + // Build headers. + headers := make(http.Header) + if opts.ForceDelete { + headers.Set(minIOForceDelete, "true") + } + + // Execute DELETE on bucket. + resp, err := c.executeMethod(ctx, http.MethodDelete, requestMetadata{ + bucketName: bucketName, + contentSHA256Hex: emptySHA256Hex, + customHeader: headers, + }) + defer closeResponse(resp) + if err != nil { + return err + } + if resp != nil { + if resp.StatusCode != http.StatusNoContent { + return httpRespToErrorResponse(resp, bucketName, "") + } + } + + // Remove the location from cache on a successful delete. + c.bucketLocCache.Delete(bucketName) + return nil +} + // RemoveBucket deletes the bucket name. // // All objects (including all object versions and delete markers). // in the bucket must be deleted before successfully attempting this request. 
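Before the bucket and object removal changes continue, the snowball API added above is worth a usage sketch. A hedged fragment: the client is the placeholder one configured in the earlier sketch, the bucket and keys are made up, and the usual fmt/strings/time imports are assumed.

// Queue a few small objects; the SDK packs them into one (optionally
// S2-compressed) TAR and uploads it with a single PutObject call, which
// MinIO hosts auto-extract thanks to the snowball metadata header.
objs := make(chan minio.SnowballObject, 3)
go func() {
	defer close(objs)
	for i := 0; i < 3; i++ {
		body := fmt.Sprintf("payload %d\n", i)
		objs <- minio.SnowballObject{
			Key:     fmt.Sprintf("batch/object-%d.txt", i),
			Size:    int64(len(body)),
			ModTime: time.Now(),
			Content: strings.NewReader(body),
		}
	}
}()

// InMemory skips the temp file; Compress is safe against MinIO hosts.
sbOpts := minio.SnowballOptions{InMemory: true, Compress: true}
if err := client.PutObjectsSnowball(context.Background(), "my-bucket", sbOpts, objs); err != nil {
	log.Fatal(err)
}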
-func (c Client) RemoveBucket(ctx context.Context, bucketName string) error { +func (c *Client) RemoveBucket(ctx context.Context, bucketName string) error { // Input validation. if err := s3utils.CheckValidBucketName(bucketName); err != nil { return err @@ -64,17 +115,19 @@ type AdvancedRemoveOptions struct { ReplicationDeleteMarker bool ReplicationStatus ReplicationStatus ReplicationMTime time.Time + ReplicationRequest bool } // RemoveObjectOptions represents options specified by user for RemoveObject call type RemoveObjectOptions struct { + ForceDelete bool GovernanceBypass bool VersionID string Internal AdvancedRemoveOptions } // RemoveObject removes an object from a bucket. -func (c Client) RemoveObject(ctx context.Context, bucketName, objectName string, opts RemoveObjectOptions) error { +func (c *Client) RemoveObject(ctx context.Context, bucketName, objectName string, opts RemoveObjectOptions) error { // Input validation. if err := s3utils.CheckValidBucketName(bucketName); err != nil { return err @@ -83,11 +136,11 @@ func (c Client) RemoveObject(ctx context.Context, bucketName, objectName string, return err } - return c.removeObject(ctx, bucketName, objectName, opts) + res := c.removeObject(ctx, bucketName, objectName, opts) + return res.Err } -func (c Client) removeObject(ctx context.Context, bucketName, objectName string, opts RemoveObjectOptions) error { - +func (c *Client) removeObject(ctx context.Context, bucketName, objectName string, opts RemoveObjectOptions) RemoveObjectResult { // Get resources properly escaped and lined up before // using them in http request. urlValues := make(url.Values) @@ -112,6 +165,12 @@ func (c Client) removeObject(ctx context.Context, bucketName, objectName string, if !opts.Internal.ReplicationStatus.Empty() { headers.Set(amzBucketReplicationStatus, string(opts.Internal.ReplicationStatus)) } + if opts.Internal.ReplicationRequest { + headers.Set(minIOBucketReplicationRequest, "") + } + if opts.ForceDelete { + headers.Set(minIOForceDelete, "true") + } // Execute DELETE on objectName. resp, err := c.executeMethod(ctx, http.MethodDelete, requestMetadata{ bucketName: bucketName, @@ -122,19 +181,25 @@ func (c Client) removeObject(ctx context.Context, bucketName, objectName string, }) defer closeResponse(resp) if err != nil { - return err + return RemoveObjectResult{Err: err} } if resp != nil { // if some unexpected error happened and max retry is reached, we want to let client know if resp.StatusCode != http.StatusNoContent { - return httpRespToErrorResponse(resp, bucketName, objectName) + err := httpRespToErrorResponse(resp, bucketName, objectName) + return RemoveObjectResult{Err: err} } } // DeleteObject always responds with http '204' even for // objects which do not exist. So no need to handle them // specifically. 
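	// On versioned buckets the same response also carries
	// x-amz-delete-marker and x-amz-version-id, which the new
	// RemoveObjectResult below surfaces to callers.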
- return nil + return RemoveObjectResult{ + ObjectName: objectName, + ObjectVersionID: opts.VersionID, + DeleteMarker: resp.Header.Get("x-amz-delete-marker") == "true", + DeleteMarkerVersionID: resp.Header.Get("x-amz-version-id"), + } } // RemoveObjectError - container of Multi Delete S3 API error @@ -144,6 +209,17 @@ type RemoveObjectError struct { Err error } +// RemoveObjectResult - container of Multi Delete S3 API result +type RemoveObjectResult struct { + ObjectName string + ObjectVersionID string + + DeleteMarker bool + DeleteMarkerVersionID string + + Err error +} + // generateRemoveMultiObjects - generate the XML request for remove multi objects request func generateRemoveMultiObjectsRequest(objects []ObjectInfo) []byte { delObjects := []deleteObject{} @@ -153,21 +229,32 @@ func generateRemoveMultiObjectsRequest(objects []ObjectInfo) []byte { VersionID: obj.VersionID, }) } - xmlBytes, _ := xml.Marshal(deleteMultiObjects{Objects: delObjects, Quiet: true}) + xmlBytes, _ := xml.Marshal(deleteMultiObjects{Objects: delObjects, Quiet: false}) return xmlBytes } // processRemoveMultiObjectsResponse - parse the remove multi objects web service // and return the success/failure result status for each object -func processRemoveMultiObjectsResponse(body io.Reader, objects []ObjectInfo, errorCh chan<- RemoveObjectError) { +func processRemoveMultiObjectsResponse(body io.Reader, objects []ObjectInfo, resultCh chan<- RemoveObjectResult) { // Parse multi delete XML response rmResult := &deleteMultiObjectsResult{} err := xmlDecoder(body, rmResult) if err != nil { - errorCh <- RemoveObjectError{ObjectName: "", Err: err} + resultCh <- RemoveObjectResult{ObjectName: "", Err: err} return } + // Fill deletion that returned success + for _, obj := range rmResult.DeletedObjects { + resultCh <- RemoveObjectResult{ + ObjectName: obj.Key, + // Only filled with versioned buckets + ObjectVersionID: obj.VersionID, + DeleteMarker: obj.DeleteMarker, + DeleteMarkerVersionID: obj.DeleteMarkerVersionID, + } + } + // Fill deletion that returned an error. for _, obj := range rmResult.UnDeletedObjects { // Version does not exist is not an error ignore and continue. @@ -175,9 +262,9 @@ func processRemoveMultiObjectsResponse(body io.Reader, objects []ObjectInfo, err case "InvalidArgument", "NoSuchVersion": continue } - errorCh <- RemoveObjectError{ - ObjectName: obj.Key, - VersionID: obj.VersionID, + resultCh <- RemoveObjectResult{ + ObjectName: obj.Key, + ObjectVersionID: obj.VersionID, Err: ErrorResponse{ Code: obj.Code, Message: obj.Message, @@ -194,7 +281,7 @@ type RemoveObjectsOptions struct { // RemoveObjects removes multiple objects from a bucket while // it is possible to specify objects versions which are received from // objectsCh. Remove failures are sent back via error channel. -func (c Client) RemoveObjects(ctx context.Context, bucketName string, objectsCh <-chan ObjectInfo, opts RemoveObjectsOptions) <-chan RemoveObjectError { +func (c *Client) RemoveObjects(ctx context.Context, bucketName string, objectsCh <-chan ObjectInfo, opts RemoveObjectsOptions) <-chan RemoveObjectError { errorCh := make(chan RemoveObjectError, 1) // Validate if bucket name is valid. 
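RemoveObjectResult above carries successes as well as failures; the RemoveObjectsWithResult API added in the next hunk returns a channel of them. A hedged consumption fragment, again with the placeholder client and made-up keys:

// Stream the keys to delete; set VersionID per entry on versioned buckets.
objectsCh := make(chan minio.ObjectInfo)
go func() {
	defer close(objectsCh)
	for _, key := range []string{"logs/2021-01.gz", "logs/2021-02.gz"} {
		objectsCh <- minio.ObjectInfo{Key: key}
	}
}()

results := client.RemoveObjectsWithResult(context.Background(), "my-bucket",
	objectsCh, minio.RemoveObjectsOptions{})
for res := range results {
	if res.Err != nil {
		log.Printf("delete %s failed: %v", res.ObjectName, res.Err)
		continue
	}
	// DeleteMarker fields are only populated on versioned buckets.
	log.Printf("deleted %s (version %q)", res.ObjectName, res.ObjectVersionID)
}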
@@ -214,10 +301,54 @@ func (c Client) RemoveObjects(ctx context.Context, bucketName string, objectsCh return errorCh } - go c.removeObjects(ctx, bucketName, objectsCh, errorCh, opts) + resultCh := make(chan RemoveObjectResult, 1) + go c.removeObjects(ctx, bucketName, objectsCh, resultCh, opts) + go func() { + defer close(errorCh) + for res := range resultCh { + // Send only errors to the error channel + if res.Err == nil { + continue + } + errorCh <- RemoveObjectError{ + ObjectName: res.ObjectName, + VersionID: res.ObjectVersionID, + Err: res.Err, + } + } + }() + return errorCh } +// RemoveObjectsWithResult removes multiple objects from a bucket while +// it is possible to specify objects versions which are received from +// objectsCh. Remove results, successes and failures are sent back via +// RemoveObjectResult channel +func (c *Client) RemoveObjectsWithResult(ctx context.Context, bucketName string, objectsCh <-chan ObjectInfo, opts RemoveObjectsOptions) <-chan RemoveObjectResult { + resultCh := make(chan RemoveObjectResult, 1) + + // Validate if bucket name is valid. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + defer close(resultCh) + resultCh <- RemoveObjectResult{ + Err: err, + } + return resultCh + } + // Validate objects channel to be properly allocated. + if objectsCh == nil { + defer close(resultCh) + resultCh <- RemoveObjectResult{ + Err: errInvalidArgument("Objects channel cannot be nil"), + } + return resultCh + } + + go c.removeObjects(ctx, bucketName, objectsCh, resultCh, opts) + return resultCh +} + // Return true if the character is within the allowed characters in an XML 1.0 document // The list of allowed characters can be found here: https://www.w3.org/TR/xml/#charsets func validXMLChar(r rune) (ok bool) { @@ -239,14 +370,14 @@ func hasInvalidXMLChar(str string) bool { } // Generate and call MultiDelete S3 requests based on entries received from objectsCh -func (c Client) removeObjects(ctx context.Context, bucketName string, objectsCh <-chan ObjectInfo, errorCh chan<- RemoveObjectError, opts RemoveObjectsOptions) { +func (c *Client) removeObjects(ctx context.Context, bucketName string, objectsCh <-chan ObjectInfo, resultCh chan<- RemoveObjectResult, opts RemoveObjectsOptions) { maxEntries := 1000 finish := false urlValues := make(url.Values) urlValues.Set("delete", "") - // Close error channel when Multi delete finishes. - defer close(errorCh) + // Close result channel when Multi delete finishes. + defer close(resultCh) // Loop over entries by 1000 and call MultiDelete requests for { @@ -260,22 +391,20 @@ func (c Client) removeObjects(ctx context.Context, bucketName string, objectsCh for object := range objectsCh { if hasInvalidXMLChar(object.Key) { // Use single DELETE so the object name will be in the request URL instead of the multi-delete XML document. - err := c.removeObject(ctx, bucketName, object.Key, RemoveObjectOptions{ + removeResult := c.removeObject(ctx, bucketName, object.Key, RemoveObjectOptions{ VersionID: object.VersionID, GovernanceBypass: opts.GovernanceBypass, }) - if err != nil { + if err := removeResult.Err; err != nil { // Version does not exist is not an error ignore and continue. 
				switch ToErrorResponse(err).Code {
				case "InvalidArgument", "NoSuchVersion":
					continue
				}
-				errorCh <- RemoveObjectError{
-					ObjectName: object.Key,
-					VersionID:  object.VersionID,
-					Err:        err,
-				}
+				resultCh <- removeResult
+				continue
			}
+
+			resultCh <- removeResult
			continue
		}
@@ -315,29 +444,29 @@ func (c Client) removeObjects(ctx context.Context, bucketName string, objectsCh
		if resp != nil {
			if resp.StatusCode != http.StatusOK {
				e := httpRespToErrorResponse(resp, bucketName, "")
-				errorCh <- RemoveObjectError{ObjectName: "", Err: e}
+				resultCh <- RemoveObjectResult{ObjectName: "", Err: e}
			}
		}
		if err != nil {
			for _, b := range batch {
-				errorCh <- RemoveObjectError{
-					ObjectName: b.Key,
-					VersionID:  b.VersionID,
-					Err:        err,
+				resultCh <- RemoveObjectResult{
+					ObjectName:      b.Key,
+					ObjectVersionID: b.VersionID,
+					Err:             err,
				}
			}
			continue
		}

		// Process multiobjects remove xml response
-		processRemoveMultiObjectsResponse(resp.Body, batch, errorCh)
+		processRemoveMultiObjectsResponse(resp.Body, batch, resultCh)

		closeResponse(resp)
	}
}

// RemoveIncompleteUpload aborts an partially uploaded object.
-func (c Client) RemoveIncompleteUpload(ctx context.Context, bucketName, objectName string) error {
+func (c *Client) RemoveIncompleteUpload(ctx context.Context, bucketName, objectName string) error {
	// Input validation.
	if err := s3utils.CheckValidBucketName(bucketName); err != nil {
		return err
@@ -364,7 +493,7 @@ func (c Client) RemoveIncompleteUpload(ctx context.Context, bucketName, objectNa

// abortMultipartUpload aborts a multipart upload for the given
// uploadID, all previously uploaded parts are deleted.
-func (c Client) abortMultipartUpload(ctx context.Context, bucketName, objectName, uploadID string) error {
+func (c *Client) abortMultipartUpload(ctx context.Context, bucketName, objectName, uploadID string) error {
	// Input validation.
	if err := s3utils.CheckValidBucketName(bucketName); err != nil {
		return err
diff --git a/vendor/github.com/minio/minio-go/v7/api-restore.go b/vendor/github.com/minio/minio-go/v7/api-restore.go
new file mode 100644
index 0000000000000..9ec8f4f244067
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/api-restore.go
@@ -0,0 +1,182 @@
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * (C) 2018-2021 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +package minio + +import ( + "bytes" + "context" + "encoding/xml" + "net/http" + "net/url" + + "github.com/minio/minio-go/v7/pkg/s3utils" + "github.com/minio/minio-go/v7/pkg/tags" +) + +// RestoreType represents the restore request type +type RestoreType string + +const ( + // RestoreSelect represents the restore SELECT operation + RestoreSelect = RestoreType("SELECT") +) + +// TierType represents a retrieval tier +type TierType string + +const ( + // TierStandard is the standard retrieval tier + TierStandard = TierType("Standard") + // TierBulk is the bulk retrieval tier + TierBulk = TierType("Bulk") + // TierExpedited is the expedited retrieval tier + TierExpedited = TierType("Expedited") +) + +// GlacierJobParameters represents the retrieval tier parameter +type GlacierJobParameters struct { + Tier TierType +} + +// Encryption contains the type of server-side encryption used during object retrieval +type Encryption struct { + EncryptionType string + KMSContext string + KMSKeyID string `xml:"KMSKeyId"` +} + +// MetadataEntry represents a metadata information of the restored object. +type MetadataEntry struct { + Name string + Value string +} + +// S3 holds properties of the copy of the archived object +type S3 struct { + AccessControlList *AccessControlList `xml:"AccessControlList,omitempty"` + BucketName string + Prefix string + CannedACL *string `xml:"CannedACL,omitempty"` + Encryption *Encryption `xml:"Encryption,omitempty"` + StorageClass *string `xml:"StorageClass,omitempty"` + Tagging *tags.Tags `xml:"Tagging,omitempty"` + UserMetadata *MetadataEntry `xml:"UserMetadata,omitempty"` +} + +// SelectParameters holds the select request parameters +type SelectParameters struct { + XMLName xml.Name `xml:"SelectParameters"` + ExpressionType QueryExpressionType + Expression string + InputSerialization SelectObjectInputSerialization + OutputSerialization SelectObjectOutputSerialization +} + +// OutputLocation holds properties of the copy of the archived object +type OutputLocation struct { + XMLName xml.Name `xml:"OutputLocation"` + S3 S3 `xml:"S3"` +} + +// RestoreRequest holds properties of the restore object request +type RestoreRequest struct { + XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ RestoreRequest"` + Type *RestoreType `xml:"Type,omitempty"` + Tier *TierType `xml:"Tier,omitempty"` + Days *int `xml:"Days,omitempty"` + GlacierJobParameters *GlacierJobParameters `xml:"GlacierJobParameters,omitempty"` + Description *string `xml:"Description,omitempty"` + SelectParameters *SelectParameters `xml:"SelectParameters,omitempty"` + OutputLocation *OutputLocation `xml:"OutputLocation,omitempty"` +} + +// SetDays sets the days parameter of the restore request +func (r *RestoreRequest) SetDays(v int) { + r.Days = &v +} + +// SetGlacierJobParameters sets the GlacierJobParameters of the restore request +func (r *RestoreRequest) SetGlacierJobParameters(v GlacierJobParameters) { + r.GlacierJobParameters = &v +} + +// SetType sets the type of the restore request +func (r *RestoreRequest) SetType(v RestoreType) { + r.Type = &v +} + +// SetTier sets the retrieval tier of the restore request +func (r *RestoreRequest) SetTier(v TierType) { + r.Tier = &v +} + +// SetDescription sets the description of the restore request +func (r *RestoreRequest) SetDescription(v string) { + r.Description = &v +} + +// SetSelectParameters sets SelectParameters of the restore select request +func (r *RestoreRequest) SetSelectParameters(v SelectParameters) { + r.SelectParameters = &v +} + +// 
SetOutputLocation sets the properties of the copy of the archived object
+func (r *RestoreRequest) SetOutputLocation(v OutputLocation) {
+	r.OutputLocation = &v
+}
+
+// RestoreObject is an implementation of the https://docs.aws.amazon.com/AmazonS3/latest/API/API_RestoreObject.html AWS S3 API
+func (c *Client) RestoreObject(ctx context.Context, bucketName, objectName, versionID string, req RestoreRequest) error {
+	// Input validation.
+	if err := s3utils.CheckValidBucketName(bucketName); err != nil {
+		return err
+	}
+	if err := s3utils.CheckValidObjectName(objectName); err != nil {
+		return err
+	}
+
+	restoreRequestBytes, err := xml.Marshal(req)
+	if err != nil {
+		return err
+	}
+
+	urlValues := make(url.Values)
+	urlValues.Set("restore", "")
+	if versionID != "" {
+		urlValues.Set("versionId", versionID)
+	}
+
+	// Execute POST on bucket/object.
+	resp, err := c.executeMethod(ctx, http.MethodPost, requestMetadata{
+		bucketName:       bucketName,
+		objectName:       objectName,
+		queryValues:      urlValues,
+		contentMD5Base64: sumMD5Base64(restoreRequestBytes),
+		contentSHA256Hex: sum256Hex(restoreRequestBytes),
+		contentBody:      bytes.NewReader(restoreRequestBytes),
+		contentLength:    int64(len(restoreRequestBytes)),
+	})
+	defer closeResponse(resp)
+	if err != nil {
+		return err
+	}
+	if resp.StatusCode != http.StatusAccepted && resp.StatusCode != http.StatusOK {
+		return httpRespToErrorResponse(resp, bucketName, "")
+	}
+	return nil
+}
diff --git a/vendor/github.com/minio/minio-go/v7/api-s3-datatypes.go b/vendor/github.com/minio/minio-go/v7/api-s3-datatypes.go
index 37ed97b725a0c..592d4cdccb126 100644
--- a/vendor/github.com/minio/minio-go/v7/api-s3-datatypes.go
+++ b/vendor/github.com/minio/minio-go/v7/api-s3-datatypes.go
@@ -121,8 +121,8 @@ func (l *ListVersionsResult) UnmarshalXML(d *xml.Decoder, start xml.StartElement
			return err
		}

-		switch se := t.(type) {
-		case xml.StartElement:
+		se, ok := t.(xml.StartElement)
+		if ok {
			tagName := se.Name.Local
			switch tagName {
			case "Name", "Prefix",
@@ -335,7 +335,7 @@ type deletedObject struct {
	VersionID string `xml:"VersionId,omitempty"`
	// These fields are ignored.
	DeleteMarker          bool
-	DeleteMarkerVersionID string
+	DeleteMarkerVersionID string `xml:"DeleteMarkerVersionId,omitempty"`
}

// nonDeletedObject container for Error element (failed deletion) in MultiObjects Delete XML response
diff --git a/vendor/github.com/minio/minio-go/v7/api-select.go b/vendor/github.com/minio/minio-go/v7/api-select.go
index e35cf02bf3fcd..5d47d7ec56a3b 100644
--- a/vendor/github.com/minio/minio-go/v7/api-select.go
+++ b/vendor/github.com/minio/minio-go/v7/api-select.go
@@ -54,6 +54,13 @@ const (
	SelectCompressionNONE SelectCompressionType = "NONE"
	SelectCompressionGZIP                       = "GZIP"
	SelectCompressionBZIP                       = "BZIP2"
+
+	// Non-standard compression schemes, supported by MinIO hosts:
+
+	SelectCompressionZSTD   = "ZSTD"   // Zstandard compression.
+	SelectCompressionLZ4    = "LZ4"    // LZ4 Stream
+	SelectCompressionS2     = "S2"     // S2 Stream
+	SelectCompressionSNAPPY = "SNAPPY" // Snappy stream
)

// CSVQuoteFields - is the parameter for how CSV fields are quoted.
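A typical use of the restore API added above is thawing an archived object before reading it. A minimal hedged sketch, with the same placeholder client and made-up bucket and object names:

// Ask for a 7-day, Standard-tier restore of an archived object.
restoreReq := minio.RestoreRequest{}
restoreReq.SetDays(7)
restoreReq.SetGlacierJobParameters(minio.GlacierJobParameters{Tier: minio.TierStandard})

// An empty versionID targets the latest version of the object.
if err := client.RestoreObject(context.Background(), "archive-bucket",
	"2020/q1-report.parquet", "", restoreReq); err != nil {
	log.Fatal(err)
}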
@@ -330,10 +337,10 @@ func (j JSONOutputOptions) MarshalXML(e *xml.Encoder, start xml.StartElement) er // SelectObjectInputSerialization - input serialization parameters type SelectObjectInputSerialization struct { - CompressionType SelectCompressionType - Parquet *ParquetInputOptions `xml:"Parquet,omitempty"` - CSV *CSVInputOptions `xml:"CSV,omitempty"` - JSON *JSONInputOptions `xml:"JSON,omitempty"` + CompressionType SelectCompressionType `xml:"CompressionType,omitempty"` + Parquet *ParquetInputOptions `xml:"Parquet,omitempty"` + CSV *CSVInputOptions `xml:"CSV,omitempty"` + JSON *JSONInputOptions `xml:"JSON,omitempty"` } // SelectObjectOutputSerialization - output serialization parameters. @@ -431,7 +438,7 @@ const ( ) // SelectObjectContent is a implementation of http://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectSELECTContent.html AWS S3 API. -func (c Client) SelectObjectContent(ctx context.Context, bucketName, objectName string, opts SelectObjectOptions) (*SelectResults, error) { +func (c *Client) SelectObjectContent(ctx context.Context, bucketName, objectName string, opts SelectObjectOptions) (*SelectResults, error) { // Input validation. if err := s3utils.CheckValidBucketName(bucketName); err != nil { return nil, err @@ -512,7 +519,7 @@ func (s *SelectResults) start(pipeWriter *io.PipeWriter) { go func() { for { var prelude preludeInfo - var headers = make(http.Header) + headers := make(http.Header) var err error // Create CRC code @@ -617,7 +624,7 @@ func (p preludeInfo) PayloadLen() int64 { // the struct, func processPrelude(prelude io.Reader, crc hash.Hash32) (preludeInfo, error) { var err error - var pInfo = preludeInfo{} + pInfo := preludeInfo{} // reads total length of the message (first 4 bytes) pInfo.totalLen, err = extractUint32(prelude) @@ -745,7 +752,6 @@ func checkCRC(r io.Reader, expect uint32) error { if msgCRC != expect { return fmt.Errorf("Checksum Mismatch, MessageCRC of 0x%X does not equal expected CRC of 0x%X", msgCRC, expect) - } return nil } diff --git a/vendor/github.com/minio/minio-go/v7/api-stat.go b/vendor/github.com/minio/minio-go/v7/api-stat.go index aa81cc43771bc..6deb5f5dc9468 100644 --- a/vendor/github.com/minio/minio-go/v7/api-stat.go +++ b/vendor/github.com/minio/minio-go/v7/api-stat.go @@ -27,7 +27,7 @@ import ( // BucketExists verifies if bucket exists and you have permission to access it. Allows for a Context to // control cancellations and timeouts. -func (c Client) BucketExists(ctx context.Context, bucketName string) (bool, error) { +func (c *Client) BucketExists(ctx context.Context, bucketName string) (bool, error) { // Input validation. if err := s3utils.CheckValidBucketName(bucketName); err != nil { return false, err @@ -58,19 +58,7 @@ func (c Client) BucketExists(ctx context.Context, bucketName string) (bool, erro } // StatObject verifies if object exists and you have permission to access. -func (c Client) StatObject(ctx context.Context, bucketName, objectName string, opts StatObjectOptions) (ObjectInfo, error) { - // Input validation. - if err := s3utils.CheckValidBucketName(bucketName); err != nil { - return ObjectInfo{}, err - } - if err := s3utils.CheckValidObjectName(objectName); err != nil { - return ObjectInfo{}, err - } - return c.statObject(ctx, bucketName, objectName, opts) -} - -// Lower level API for statObject supporting pre-conditions and range headers. 
-func (c Client) statObject(ctx context.Context, bucketName, objectName string, opts StatObjectOptions) (ObjectInfo, error) { +func (c *Client) StatObject(ctx context.Context, bucketName, objectName string, opts StatObjectOptions) (ObjectInfo, error) { // Input validation. if err := s3utils.CheckValidBucketName(bucketName); err != nil { return ObjectInfo{}, err @@ -99,11 +87,11 @@ func (c Client) statObject(ctx context.Context, bucketName, objectName string, o if err != nil { return ObjectInfo{}, err } - deleteMarker := resp.Header.Get(amzDeleteMarker) == "true" if resp != nil { + deleteMarker := resp.Header.Get(amzDeleteMarker) == "true" if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusPartialContent { - if resp.StatusCode == http.StatusBadRequest && opts.VersionID != "" && deleteMarker { + if resp.StatusCode == http.StatusMethodNotAllowed && opts.VersionID != "" && deleteMarker { errResp := ErrorResponse{ StatusCode: resp.StatusCode, Code: "MethodNotAllowed", diff --git a/vendor/github.com/minio/minio-go/v7/api.go b/vendor/github.com/minio/minio-go/v7/api.go index 9a957af58311a..ee637bd076dfd 100644 --- a/vendor/github.com/minio/minio-go/v7/api.go +++ b/vendor/github.com/minio/minio-go/v7/api.go @@ -34,6 +34,7 @@ import ( "runtime" "strings" "sync" + "sync/atomic" "time" md5simd "github.com/minio/md5-simd" @@ -45,7 +46,7 @@ import ( // Client implements Amazon S3 compatible methods. type Client struct { - /// Standard options. + // Standard options. // Parsed endpoint url provided by the user. endpointURL *url.URL @@ -90,6 +91,8 @@ type Client struct { // Factory for MD5 hash functions. md5Hasher func() md5simd.Hasher sha256Hasher func() md5simd.Hasher + + healthStatus int32 } // Options for New method @@ -108,7 +111,7 @@ type Options struct { // Global constants. const ( libraryName = "minio-go" - libraryVersion = "v7.0.10" + libraryVersion = "v7.0.24" ) // User Agent should always following the below style. @@ -179,67 +182,6 @@ func (r *lockedRandSource) Seed(seed int64) { r.lk.Unlock() } -// Redirect requests by re signing the request. -func (c *Client) redirectHeaders(req *http.Request, via []*http.Request) error { - if len(via) >= 5 { - return errors.New("stopped after 5 redirects") - } - if len(via) == 0 { - return nil - } - lastRequest := via[len(via)-1] - var reAuth bool - for attr, val := range lastRequest.Header { - // if hosts do not match do not copy Authorization header - if attr == "Authorization" && req.Host != lastRequest.Host { - reAuth = true - continue - } - if _, ok := req.Header[attr]; !ok { - req.Header[attr] = val - } - } - - *c.endpointURL = *req.URL - - value, err := c.credsProvider.Get() - if err != nil { - return err - } - var ( - signerType = value.SignerType - accessKeyID = value.AccessKeyID - secretAccessKey = value.SecretAccessKey - sessionToken = value.SessionToken - region = c.region - ) - - // Custom signer set then override the behavior. - if c.overrideSignerType != credentials.SignatureDefault { - signerType = c.overrideSignerType - } - - // If signerType returned by credentials helper is anonymous, - // then do not sign regardless of signerType override. - if value.SignerType == credentials.SignatureAnonymous { - signerType = credentials.SignatureAnonymous - } - - if reAuth { - // Check if there is no region override, if not get it from the URL if possible. 
-		if region == "" {
-			region = s3utils.GetRegionFromURL(*c.endpointURL)
-		}
-		switch {
-		case signerType.IsV2():
-			return errors.New("signature V2 cannot support redirection")
-		case signerType.IsV4():
-			signer.SignV4(*req, accessKeyID, secretAccessKey, sessionToken, getDefaultLocation(*c.endpointURL, region))
-		}
-	}
-	return nil
-}
-
func privateNew(endpoint string, opts *Options) (*Client, error) {
	// construct endpoint.
	endpointURL, err := getEndpointURL(endpoint, opts.Secure)
@@ -276,9 +218,11 @@ func privateNew(endpoint string, opts *Options) (*Client, error) {

	// Instantiate http client and bucket location cache.
	clnt.httpClient = &http.Client{
-		Jar:           jar,
-		Transport:     transport,
-		CheckRedirect: clnt.redirectHeaders,
+		Jar:       jar,
+		Transport: transport,
+		CheckRedirect: func(req *http.Request, via []*http.Request) error {
+			return http.ErrUseLastResponse
+		},
	}

	// Sets custom region, if region is empty bucket location cache is used automatically.
@@ -305,6 +249,10 @@ func privateNew(endpoint string, opts *Options) (*Client, error) {
	// Sets bucket lookup style, whether server accepts DNS or Path lookup. Default is Auto - determined
	// by the SDK. When Auto is specified, DNS lookup is used for Amazon/Google cloud endpoints and Path for all other endpoints.
	clnt.lookup = opts.BucketLookup
+
+	// healthcheck is not initialized
+	clnt.healthStatus = unknown
+
	// Return.
	return clnt, nil
}
@@ -387,17 +335,81 @@ func (c *Client) hashMaterials(isMd5Requested bool) (hashAlgos map[string]md5sim
	return hashAlgos, hashSums
}

+const (
+	unknown = -1
+	offline = 0
+	online  = 1
+)
+
+// IsOnline returns true if healthcheck enabled and client is online
+func (c *Client) IsOnline() bool {
+	return !c.IsOffline()
+}
+
+// sets online healthStatus to offline
+func (c *Client) markOffline() {
+	atomic.CompareAndSwapInt32(&c.healthStatus, online, offline)
+}
+
+// IsOffline returns true if healthcheck enabled and client is offline
+func (c *Client) IsOffline() bool {
+	return atomic.LoadInt32(&c.healthStatus) == offline
+}
+
+// HealthCheck starts a healthcheck to see if endpoint is up. Returns a context cancellation function
+// and an error if the health check is already started
+func (c *Client) HealthCheck(hcDuration time.Duration) (context.CancelFunc, error) {
+	if atomic.LoadInt32(&c.healthStatus) == online {
+		return nil, fmt.Errorf("health check is running")
+	}
+	if hcDuration < 1*time.Second {
+		return nil, fmt.Errorf("health check duration should be at least 1 second")
+	}
+	ctx, cancelFn := context.WithCancel(context.Background())
+	atomic.StoreInt32(&c.healthStatus, online)
+	probeBucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "probe-health-")
+	go func(duration time.Duration) {
+		timer := time.NewTimer(duration)
+		defer timer.Stop()
+		for {
+			select {
+			case <-ctx.Done():
+				atomic.StoreInt32(&c.healthStatus, unknown)
+				return
+			case <-timer.C:
+				timer.Reset(duration)
+				// Do health check the first time and ONLY if the connection is marked offline
+				if c.IsOffline() {
+					gctx, gcancel := context.WithTimeout(context.Background(), 3*time.Second)
+					_, err := c.getBucketLocation(gctx, probeBucketName)
+					gcancel()
+					if IsNetworkOrHostDown(err, false) {
+						// Still network errors do not need to do anything.
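+						// Any other outcome, even NoSuchBucket or AccessDenied
+						// for the random probe bucket, means the endpoint
+						// answered, so the switch below flips the status
+						// back to online.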
+ continue + } + switch ToErrorResponse(err).Code { + case "NoSuchBucket", "AccessDenied", "": + atomic.CompareAndSwapInt32(&c.healthStatus, offline, online) + } + } + } + } + }(hcDuration) + return cancelFn, nil +} + // requestMetadata - is container for all the values to make a request. type requestMetadata struct { // If set newRequest presigns the URL. presignURL bool // User supplied. - bucketName string - objectName string - queryValues url.Values - customHeader http.Header - expires int64 + bucketName string + objectName string + queryValues url.Values + customHeader http.Header + extraPresignHeader http.Header + expires int64 // Generated by our internal code. bucketLocation string @@ -408,7 +420,7 @@ type requestMetadata struct { } // dumpHTTP - dump HTTP request and response. -func (c Client) dumpHTTP(req *http.Request, resp *http.Response) error { +func (c *Client) dumpHTTP(req *http.Request, resp *http.Response) error { // Starts http dump. _, err := fmt.Fprintln(c.traceOutput, "---------START-HTTP---------") if err != nil { @@ -468,8 +480,14 @@ func (c Client) dumpHTTP(req *http.Request, resp *http.Response) error { } // do - execute http request. -func (c Client) do(req *http.Request) (*http.Response, error) { - resp, err := c.httpClient.Do(req) +func (c *Client) do(req *http.Request) (resp *http.Response, err error) { + defer func() { + if IsNetworkOrHostDown(err, false) { + c.markOffline() + } + }() + + resp, err = c.httpClient.Do(req) if err != nil { // Handle this specifically for now until future Golang versions fix this issue properly. if urlErr, ok := err.(*url.Error); ok { @@ -512,10 +530,14 @@ var successStatus = []int{ // executeMethod - instantiates a given method, and retries the // request upon any error up to maxRetries attempts in a binomially // delayed manner using a standard back off algorithm. -func (c Client) executeMethod(ctx context.Context, method string, metadata requestMetadata) (res *http.Response, err error) { +func (c *Client) executeMethod(ctx context.Context, method string, metadata requestMetadata) (res *http.Response, err error) { + if c.IsOffline() { + return nil, errors.New(c.endpointURL.String() + " is offline.") + } + var retryable bool // Indicates if request can be retried. var bodySeeker io.Seeker // Extracted seeker from io.Reader. - var reqRetry = MaxRetry // Indicates how many times we can retry the request + reqRetry := MaxRetry // Indicates how many times we can retry the request if metadata.contentBody != nil { // Check if body is seekable then it is retryable. @@ -544,9 +566,6 @@ func (c Client) executeMethod(ctx context.Context, method string, metadata reque // Indicate to our routine to exit cleanly upon return. defer cancel() - // Blank indentifier is kept here on purpose since 'range' without - // blank identifiers is only supported since go1.4 - // https://golang.org/doc/go1.4#forrange. for range c.newRetryTimer(retryCtx, reqRetry, DefaultRetryUnit, DefaultRetryCap, MaxJitter) { // Retry executes the following function body if request has an // error until maxRetries have been exhausted, retry attempts are @@ -568,9 +587,9 @@ func (c Client) executeMethod(ctx context.Context, method string, metadata reque if isS3CodeRetryable(errResponse.Code) { continue // Retry. } + return nil, err } - // Initiate the request. res, err = c.do(req) if err != nil { @@ -668,7 +687,7 @@ func (c Client) executeMethod(ctx context.Context, method string, metadata reque } // newRequest - instantiate a new HTTP request for a given method. 
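// (newRequest is also where the extraPresignHeader metadata introduced
// below is folded into presigned URLs; note that signature V2 rejects
// extra signed headers.)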
-func (c Client) newRequest(ctx context.Context, method string, metadata requestMetadata) (req *http.Request, err error) { +func (c *Client) newRequest(ctx context.Context, method string, metadata requestMetadata) (req *http.Request, err error) { // If no method is supplied default to 'POST'. if method == "" { method = http.MethodPost @@ -736,6 +755,14 @@ func (c Client) newRequest(ctx context.Context, method string, metadata requestM if signerType.IsAnonymous() { return nil, errInvalidArgument("Presigned URLs cannot be generated with anonymous credentials.") } + if metadata.extraPresignHeader != nil { + if signerType.IsV2() { + return nil, errInvalidArgument("Extra signed headers for Presign with Signature V2 is not supported.") + } + for k, v := range metadata.extraPresignHeader { + req.Header.Set(k, v[0]) + } + } if signerType.IsV2() { // Presign URL with signature v2. req = signer.PreSignV2(*req, accessKeyID, secretAccessKey, metadata.expires, isVirtualHost) @@ -808,7 +835,7 @@ func (c Client) newRequest(ctx context.Context, method string, metadata requestM } // set User agent. -func (c Client) setUserAgent(req *http.Request) { +func (c *Client) setUserAgent(req *http.Request) { req.Header.Set("User-Agent", libraryUserAgent) if c.appInfo.appName != "" && c.appInfo.appVersion != "" { req.Header.Set("User-Agent", libraryUserAgent+" "+c.appInfo.appName+"/"+c.appInfo.appVersion) @@ -816,7 +843,7 @@ func (c Client) setUserAgent(req *http.Request) { } // makeTargetURL make a new target url. -func (c Client) makeTargetURL(bucketName, objectName, bucketLocation string, isVirtualHostStyle bool, queryValues url.Values) (*url.URL, error) { +func (c *Client) makeTargetURL(bucketName, objectName, bucketLocation string, isVirtualHostStyle bool, queryValues url.Values) (*url.URL, error) { host := c.endpointURL.Host // For Amazon S3 endpoint, try to fetch location based endpoint. if s3utils.IsAmazonEndpoint(*c.endpointURL) { @@ -831,8 +858,8 @@ func (c Client) makeTargetURL(bucketName, objectName, bucketLocation string, isV // http://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html host = c.s3AccelerateEndpoint } else { - // Do not change the host if the endpoint URL is a FIPS S3 endpoint. - if !s3utils.IsAmazonFIPSEndpoint(*c.endpointURL) { + // Do not change the host if the endpoint URL is a FIPS S3 endpoint or a S3 PrivateLink interface endpoint + if !s3utils.IsAmazonFIPSEndpoint(*c.endpointURL) && !s3utils.IsAmazonPrivateLinkEndpoint(*c.endpointURL) { // Fetch new host based on the bucket location. host = getS3Endpoint(bucketLocation) } @@ -848,10 +875,14 @@ func (c Client) makeTargetURL(bucketName, objectName, bucketLocation string, isV if h, p, err := net.SplitHostPort(host); err == nil { if scheme == "http" && p == "80" || scheme == "https" && p == "443" { host = h + if ip := net.ParseIP(h); ip != nil && ip.To16() != nil { + host = "[" + h + "]" + } } } urlStr := scheme + "://" + host + "/" + // Make URL only if bucketName is available, otherwise use the // endpoint URL. if bucketName != "" { @@ -861,13 +892,13 @@ func (c Client) makeTargetURL(bucketName, objectName, bucketLocation string, isV if isVirtualHostStyle { urlStr = scheme + "://" + bucketName + "." + host + "/" if objectName != "" { - urlStr = urlStr + s3utils.EncodePath(objectName) + urlStr += s3utils.EncodePath(objectName) } } else { // If not fall back to using path style. 
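			// Path style addresses the bucket in the URL path
			// (scheme://host/bucket/object) rather than in the host name.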
urlStr = urlStr + bucketName + "/" if objectName != "" { - urlStr = urlStr + s3utils.EncodePath(objectName) + urlStr += s3utils.EncodePath(objectName) } } } diff --git a/vendor/github.com/minio/minio-go/v7/bucket-cache.go b/vendor/github.com/minio/minio-go/v7/bucket-cache.go index 156150f622fa3..b7d99c69313e1 100644 --- a/vendor/github.com/minio/minio-go/v7/bucket-cache.go +++ b/vendor/github.com/minio/minio-go/v7/bucket-cache.go @@ -73,7 +73,7 @@ func (r *bucketLocationCache) Delete(bucketName string) { // GetBucketLocation - get location for the bucket name from location cache, if not // fetch freshly by making a new request. -func (c Client) GetBucketLocation(ctx context.Context, bucketName string) (string, error) { +func (c *Client) GetBucketLocation(ctx context.Context, bucketName string) (string, error) { if err := s3utils.CheckValidBucketName(bucketName); err != nil { return "", err } @@ -82,7 +82,7 @@ func (c Client) GetBucketLocation(ctx context.Context, bucketName string) (strin // getBucketLocation - Get location for the bucketName from location map cache, if not // fetch freshly by making a new request. -func (c Client) getBucketLocation(ctx context.Context, bucketName string) (string, error) { +func (c *Client) getBucketLocation(ctx context.Context, bucketName string) (string, error) { if err := s3utils.CheckValidBucketName(bucketName); err != nil { return "", err } @@ -169,7 +169,7 @@ func processBucketLocationResponse(resp *http.Response, bucketName string) (buck } // getBucketLocationRequest - Wrapper creates a new getBucketLocation request. -func (c Client) getBucketLocationRequest(ctx context.Context, bucketName string) (*http.Request, error) { +func (c *Client) getBucketLocationRequest(ctx context.Context, bucketName string) (*http.Request, error) { // Set location query. urlValues := make(url.Values) urlValues.Set("location", "") @@ -181,6 +181,9 @@ func (c Client) getBucketLocationRequest(ctx context.Context, bucketName string) if h, p, err := net.SplitHostPort(targetURL.Host); err == nil { if targetURL.Scheme == "http" && p == "80" || targetURL.Scheme == "https" && p == "443" { targetURL.Host = h + if ip := net.ParseIP(h); ip != nil && ip.To16() != nil { + targetURL.Host = "[" + h + "]" + } } } @@ -188,7 +191,7 @@ func (c Client) getBucketLocationRequest(ctx context.Context, bucketName string) var urlStr string - //only support Aliyun OSS for virtual hosted path, compatible Amazon & Google Endpoint + // only support Aliyun OSS for virtual hosted path, compatible Amazon & Google Endpoint if isVirtualHost && s3utils.IsAliyunOSSEndpoint(targetURL) { urlStr = c.endpointURL.Scheme + "://" + bucketName + "." + targetURL.Host + "/?location" } else { diff --git a/vendor/github.com/minio/minio-go/v7/constants.go b/vendor/github.com/minio/minio-go/v7/constants.go index 5e5aec7c2ef7f..dee83b870cc55 100644 --- a/vendor/github.com/minio/minio-go/v7/constants.go +++ b/vendor/github.com/minio/minio-go/v7/constants.go @@ -17,15 +17,15 @@ package minio -/// Multipart upload defaults. +// Multipart upload defaults. // absMinPartSize - absolute minimum part size (5 MiB) below which // a part in a multipart upload may not be uploaded. const absMinPartSize = 1024 * 1024 * 5 -// minPartSize - minimum part size 128MiB per object after which +// minPartSize - minimum part size 16MiB per object after which // putObject behaves internally as multipart. 
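// Lowering the threshold from 128 MiB to 16 MiB moves uploads onto the
// multipart paths much earlier, which caps the memory buffered per object.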
-const minPartSize = 1024 * 1024 * 128 +const minPartSize = 1024 * 1024 * 16 // maxPartsCount - maximum number of parts for a single multipart session. const maxPartsCount = 10000 @@ -69,6 +69,7 @@ const ( amzVersionID = "X-Amz-Version-Id" amzTaggingCount = "X-Amz-Tagging-Count" amzExpiration = "X-Amz-Expiration" + amzRestore = "X-Amz-Restore" amzReplicationStatus = "X-Amz-Replication-Status" amzDeleteMarker = "X-Amz-Delete-Marker" @@ -88,4 +89,13 @@ const ( minIOBucketSourceETag = "X-Minio-Source-Etag" minIOBucketReplicationDeleteMarker = "X-Minio-Source-DeleteMarker" minIOBucketReplicationProxyRequest = "X-Minio-Source-Proxy-Request" + minIOBucketReplicationRequest = "X-Minio-Source-Replication-Request" + // Header indicates last tag update time on source + minIOBucketReplicationTaggingTimestamp = "X-Minio-Source-Replication-Tagging-Timestamp" + // Header indicates last retention update time on source + minIOBucketReplicationObjectRetentionTimestamp = "X-Minio-Source-Replication-Retention-Timestamp" + // Header indicates last legalhold update time on source + minIOBucketReplicationObjectLegalHoldTimestamp = "X-Minio-Source-Replication-LegalHold-Timestamp" + + minIOForceDelete = "x-minio-force-delete" ) diff --git a/vendor/github.com/minio/minio-go/v7/core.go b/vendor/github.com/minio/minio-go/v7/core.go index 2bf4edf016103..c2a90239f221d 100644 --- a/vendor/github.com/minio/minio-go/v7/core.go +++ b/vendor/github.com/minio/minio-go/v7/core.go @@ -46,13 +46,13 @@ func NewCore(endpoint string, opts *Options) (*Core, error) { // ListObjects - List all the objects at a prefix, optionally with marker and delimiter // you can further filter the results. func (c Core) ListObjects(bucket, prefix, marker, delimiter string, maxKeys int) (result ListBucketResult, err error) { - return c.listObjectsQuery(context.Background(), bucket, prefix, marker, delimiter, maxKeys) + return c.listObjectsQuery(context.Background(), bucket, prefix, marker, delimiter, maxKeys, nil) } // ListObjectsV2 - Lists all the objects at a prefix, similar to ListObjects() but uses // continuationToken instead of marker to support iteration over the results. -func (c Core) ListObjectsV2(bucketName, objectPrefix, continuationToken string, fetchOwner bool, delimiter string, maxkeys int) (ListBucketV2Result, error) { - return c.listObjectsV2Query(context.Background(), bucketName, objectPrefix, continuationToken, fetchOwner, false, delimiter, maxkeys) +func (c Core) ListObjectsV2(bucketName, objectPrefix, startAfter, continuationToken, delimiter string, maxkeys int) (ListBucketV2Result, error) { + return c.listObjectsV2Query(context.Background(), bucketName, objectPrefix, continuationToken, true, false, delimiter, startAfter, maxkeys, nil) } // CopyObject - copies an object from source object to destination object on server side. @@ -63,8 +63,8 @@ func (c Core) CopyObject(ctx context.Context, sourceBucket, sourceObject, destBu // CopyObjectPart - creates a part in a multipart upload by copying (a // part of) an existing object. 
func (c Core) CopyObjectPart(ctx context.Context, srcBucket, srcObject, destBucket, destObject string, uploadID string, - partID int, startOffset, length int64, metadata map[string]string) (p CompletePart, err error) { - + partID int, startOffset, length int64, metadata map[string]string, +) (p CompletePart, err error) { return c.copyObjectPartDo(ctx, srcBucket, srcObject, destBucket, destObject, uploadID, partID, startOffset, length, metadata) } @@ -97,10 +97,10 @@ func (c Core) ListObjectParts(ctx context.Context, bucket, object, uploadID stri } // CompleteMultipartUpload - Concatenate uploaded parts and commit to an object. -func (c Core) CompleteMultipartUpload(ctx context.Context, bucket, object, uploadID string, parts []CompletePart) (string, error) { +func (c Core) CompleteMultipartUpload(ctx context.Context, bucket, object, uploadID string, parts []CompletePart, opts PutObjectOptions) (string, error) { res, err := c.completeMultipartUpload(ctx, bucket, object, uploadID, completeMultipartUpload{ Parts: parts, - }) + }, opts) return res.ETag, err } @@ -125,9 +125,3 @@ func (c Core) PutBucketPolicy(ctx context.Context, bucket, bucketPolicy string) func (c Core) GetObject(ctx context.Context, bucketName, objectName string, opts GetObjectOptions) (io.ReadCloser, ObjectInfo, http.Header, error) { return c.getObject(ctx, bucketName, objectName, opts) } - -// StatObject is a lower level API implemented to support special -// conditions matching etag, modtime on a request. -func (c Core) StatObject(ctx context.Context, bucketName, objectName string, opts StatObjectOptions) (ObjectInfo, error) { - return c.statObject(ctx, bucketName, objectName, opts) -} diff --git a/vendor/github.com/minio/minio-go/v7/functional_tests.go b/vendor/github.com/minio/minio-go/v7/functional_tests.go new file mode 100644 index 0000000000000..59f347efffef5 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/functional_tests.go @@ -0,0 +1,12225 @@ +//go:build mint +// +build mint + +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2020 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+package main
+
+import (
+	"archive/zip"
+	"bytes"
+	"context"
+	"errors"
+	"fmt"
+	"hash/crc32"
+	"io"
+	"io/ioutil"
+	"math/rand"
+	"mime/multipart"
+	"net/http"
+	"net/url"
+	"os"
+	"path"
+	"path/filepath"
+	"reflect"
+	"runtime"
+	"sort"
+	"strconv"
+	"strings"
+	"sync"
+	"time"
+
+	"github.com/dustin/go-humanize"
+	jsoniter "github.com/json-iterator/go"
+	log "github.com/sirupsen/logrus"
+
+	"github.com/minio/minio-go/v7"
+	"github.com/minio/minio-go/v7/pkg/credentials"
+	"github.com/minio/minio-go/v7/pkg/encrypt"
+	"github.com/minio/minio-go/v7/pkg/notification"
+	"github.com/minio/minio-go/v7/pkg/tags"
+)
+
+const letterBytes = "abcdefghijklmnopqrstuvwxyz01234569"
+const (
+	letterIdxBits = 6                    // 6 bits to represent a letter index
+	letterIdxMask = 1<<letterIdxBits - 1 // All 1-bits, as many as letterIdxBits
+	letterIdxMax  = 63 / letterIdxBits   // # of letter indices fitting in 63 bits
+)
+
+const (
+	serverEndpoint = "SERVER_ENDPOINT"
+	accessKey      = "ACCESS_KEY"
+	secretKey      = "SECRET_KEY"
+	enableHTTPS    = "ENABLE_HTTPS"
+	enableKMS      = "ENABLE_KMS"
+)
+
+type mintJSONFormatter struct{}
+
+func (f *mintJSONFormatter) Format(entry *log.Entry) ([]byte, error) {
+	data := make(log.Fields, len(entry.Data))
+	for k, v := range entry.Data {
+		switch v := v.(type) {
+		case error:
+			// Otherwise errors are ignored by `encoding/json`
+			// https://github.com/sirupsen/logrus/issues/137
+			data[k] = v.Error()
+		default:
+			data[k] = v
+		}
+	}
+	json := jsoniter.ConfigCompatibleWithStandardLibrary
+	serialized, err := json.Marshal(data)
+	if err != nil {
+		return nil, fmt.Errorf("Failed to marshal fields to JSON, %w", err)
+	}
+	return append(serialized, '\n'), nil
+}
+
+var readFull = func(r io.Reader, buf []byte) (n int, err error) {
+	// ReadFull reads exactly len(buf) bytes from r into buf.
+	// It returns the number of bytes copied and an error if
+	// fewer bytes were read. The error is EOF only if no bytes
+	// were read. If an EOF happens after reading some but not
+	// all the bytes, ReadFull returns ErrUnexpectedEOF. On
+	// return, n == len(buf) if and only if err == nil. If r
+	// returns an error having read at least len(buf) bytes, the
+	// error is dropped.
+	for n < len(buf) && err == nil {
+		var nn int
+		nn, err = r.Read(buf[n:])
+		// Some spurious io.Reader's return
+		// io.ErrUnexpectedEOF when nn == 0
+		// this behavior is undocumented
+		// so we are on purpose not using io.ReadFull
+		// implementation because this can lead
+		// to custom handling, to avoid that
+		// we simply modify the original io.ReadFull
+		// implementation to avoid this issue.
+		// io.ErrUnexpectedEOF with nn == 0 really
+		// means that io.EOF
+		if err == io.ErrUnexpectedEOF && nn == 0 {
+			err = io.EOF
+		}
+		n += nn
+	}
+	if n >= len(buf) {
+		err = nil
+	} else if n > 0 && err == io.EOF {
+		err = io.ErrUnexpectedEOF
+	}
+	return
+}
+
+func cleanEmptyEntries(fields log.Fields) log.Fields {
+	cleanFields := log.Fields{}
+	for k, v := range fields {
+		if v != "" {
+			cleanFields[k] = v
+		}
+	}
+	return cleanFields
+}
+
+// log successful test runs
+func successLogger(testName string, function string, args map[string]interface{}, startTime time.Time) *log.Entry {
+	// calculate the test case duration
+	duration := time.Since(startTime)
+	// log with the fields as per mint
+	fields := log.Fields{"name": "minio-go: " + testName, "function": function, "args": args, "duration": duration.Nanoseconds() / 1000000, "status": "PASS"}
+	return log.WithFields(cleanEmptyEntries(fields))
+}
+
+// As few of the features are not available in Gateway(s) currently, Check if err value is NotImplemented,
+// and log as NA in that case and continue execution. Otherwise log as failure and return
+func logError(testName string, function string, args map[string]interface{}, startTime time.Time, alert string, message string, err error) {
+	// If server returns NotImplemented we assume it is gateway mode and hence log it as info and move on to next tests
+	// Special case for ComposeObject API as it is implemented on client side and adds specific error details like `Error in upload-part-copy` in
+	// addition to NotImplemented error returned from server
+	if isErrNotImplemented(err) {
+		ignoredLog(testName, function, args, startTime, message).Info()
+	} else if isRunOnFail() {
+		failureLog(testName, function, args, startTime, alert, message, err).Error()
+	} else {
+		failureLog(testName, function, args, startTime, alert, message, err).Fatal()
+	}
+}
+
+// log failed test runs
+func failureLog(testName string, function string, args map[string]interface{}, startTime time.Time, alert string, message string, err error) *log.Entry {
+	// calculate the test case duration
+	duration := time.Since(startTime)
+	var fields log.Fields
+	// log with the fields as per mint
+	if err != nil {
+		fields = log.Fields{
+			"name": "minio-go: " + testName, "function": function, "args": args,
+			"duration": duration.Nanoseconds() / 1000000, "status": "FAIL", "alert": alert, "message": message, "error": err,
+		}
+	} else {
+		fields = log.Fields{
+			"name": "minio-go: " + testName, "function": function, "args": args,
+			"duration": duration.Nanoseconds() / 1000000, "status": "FAIL", "alert": alert, "message": message,
+		}
+	}
+	return log.WithFields(cleanEmptyEntries(fields))
+}
+
+// log not applicable test runs
+func ignoredLog(testName string, function string, args map[string]interface{}, startTime time.Time, alert string) *log.Entry {
+	// calculate the test case duration
+	duration := 
time.Since(startTime) + // log with the fields as per mint + fields := log.Fields{ + "name": "minio-go: " + testName, "function": function, "args": args, + "duration": duration.Nanoseconds() / 1000000, "status": "NA", "alert": strings.Split(alert, " ")[0] + " is NotImplemented", + } + return log.WithFields(cleanEmptyEntries(fields)) +} + +// Delete objects in given bucket, recursively +func cleanupBucket(bucketName string, c *minio.Client) error { + // Create a done channel to control 'ListObjectsV2' go routine. + doneCh := make(chan struct{}) + // Exit cleanly upon return. + defer close(doneCh) + // Iterate over all objects in the bucket via listObjectsV2 and delete + for objCh := range c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{Recursive: true}) { + if objCh.Err != nil { + return objCh.Err + } + if objCh.Key != "" { + err := c.RemoveObject(context.Background(), bucketName, objCh.Key, minio.RemoveObjectOptions{}) + if err != nil { + return err + } + } + } + for objPartInfo := range c.ListIncompleteUploads(context.Background(), bucketName, "", true) { + if objPartInfo.Err != nil { + return objPartInfo.Err + } + if objPartInfo.Key != "" { + err := c.RemoveIncompleteUpload(context.Background(), bucketName, objPartInfo.Key) + if err != nil { + return err + } + } + } + // objects are already deleted, clear the buckets now + err := c.RemoveBucket(context.Background(), bucketName) + if err != nil { + return err + } + return err +} + +func cleanupVersionedBucket(bucketName string, c *minio.Client) error { + doneCh := make(chan struct{}) + defer close(doneCh) + for obj := range c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{WithVersions: true, Recursive: true}) { + if obj.Err != nil { + return obj.Err + } + if obj.Key != "" { + err := c.RemoveObject(context.Background(), bucketName, obj.Key, + minio.RemoveObjectOptions{VersionID: obj.VersionID, GovernanceBypass: true}) + if err != nil { + return err + } + } + } + for objPartInfo := range c.ListIncompleteUploads(context.Background(), bucketName, "", true) { + if objPartInfo.Err != nil { + return objPartInfo.Err + } + if objPartInfo.Key != "" { + err := c.RemoveIncompleteUpload(context.Background(), bucketName, objPartInfo.Key) + if err != nil { + return err + } + } + } + // objects are already deleted, clear the buckets now + err := c.RemoveBucket(context.Background(), bucketName) + if err != nil { + for obj := range c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{WithVersions: true, Recursive: true}) { + log.Println("found", obj.Key, obj.VersionID) + } + return err + } + return err +} + +func isErrNotImplemented(err error) bool { + return minio.ToErrorResponse(err).Code == "NotImplemented" +} + +func isRunOnFail() bool { + return os.Getenv("RUN_ON_FAIL") == "1" +} + +func init() { + // If server endpoint is not set, all tests default to + // using https://play.min.io + if os.Getenv(serverEndpoint) == "" { + os.Setenv(serverEndpoint, "play.min.io") + os.Setenv(accessKey, "Q3AM3UQ867SPQQA43P2F") + os.Setenv(secretKey, "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG") + os.Setenv(enableHTTPS, "1") + } +} + +var mintDataDir = os.Getenv("MINT_DATA_DIR") + +func getMintDataDirFilePath(filename string) (fp string) { + if mintDataDir == "" { + return + } + return filepath.Join(mintDataDir, filename) +} + +func newRandomReader(seed, size int64) io.Reader { + return io.LimitReader(rand.New(rand.NewSource(seed)), size) +} + +func mustCrcReader(r io.Reader) uint32 { + crc := 
crc32.NewIEEE() + _, err := io.Copy(crc, r) + if err != nil { + panic(err) + } + return crc.Sum32() +} + +func crcMatches(r io.Reader, want uint32) error { + crc := crc32.NewIEEE() + _, err := io.Copy(crc, r) + if err != nil { + panic(err) + } + got := crc.Sum32() + if got != want { + return fmt.Errorf("crc mismatch, want %x, got %x", want, got) + } + return nil +} + +func crcMatchesName(r io.Reader, name string) error { + want := dataFileCRC32[name] + crc := crc32.NewIEEE() + _, err := io.Copy(crc, r) + if err != nil { + panic(err) + } + got := crc.Sum32() + if got != want { + return fmt.Errorf("crc mismatch, want %x, got %x", want, got) + } + return nil +} + +// read data from file if it exists or optionally create a buffer of particular size +func getDataReader(fileName string) io.ReadCloser { + if mintDataDir == "" { + size := int64(dataFileMap[fileName]) + if _, ok := dataFileCRC32[fileName]; !ok { + dataFileCRC32[fileName] = mustCrcReader(newRandomReader(size, size)) + } + return ioutil.NopCloser(newRandomReader(size, size)) + } + reader, _ := os.Open(getMintDataDirFilePath(fileName)) + if _, ok := dataFileCRC32[fileName]; !ok { + dataFileCRC32[fileName] = mustCrcReader(reader) + reader.Close() + reader, _ = os.Open(getMintDataDirFilePath(fileName)) + } + return reader +} + +// randString generates random names and prepends them with a known prefix. +func randString(n int, src rand.Source, prefix string) string { + b := make([]byte, n) + // A rand.Int63() generates 63 random bits, enough for letterIdxMax letters! + for i, cache, remain := n-1, src.Int63(), letterIdxMax; i >= 0; { + if remain == 0 { + cache, remain = src.Int63(), letterIdxMax + } + if idx := int(cache & letterIdxMask); idx < len(letterBytes) { + b[i] = letterBytes[idx] + i-- + } + cache >>= letterIdxBits + remain-- + } + return prefix + string(b[0:30-len(prefix)]) +} + +var dataFileMap = map[string]int{ + "datafile-0-b": 0, + "datafile-1-b": 1, + "datafile-1-kB": 1 * humanize.KiByte, + "datafile-10-kB": 10 * humanize.KiByte, + "datafile-33-kB": 33 * humanize.KiByte, + "datafile-100-kB": 100 * humanize.KiByte, + "datafile-1.03-MB": 1056 * humanize.KiByte, + "datafile-1-MB": 1 * humanize.MiByte, + "datafile-5-MB": 5 * humanize.MiByte, + "datafile-6-MB": 6 * humanize.MiByte, + "datafile-11-MB": 11 * humanize.MiByte, + "datafile-65-MB": 65 * humanize.MiByte, + "datafile-129-MB": 129 * humanize.MiByte, +} + +var dataFileCRC32 = map[string]uint32{} + +func isFullMode() bool { + return os.Getenv("MINT_MODE") == "full" +} + +func getFuncName() string { + return getFuncNameLoc(2) +} + +func getFuncNameLoc(caller int) string { + pc, _, _, _ := runtime.Caller(caller) + return strings.TrimPrefix(runtime.FuncForPC(pc).Name(), "main.") +} + +// Tests bucket re-create errors. +func testMakeBucketError() { + region := "eu-central-1" + + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "MakeBucket(bucketName, region)" + // initialize logging params + args := map[string]interface{}{ + "bucketName": "", + "region": region, + } + + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object. 
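+	// All tests in this file construct the client the same way: endpoint,
+	// static V4 credentials and the TLS flag come from the env vars seeded
+	// in init(). minio.New itself performs no network I/O, so a bad
+	// endpoint or bad credentials only surface on the first API call below.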
+ c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket in 'eu-central-1'. + if err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: region}); err != nil { + logError(testName, function, args, startTime, "", "MakeBucket Failed", err) + return + } + defer cleanupBucket(bucketName, c) + + if err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: region}); err == nil { + logError(testName, function, args, startTime, "", "Bucket already exists", err) + return + } + // Verify valid error response from server. + if minio.ToErrorResponse(err).Code != "BucketAlreadyExists" && + minio.ToErrorResponse(err).Code != "BucketAlreadyOwnedByYou" { + logError(testName, function, args, startTime, "", "Invalid error returned by server", err) + return + } + + successLogger(testName, function, args, startTime).Info() +} + +func testMetadataSizeLimit() { + startTime := time.Now() + testName := getFuncName() + function := "PutObject(bucketName, objectName, reader, objectSize, opts)" + args := map[string]interface{}{ + "bucketName": "", + "objectName": "", + "opts.UserMetadata": "", + } + rand.Seed(startTime.Unix()) + + // Instantiate new minio client object. 
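+	// This test drives the two S3 limits asserted further down: user
+	// metadata is capped at 2 KiB and the full header set at 8 KiB, so a
+	// PutObject carrying an oversized metadata map is expected to fail,
+	// roughly (ctx, bucket and object are placeholders):
+	//
+	//	meta := map[string]string{"X-Amz-Meta-Big": strings.Repeat("m", 3<<10)}
+	//	_, err := c.PutObject(ctx, bucket, object, bytes.NewReader(nil), 0,
+	//		minio.PutObjectOptions{UserMetadata: meta}) // should be rejected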
+ c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client creation failed", err) + return + } + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + args["objectName"] = objectName + + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "Make bucket failed", err) + return + } + + defer cleanupBucket(bucketName, c) + + const HeaderSizeLimit = 8 * 1024 + const UserMetadataLimit = 2 * 1024 + + // Meta-data greater than the 2 KB limit of AWS - PUT calls with this meta-data should fail + metadata := make(map[string]string) + metadata["X-Amz-Meta-Mint-Test"] = string(bytes.Repeat([]byte("m"), 1+UserMetadataLimit-len("X-Amz-Meta-Mint-Test"))) + args["metadata"] = fmt.Sprint(metadata) + + _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(nil), 0, minio.PutObjectOptions{UserMetadata: metadata}) + if err == nil { + logError(testName, function, args, startTime, "", "Created object with user-defined metadata exceeding metadata size limits", nil) + return + } + + // Meta-data (headers) greater than the 8 KB limit of AWS - PUT calls with this meta-data should fail + metadata = make(map[string]string) + metadata["X-Amz-Mint-Test"] = string(bytes.Repeat([]byte("m"), 1+HeaderSizeLimit-len("X-Amz-Mint-Test"))) + args["metadata"] = fmt.Sprint(metadata) + _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(nil), 0, minio.PutObjectOptions{UserMetadata: metadata}) + if err == nil { + logError(testName, function, args, startTime, "", "Created object with headers exceeding header size limits", nil) + return + } + + successLogger(testName, function, args, startTime).Info() +} + +// Tests various bucket supported formats. +func testMakeBucketRegions() { + region := "eu-central-1" + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "MakeBucket(bucketName, region)" + // initialize logging params + args := map[string]interface{}{ + "bucketName": "", + "region": region, + } + + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object. + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket in 'eu-central-1'. 
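+	// MakeBucketOptions.Region pins the bucket's location; the second half
+	// of this test reuses it with a dotted bucket name, which the client
+	// must stage as a path-style request instead of virtual-host style:
+	//
+	//	err := c.MakeBucket(ctx, "bucket.with.period",
+	//		minio.MakeBucketOptions{Region: "us-west-2"}) // ctx is a placeholder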
+ if err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: region}); err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + // Delete all objects and buckets + if err = cleanupBucket(bucketName, c); err != nil { + logError(testName, function, args, startTime, "", "CleanupBucket failed", err) + return + } + + // Make a new bucket with '.' in its name, in 'us-west-2'. This + // request is internally staged into a path style instead of + // virtual host style. + region = "us-west-2" + args["region"] = region + if err = c.MakeBucket(context.Background(), bucketName+".withperiod", minio.MakeBucketOptions{Region: region}); err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + // Delete all objects and buckets + if err = cleanupBucket(bucketName+".withperiod", c); err != nil { + logError(testName, function, args, startTime, "", "CleanupBucket failed", err) + return + } + successLogger(testName, function, args, startTime).Info() +} + +// Test PutObject using a large data to trigger multipart readat +func testPutObjectReadAt() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "PutObject(bucketName, objectName, reader, opts)" + args := map[string]interface{}{ + "bucketName": "", + "objectName": "", + "opts": "objectContentType", + } + + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object. + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. 
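+	// The 129 MiB body uploaded below crosses minPartSize (lowered from
+	// 128 MiB to 16 MiB in this patch's constants.go hunk), so PutObject is
+	// exercised on its internal multipart path; the test then checks size,
+	// content type and CRC of the data read back.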
+ err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "Make bucket failed", err) + return + } + + defer cleanupBucket(bucketName, c) + + bufSize := dataFileMap["datafile-129-MB"] + reader := getDataReader("datafile-129-MB") + defer reader.Close() + + // Save the data + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + args["objectName"] = objectName + + // Object content type + objectContentType := "binary/octet-stream" + args["objectContentType"] = objectContentType + + _, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: objectContentType}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + + // Read the data back + r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "Get Object failed", err) + return + } + + st, err := r.Stat() + if err != nil { + logError(testName, function, args, startTime, "", "Stat Object failed", err) + return + } + if st.Size != int64(bufSize) { + logError(testName, function, args, startTime, "", fmt.Sprintf("Number of bytes in stat does not match, expected %d got %d", bufSize, st.Size), err) + return + } + if st.ContentType != objectContentType && st.ContentType != "application/octet-stream" { + logError(testName, function, args, startTime, "", "Content types don't match", err) + return + } + if err := crcMatchesName(r, "datafile-129-MB"); err != nil { + logError(testName, function, args, startTime, "", "data CRC check failed", err) + return + } + if err := r.Close(); err != nil { + logError(testName, function, args, startTime, "", "Object Close failed", err) + return + } + if err := r.Close(); err == nil { + logError(testName, function, args, startTime, "", "Object is already closed, didn't return error on Close", err) + return + } + + successLogger(testName, function, args, startTime).Info() +} + +func testListObjectVersions() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "ListObjectVersions(bucketName, prefix, recursive)" + args := map[string]interface{}{ + "bucketName": "", + "prefix": "", + "recursive": "", + } + + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object. + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. 
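+	// The bucket is created with object locking on and versioning enabled
+	// explicitly; after two puts and one delete of the same key, the
+	// versioned listing must report two versions plus one delete marker,
+	// iterated roughly as:
+	//
+	//	for v := range c.ListObjects(ctx, bucket,
+	//		minio.ListObjectsOptions{WithVersions: true, Recursive: true}) {
+	//		_ = v.VersionID // entries also carry IsDeleteMarker / IsLatest
+	//	}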
+ err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1", ObjectLocking: true}) + if err != nil { + logError(testName, function, args, startTime, "", "Make bucket failed", err) + return + } + + err = c.EnableVersioning(context.Background(), bucketName) + if err != nil { + logError(testName, function, args, startTime, "", "Enable versioning failed", err) + return + } + + // Save the data + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + args["objectName"] = objectName + + bufSize := dataFileMap["datafile-10-kB"] + reader := getDataReader("datafile-10-kB") + + _, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + reader.Close() + + bufSize = dataFileMap["datafile-1-b"] + reader = getDataReader("datafile-1-b") + _, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + reader.Close() + + err = c.RemoveObject(context.Background(), bucketName, objectName, minio.RemoveObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "Unexpected object deletion", err) + return + } + + var deleteMarkers, versions int + + objectsInfo := c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{WithVersions: true, Recursive: true}) + for info := range objectsInfo { + if info.Err != nil { + logError(testName, function, args, startTime, "", "Unexpected error during listing objects", err) + return + } + if info.Key != objectName { + logError(testName, function, args, startTime, "", "Unexpected object name in listing objects", nil) + return + } + if info.VersionID == "" { + logError(testName, function, args, startTime, "", "Unexpected version id in listing objects", nil) + return + } + if info.IsDeleteMarker { + deleteMarkers++ + if !info.IsLatest { + logError(testName, function, args, startTime, "", "Unexpected IsLatest field in listing objects", nil) + return + } + } else { + versions++ + } + } + + if deleteMarkers != 1 { + logError(testName, function, args, startTime, "", "Unexpected number of DeleteMarker elements in listing objects", nil) + return + } + + if versions != 2 { + logError(testName, function, args, startTime, "", "Unexpected number of Version elements in listing objects", nil) + return + } + + // Delete all objects and their versions as long as the bucket itself + if err = cleanupVersionedBucket(bucketName, c); err != nil { + logError(testName, function, args, startTime, "", "CleanupBucket failed", err) + return + } + + successLogger(testName, function, args, startTime).Info() +} + +func testStatObjectWithVersioning() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "StatObject" + args := map[string]interface{}{} + + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object. + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + // Enable tracing, write to stderr. 
+ // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1", ObjectLocking: true}) + if err != nil { + logError(testName, function, args, startTime, "", "Make bucket failed", err) + return + } + + err = c.EnableVersioning(context.Background(), bucketName) + if err != nil { + logError(testName, function, args, startTime, "", "Enable versioning failed", err) + return + } + + // Save the data + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + args["objectName"] = objectName + + bufSize := dataFileMap["datafile-10-kB"] + reader := getDataReader("datafile-10-kB") + + _, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + reader.Close() + + bufSize = dataFileMap["datafile-1-b"] + reader = getDataReader("datafile-1-b") + _, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + reader.Close() + + objectsInfo := c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{WithVersions: true, Recursive: true}) + + var results []minio.ObjectInfo + for info := range objectsInfo { + if info.Err != nil { + logError(testName, function, args, startTime, "", "Unexpected error during listing objects", err) + return + } + results = append(results, info) + } + + if len(results) != 2 { + logError(testName, function, args, startTime, "", "Unexpected number of Version elements in listing objects", nil) + return + } + + for i := 0; i < len(results); i++ { + opts := minio.StatObjectOptions{VersionID: results[i].VersionID} + statInfo, err := c.StatObject(context.Background(), bucketName, objectName, opts) + if err != nil { + logError(testName, function, args, startTime, "", "error during HEAD object", err) + return + } + if statInfo.VersionID == "" || statInfo.VersionID != results[i].VersionID { + logError(testName, function, args, startTime, "", "error during HEAD object, unexpected version id", err) + return + } + if statInfo.ETag != results[i].ETag { + logError(testName, function, args, startTime, "", "error during HEAD object, unexpected ETag", err) + return + } + if statInfo.LastModified.Unix() != results[i].LastModified.Unix() { + logError(testName, function, args, startTime, "", "error during HEAD object, unexpected Last-Modified", err) + return + } + if statInfo.Size != results[i].Size { + logError(testName, function, args, startTime, "", "error during HEAD object, unexpected Content-Length", err) + return + } + } + + // Delete all objects and their versions as long as the bucket itself + if err = cleanupVersionedBucket(bucketName, c); err != nil { + logError(testName, function, args, startTime, "", "CleanupBucket failed", err) + return + } + + successLogger(testName, function, args, startTime).Info() +} + +func testGetObjectWithVersioning() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "GetObject()" + args := map[string]interface{}{} + + // Seed random based 
on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object. + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1", ObjectLocking: true}) + if err != nil { + logError(testName, function, args, startTime, "", "Make bucket failed", err) + return + } + + err = c.EnableVersioning(context.Background(), bucketName) + if err != nil { + logError(testName, function, args, startTime, "", "Enable versioning failed", err) + return + } + + // Save the data + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + args["objectName"] = objectName + + // Save the contents of datafiles to check with GetObject() reader output later + var buffers [][]byte + testFiles := []string{"datafile-1-b", "datafile-10-kB"} + + for _, testFile := range testFiles { + r := getDataReader(testFile) + buf, err := ioutil.ReadAll(r) + if err != nil { + logError(testName, function, args, startTime, "", "unexpected failure", err) + return + } + r.Close() + _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + buffers = append(buffers, buf) + } + + objectsInfo := c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{WithVersions: true, Recursive: true}) + + var results []minio.ObjectInfo + for info := range objectsInfo { + if info.Err != nil { + logError(testName, function, args, startTime, "", "Unexpected error during listing objects", err) + return + } + results = append(results, info) + } + + if len(results) != 2 { + logError(testName, function, args, startTime, "", "Unexpected number of Version elements in listing objects", nil) + return + } + + sort.SliceStable(results, func(i, j int) bool { + return results[i].Size < results[j].Size + }) + + sort.SliceStable(buffers, func(i, j int) bool { + return len(buffers[i]) < len(buffers[j]) + }) + + for i := 0; i < len(results); i++ { + opts := minio.GetObjectOptions{VersionID: results[i].VersionID} + reader, err := c.GetObject(context.Background(), bucketName, objectName, opts) + if err != nil { + logError(testName, function, args, startTime, "", "error during GET object", err) + return + } + statInfo, err := reader.Stat() + if err != nil { + logError(testName, function, args, startTime, "", "error during calling reader.Stat()", err) + return + } + if statInfo.ETag != results[i].ETag { + logError(testName, function, args, startTime, "", "error during HEAD object, unexpected ETag", err) + return + } + if statInfo.LastModified.Unix() != results[i].LastModified.Unix() { + logError(testName, function, args, startTime, "", "error during HEAD object, unexpected Last-Modified", err) + return + } + if statInfo.Size != results[i].Size { + 
logError(testName, function, args, startTime, "", "error during HEAD object, unexpected Content-Length", err) + return + } + + tmpBuffer := bytes.NewBuffer([]byte{}) + _, err = io.Copy(tmpBuffer, reader) + if err != nil { + logError(testName, function, args, startTime, "", "unexpected io.Copy()", err) + return + } + + if !bytes.Equal(tmpBuffer.Bytes(), buffers[i]) { + logError(testName, function, args, startTime, "", "unexpected content of GetObject()", err) + return + } + } + + // Delete all objects and their versions as long as the bucket itself + if err = cleanupVersionedBucket(bucketName, c); err != nil { + logError(testName, function, args, startTime, "", "CleanupBucket failed", err) + return + } + + successLogger(testName, function, args, startTime).Info() +} + +func testPutObjectWithVersioning() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "GetObject()" + args := map[string]interface{}{} + + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object. + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1", ObjectLocking: true}) + if err != nil { + logError(testName, function, args, startTime, "", "Make bucket failed", err) + return + } + + err = c.EnableVersioning(context.Background(), bucketName) + if err != nil { + logError(testName, function, args, startTime, "", "Enable versioning failed", err) + return + } + + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + args["objectName"] = objectName + + const n = 10 + // Read input... + + // Save the data concurrently. 
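+	// Ten goroutines below write differently sized buffers to the same key,
+	// which should mint ten distinct versions; PartSize: 5 << 20 pins the
+	// part size at the 5 MiB absMinPartSize so larger buffers go multipart.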
+ var wg sync.WaitGroup + wg.Add(n) + buffers := make([][]byte, n) + var errs [n]error + for i := 0; i < n; i++ { + r := newRandomReader(int64((1<<20)*i+i), int64(i)) + buf, err := ioutil.ReadAll(r) + if err != nil { + logError(testName, function, args, startTime, "", "unexpected failure", err) + return + } + buffers[i] = buf + + go func(i int) { + defer wg.Done() + _, errs[i] = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{PartSize: 5 << 20}) + }(i) + } + wg.Wait() + for _, err := range errs { + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + } + + objectsInfo := c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{WithVersions: true, Recursive: true}) + var results []minio.ObjectInfo + for info := range objectsInfo { + if info.Err != nil { + logError(testName, function, args, startTime, "", "Unexpected error during listing objects", err) + return + } + results = append(results, info) + } + + if len(results) != n { + logError(testName, function, args, startTime, "", "Unexpected number of Version elements in listing objects", nil) + return + } + + sort.Slice(results, func(i, j int) bool { + return results[i].Size < results[j].Size + }) + + sort.Slice(buffers, func(i, j int) bool { + return len(buffers[i]) < len(buffers[j]) + }) + + for i := 0; i < len(results); i++ { + opts := minio.GetObjectOptions{VersionID: results[i].VersionID} + reader, err := c.GetObject(context.Background(), bucketName, objectName, opts) + if err != nil { + logError(testName, function, args, startTime, "", "error during GET object", err) + return + } + statInfo, err := reader.Stat() + if err != nil { + logError(testName, function, args, startTime, "", "error during calling reader.Stat()", err) + return + } + if statInfo.ETag != results[i].ETag { + logError(testName, function, args, startTime, "", "error during HEAD object, unexpected ETag", err) + return + } + if statInfo.LastModified.Unix() != results[i].LastModified.Unix() { + logError(testName, function, args, startTime, "", "error during HEAD object, unexpected Last-Modified", err) + return + } + if statInfo.Size != results[i].Size { + logError(testName, function, args, startTime, "", "error during HEAD object, unexpected Content-Length", err) + return + } + + tmpBuffer := bytes.NewBuffer([]byte{}) + _, err = io.Copy(tmpBuffer, reader) + if err != nil { + logError(testName, function, args, startTime, "", "unexpected io.Copy()", err) + return + } + + if !bytes.Equal(tmpBuffer.Bytes(), buffers[i]) { + logError(testName, function, args, startTime, "", "unexpected content of GetObject()", err) + return + } + } + + // Delete all objects and their versions as long as the bucket itself + if err = cleanupVersionedBucket(bucketName, c); err != nil { + logError(testName, function, args, startTime, "", "CleanupBucket failed", err) + return + } + + successLogger(testName, function, args, startTime).Info() +} + +func testCopyObjectWithVersioning() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "CopyObject()" + args := map[string]interface{}{} + + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object. 
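+	// This test pins the CopyObject source to the oldest version via
+	// CopySrcOptions.VersionID and verifies the destination bytes match it:
+	//
+	//	src := minio.CopySrcOptions{Bucket: b, Object: o, VersionID: vid}
+	//	dst := minio.CopyDestOptions{Bucket: b, Object: o + "-copy"}
+	//	_, err := c.CopyObject(ctx, dst, src) // b, o, vid, ctx: placeholders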
+ c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1", ObjectLocking: true}) + if err != nil { + logError(testName, function, args, startTime, "", "Make bucket failed", err) + return + } + + err = c.EnableVersioning(context.Background(), bucketName) + if err != nil { + logError(testName, function, args, startTime, "", "Enable versioning failed", err) + return + } + + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + args["objectName"] = objectName + + testFiles := []string{"datafile-1-b", "datafile-10-kB"} + for _, testFile := range testFiles { + r := getDataReader(testFile) + buf, err := ioutil.ReadAll(r) + if err != nil { + logError(testName, function, args, startTime, "", "unexpected failure", err) + return + } + r.Close() + _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + } + + objectsInfo := c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{WithVersions: true, Recursive: true}) + var infos []minio.ObjectInfo + for info := range objectsInfo { + if info.Err != nil { + logError(testName, function, args, startTime, "", "Unexpected error during listing objects", err) + return + } + infos = append(infos, info) + } + + sort.Slice(infos, func(i, j int) bool { + return infos[i].Size < infos[j].Size + }) + + reader, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{VersionID: infos[0].VersionID}) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject of the oldest version content failed", err) + return + } + + oldestContent, err := ioutil.ReadAll(reader) + if err != nil { + logError(testName, function, args, startTime, "", "Reading the oldest object version failed", err) + return + } + + // Copy Source + srcOpts := minio.CopySrcOptions{ + Bucket: bucketName, + Object: objectName, + VersionID: infos[0].VersionID, + } + args["src"] = srcOpts + + dstOpts := minio.CopyDestOptions{ + Bucket: bucketName, + Object: objectName + "-copy", + } + args["dst"] = dstOpts + + // Perform the Copy + if _, err = c.CopyObject(context.Background(), dstOpts, srcOpts); err != nil { + logError(testName, function, args, startTime, "", "CopyObject failed", err) + return + } + + // Destination object + readerCopy, err := c.GetObject(context.Background(), bucketName, objectName+"-copy", minio.GetObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject failed", err) + return + } + defer readerCopy.Close() + + newestContent, err := ioutil.ReadAll(readerCopy) + if err != nil { + logError(testName, function, args, startTime, "", "Reading from GetObject reader failed", err) + return + } + + if 
len(newestContent) == 0 || !bytes.Equal(oldestContent, newestContent) { + logError(testName, function, args, startTime, "", "Unexpected destination object content", err) + return + } + + // Delete all objects and their versions as long as the bucket itself + if err = cleanupVersionedBucket(bucketName, c); err != nil { + logError(testName, function, args, startTime, "", "CleanupBucket failed", err) + return + } + + successLogger(testName, function, args, startTime).Info() +} + +func testConcurrentCopyObjectWithVersioning() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "CopyObject()" + args := map[string]interface{}{} + + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object. + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1", ObjectLocking: true}) + if err != nil { + logError(testName, function, args, startTime, "", "Make bucket failed", err) + return + } + + err = c.EnableVersioning(context.Background(), bucketName) + if err != nil { + logError(testName, function, args, startTime, "", "Enable versioning failed", err) + return + } + + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + args["objectName"] = objectName + + testFiles := []string{"datafile-10-kB"} + for _, testFile := range testFiles { + r := getDataReader(testFile) + buf, err := ioutil.ReadAll(r) + if err != nil { + logError(testName, function, args, startTime, "", "unexpected failure", err) + return + } + r.Close() + _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + } + + objectsInfo := c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{WithVersions: true, Recursive: true}) + var infos []minio.ObjectInfo + for info := range objectsInfo { + if info.Err != nil { + logError(testName, function, args, startTime, "", "Unexpected error during listing objects", err) + return + } + infos = append(infos, info) + } + + sort.Slice(infos, func(i, j int) bool { + return infos[i].Size < infos[j].Size + }) + + reader, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{VersionID: infos[0].VersionID}) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject of the oldest version content failed", err) + return + } + + oldestContent, err := ioutil.ReadAll(reader) + if err != nil { + logError(testName, function, args, startTime, "", "Reading the oldest object version failed", err) + return + } + + // Copy Source + srcOpts := minio.CopySrcOptions{ + Bucket: bucketName, + Object: objectName, + VersionID: infos[0].VersionID, + } + args["src"] = srcOpts 
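+	// The same source version is copied from ten goroutines below; every
+	// successful CopyObject should create a fresh version of the
+	// destination key, which the follow-up versioned listing counts.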
+ + dstOpts := minio.CopyDestOptions{ + Bucket: bucketName, + Object: objectName + "-copy", + } + args["dst"] = dstOpts + + // Perform the Copy concurrently + const n = 10 + var wg sync.WaitGroup + wg.Add(n) + var errs [n]error + for i := 0; i < n; i++ { + go func(i int) { + defer wg.Done() + _, errs[i] = c.CopyObject(context.Background(), dstOpts, srcOpts) + }(i) + } + wg.Wait() + for _, err := range errs { + if err != nil { + logError(testName, function, args, startTime, "", "CopyObject failed", err) + return + } + } + + objectsInfo = c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{WithVersions: true, Recursive: false, Prefix: dstOpts.Object}) + infos = []minio.ObjectInfo{} + for info := range objectsInfo { + // Destination object + readerCopy, err := c.GetObject(context.Background(), bucketName, objectName+"-copy", minio.GetObjectOptions{VersionID: info.VersionID}) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject failed", err) + return + } + defer readerCopy.Close() + + newestContent, err := ioutil.ReadAll(readerCopy) + if err != nil { + logError(testName, function, args, startTime, "", "Reading from GetObject reader failed", err) + return + } + + if len(newestContent) == 0 || !bytes.Equal(oldestContent, newestContent) { + logError(testName, function, args, startTime, "", "Unexpected destination object content", err) + return + } + infos = append(infos, info) + } + + if len(infos) != n { + logError(testName, function, args, startTime, "", "Unexpected number of Version elements in listing objects", nil) + return + } + + // Delete all objects and their versions as long as the bucket itself + if err = cleanupVersionedBucket(bucketName, c); err != nil { + logError(testName, function, args, startTime, "", "CleanupBucket failed", err) + return + } + + successLogger(testName, function, args, startTime).Info() +} + +func testComposeObjectWithVersioning() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "ComposeObject()" + args := map[string]interface{}{} + + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object. + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. 
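+	// ComposeObject later in this test concatenates two versions of one key
+	// by passing one CopySrcOptions per source, each pinned with VersionID;
+	// the destination must equal the sources' bytes in argument order.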
+ err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1", ObjectLocking: true}) + if err != nil { + logError(testName, function, args, startTime, "", "Make bucket failed", err) + return + } + + err = c.EnableVersioning(context.Background(), bucketName) + if err != nil { + logError(testName, function, args, startTime, "", "Enable versioning failed", err) + return + } + + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + args["objectName"] = objectName + + // var testFiles = []string{"datafile-5-MB", "datafile-10-kB"} + testFiles := []string{"datafile-5-MB", "datafile-10-kB"} + var testFilesBytes [][]byte + + for _, testFile := range testFiles { + r := getDataReader(testFile) + buf, err := ioutil.ReadAll(r) + if err != nil { + logError(testName, function, args, startTime, "", "unexpected failure", err) + return + } + r.Close() + _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + testFilesBytes = append(testFilesBytes, buf) + } + + objectsInfo := c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{WithVersions: true, Recursive: true}) + + var results []minio.ObjectInfo + for info := range objectsInfo { + if info.Err != nil { + logError(testName, function, args, startTime, "", "Unexpected error during listing objects", err) + return + } + results = append(results, info) + } + + sort.SliceStable(results, func(i, j int) bool { + return results[i].Size > results[j].Size + }) + + // Source objects to concatenate. We also specify decryption + // key for each + src1 := minio.CopySrcOptions{ + Bucket: bucketName, + Object: objectName, + VersionID: results[0].VersionID, + } + + src2 := minio.CopySrcOptions{ + Bucket: bucketName, + Object: objectName, + VersionID: results[1].VersionID, + } + + dst := minio.CopyDestOptions{ + Bucket: bucketName, + Object: objectName + "-copy", + } + + _, err = c.ComposeObject(context.Background(), dst, src1, src2) + if err != nil { + logError(testName, function, args, startTime, "", "ComposeObject failed", err) + return + } + + // Destination object + readerCopy, err := c.GetObject(context.Background(), bucketName, objectName+"-copy", minio.GetObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject of the copy object failed", err) + return + } + defer readerCopy.Close() + + copyContentBytes, err := ioutil.ReadAll(readerCopy) + if err != nil { + logError(testName, function, args, startTime, "", "Reading from the copy object reader failed", err) + return + } + + var expectedContent []byte + for _, fileBytes := range testFilesBytes { + expectedContent = append(expectedContent, fileBytes...) 
+ } + + if len(copyContentBytes) == 0 || !bytes.Equal(copyContentBytes, expectedContent) { + logError(testName, function, args, startTime, "", "Unexpected destination object content", err) + return + } + + // Delete all objects and their versions as long as the bucket itself + if err = cleanupVersionedBucket(bucketName, c); err != nil { + logError(testName, function, args, startTime, "", "CleanupBucket failed", err) + return + } + + successLogger(testName, function, args, startTime).Info() +} + +func testRemoveObjectWithVersioning() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "DeleteObject()" + args := map[string]interface{}{} + + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object. + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1", ObjectLocking: true}) + if err != nil { + logError(testName, function, args, startTime, "", "Make bucket failed", err) + return + } + + err = c.EnableVersioning(context.Background(), bucketName) + if err != nil { + logError(testName, function, args, startTime, "", "Enable versioning failed", err) + return + } + + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + args["objectName"] = objectName + + _, err = c.PutObject(context.Background(), bucketName, objectName, getDataReader("datafile-10-kB"), int64(dataFileMap["datafile-10-kB"]), minio.PutObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + + objectsInfo := c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{WithVersions: true, Recursive: true}) + var version minio.ObjectInfo + for info := range objectsInfo { + if info.Err != nil { + logError(testName, function, args, startTime, "", "Unexpected error during listing objects", err) + return + } + version = info + break + } + + err = c.RemoveObject(context.Background(), bucketName, objectName, minio.RemoveObjectOptions{VersionID: version.VersionID}) + if err != nil { + logError(testName, function, args, startTime, "", "DeleteObject failed", err) + return + } + + objectsInfo = c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{WithVersions: true, Recursive: true}) + for range objectsInfo { + logError(testName, function, args, startTime, "", "Unexpected versioning info, should not have any one ", err) + return + } + + err = c.RemoveBucket(context.Background(), bucketName) + if err != nil { + logError(testName, function, args, startTime, "", "CleanupBucket failed", err) + return + } + + successLogger(testName, function, args, startTime).Info() +} + +func testRemoveObjectsWithVersioning() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "DeleteObjects()" + args := 
+
+	// Seed random based on current time.
+	rand.Seed(time.Now().Unix())
+
+	// Instantiate new minio client object.
+	c, err := minio.New(os.Getenv(serverEndpoint),
+		&minio.Options{
+			Creds:  credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Secure: mustParseBool(os.Getenv(enableHTTPS)),
+		})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
+		return
+	}
+
+	// Enable tracing, write to stderr.
+	// c.TraceOn(os.Stderr)
+
+	// Set user agent.
+	c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
+
+	// Generate a new random bucket name.
+	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+	args["bucketName"] = bucketName
+
+	// Make a new bucket.
+	err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1", ObjectLocking: true})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "Make bucket failed", err)
+		return
+	}
+
+	err = c.EnableVersioning(context.Background(), bucketName)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "Enable versioning failed", err)
+		return
+	}
+
+	objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+	args["objectName"] = objectName
+
+	_, err = c.PutObject(context.Background(), bucketName, objectName, getDataReader("datafile-10-kB"), int64(dataFileMap["datafile-10-kB"]), minio.PutObjectOptions{})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "PutObject failed", err)
+		return
+	}
+
+	objectsVersions := make(chan minio.ObjectInfo)
+	go func() {
+		objectsVersionsInfo := c.ListObjects(context.Background(), bucketName,
+			minio.ListObjectsOptions{WithVersions: true, Recursive: true})
+		for info := range objectsVersionsInfo {
+			if info.Err != nil {
+				logError(testName, function, args, startTime, "", "Unexpected error during listing objects", info.Err)
+				return
+			}
+			objectsVersions <- info
+		}
+		close(objectsVersions)
+	}()
+
+	// RemoveObjects returns an error channel; failures surface on it below.
+	removeErrors := c.RemoveObjects(context.Background(), bucketName, objectsVersions, minio.RemoveObjectsOptions{})
+
+	for e := range removeErrors {
+		if e.Err != nil {
+			logError(testName, function, args, startTime, "", "Single delete operation failed", e.Err)
+			return
+		}
+	}
+
+	objectsVersionsInfo := c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{WithVersions: true, Recursive: true})
+	for range objectsVersionsInfo {
+		logError(testName, function, args, startTime, "", "Unexpected version listing, the bucket should not contain any version", nil)
+		return
+	}
+
+	err = c.RemoveBucket(context.Background(), bucketName)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "CleanupBucket failed", err)
+		return
+	}
+
+	successLogger(testName, function, args, startTime).Info()
+}
+
+func testObjectTaggingWithVersioning() {
+	// initialize logging params
+	startTime := time.Now()
+	testName := getFuncName()
+	function := "{Get,Set,Remove}ObjectTagging()"
+	args := map[string]interface{}{}
+
+	// Seed random based on current time.
+	rand.Seed(time.Now().Unix())
+
+	// Instantiate new minio client object.
+	c, err := minio.New(os.Getenv(serverEndpoint),
+		&minio.Options{
+			Creds:  credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Secure: mustParseBool(os.Getenv(enableHTTPS)),
+		})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
+		return
+	}
+
+	// Enable tracing, write to stderr.
+	// c.TraceOn(os.Stderr)
+
+	// Set user agent.
+	c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
+
+	// Generate a new random bucket name.
+	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+	args["bucketName"] = bucketName
+
+	// Make a new bucket.
+	err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1", ObjectLocking: true})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "Make bucket failed", err)
+		return
+	}
+
+	err = c.EnableVersioning(context.Background(), bucketName)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "Enable versioning failed", err)
+		return
+	}
+
+	objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+	args["objectName"] = objectName
+
+	for _, file := range []string{"datafile-1-b", "datafile-10-kB"} {
+		_, err = c.PutObject(context.Background(), bucketName, objectName, getDataReader(file), int64(dataFileMap[file]), minio.PutObjectOptions{})
+		if err != nil {
+			logError(testName, function, args, startTime, "", "PutObject failed", err)
+			return
+		}
+	}
+
+	versionsInfo := c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{WithVersions: true, Recursive: true})
+
+	var versions []minio.ObjectInfo
+	for info := range versionsInfo {
+		if info.Err != nil {
+			logError(testName, function, args, startTime, "", "Unexpected error during listing objects", info.Err)
+			return
+		}
+		versions = append(versions, info)
+	}
+
+	sort.SliceStable(versions, func(i, j int) bool {
+		return versions[i].Size < versions[j].Size
+	})
+
+	tagsV1 := map[string]string{"key1": "val1"}
+	t1, err := tags.MapToObjectTags(tagsV1)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "MapToObjectTags (1) failed", err)
+		return
+	}
+
+	err = c.PutObjectTagging(context.Background(), bucketName, objectName, t1, minio.PutObjectTaggingOptions{VersionID: versions[0].VersionID})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "PutObjectTagging (1) failed", err)
+		return
+	}
+
+	tagsV2 := map[string]string{"key2": "val2"}
+	t2, err := tags.MapToObjectTags(tagsV2)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "MapToObjectTags (2) failed", err)
+		return
+	}
+
+	err = c.PutObjectTagging(context.Background(), bucketName, objectName, t2, minio.PutObjectTaggingOptions{VersionID: versions[1].VersionID})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "PutObjectTagging (2) failed", err)
+		return
+	}
+
+	tagsEqual := func(tags1, tags2 map[string]string) bool {
+		for k1, v1 := range tags1 {
+			v2, found := tags2[k1]
+			if found {
+				if v1 != v2 {
+					return false
+				}
+			}
+		}
+		return true
+	}
+
+	gotTagsV1, err := c.GetObjectTagging(context.Background(), bucketName, objectName, minio.GetObjectTaggingOptions{VersionID: versions[0].VersionID})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "GetObjectTagging failed", err)
+		return
+	}
+
+	if !tagsEqual(t1.ToMap(), gotTagsV1.ToMap()) {
+		logError(testName, function, args, startTime, "", "Unexpected tags content (1)", err)
+		return
+	}
+
+	gotTagsV2, err := c.GetObjectTagging(context.Background(), bucketName, objectName, minio.GetObjectTaggingOptions{})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "GetObjectTagging failed", err)
+		return
+	}
+
+	if !tagsEqual(t2.ToMap(), gotTagsV2.ToMap()) {
+		logError(testName, function, args, startTime, "", "Unexpected tags content (2)", err)
+		return
+	}
+
+	err = c.RemoveObjectTagging(context.Background(), bucketName, objectName, minio.RemoveObjectTaggingOptions{VersionID: versions[0].VersionID})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "RemoveObjectTagging failed", err)
+		return
+	}
+
+	emptyTags, err := c.GetObjectTagging(context.Background(), bucketName, objectName,
+		minio.GetObjectTaggingOptions{VersionID: versions[0].VersionID})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "GetObjectTagging failed", err)
+		return
+	}
+
+	if len(emptyTags.ToMap()) != 0 {
+		logError(testName, function, args, startTime, "", "Expected empty tags after RemoveObjectTagging", nil)
+		return
+	}
+
+	// Delete all objects and their versions as well as the bucket itself
+	if err = cleanupVersionedBucket(bucketName, c); err != nil {
+		logError(testName, function, args, startTime, "", "CleanupBucket failed", err)
+		return
+	}
+
+	successLogger(testName, function, args, startTime).Info()
+}
+
+// Test PutObject with large data to trigger a multipart upload
+func testPutObjectWithMetadata() {
+	// initialize logging params
+	startTime := time.Now()
+	testName := getFuncName()
+	function := "PutObject(bucketName, objectName, reader,size, opts)"
+	args := map[string]interface{}{
+		"bucketName": "",
+		"objectName": "",
+		"opts":       "minio.PutObjectOptions{UserMetadata: metadata, Progress: progress}",
+	}
+
+	if !isFullMode() {
+		ignoredLog(testName, function, args, startTime, "Skipping functional tests for short/quick runs").Info()
+		return
+	}
+
+	// Seed random based on current time.
+	rand.Seed(time.Now().Unix())
+
+	// Instantiate new minio client object.
+	c, err := minio.New(os.Getenv(serverEndpoint),
+		&minio.Options{
+			Creds:  credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Secure: mustParseBool(os.Getenv(enableHTTPS)),
+		})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
+		return
+	}
+
+	// Enable tracing, write to stderr.
+	// c.TraceOn(os.Stderr)
+
+	// Set user agent.
+	c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
+
+	// Generate a new random bucket name.
+	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+	args["bucketName"] = bucketName
+
+	// Make a new bucket.
+	err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "Make bucket failed", err)
+		return
+	}
+
+	defer cleanupBucket(bucketName, c)
+
+	bufSize := dataFileMap["datafile-129-MB"]
+	reader := getDataReader("datafile-129-MB")
+	defer reader.Close()
+
+	// Save the data
+	objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+	args["objectName"] = objectName
+
+	// Object custom metadata
+	customContentType := "custom/contenttype"
+
+	args["metadata"] = map[string][]string{
+		"Content-Type":         {customContentType},
+		"X-Amz-Meta-CustomKey": {"extra spaces in value"},
+	}
+
+	_, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{
+		ContentType: customContentType,
+	})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "PutObject failed", err)
+		return
+	}
+
+	// Read the data back
+	r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "GetObject failed", err)
+		return
+	}
+
+	st, err := r.Stat()
+	if err != nil {
+		logError(testName, function, args, startTime, "", "Stat failed", err)
+		return
+	}
+	if st.Size != int64(bufSize) {
+		logError(testName, function, args, startTime, "", fmt.Sprintf("Number of bytes returned by PutObject does not match GetObject, expected %d got %d", bufSize, st.Size), err)
+		return
+	}
+	if st.ContentType != customContentType && st.ContentType != "application/octet-stream" {
+		logError(testName, function, args, startTime, "", "ContentType does not match, expected "+customContentType+" got "+st.ContentType, err)
+		return
+	}
+	if err := crcMatchesName(r, "datafile-129-MB"); err != nil {
+		logError(testName, function, args, startTime, "", "data CRC check failed", err)
+		return
+	}
+	if err := r.Close(); err != nil {
+		logError(testName, function, args, startTime, "", "Object Close failed", err)
+		return
+	}
+	if err := r.Close(); err == nil {
+		logError(testName, function, args, startTime, "", "Object already closed, should respond with error", err)
+		return
+	}
+
+	successLogger(testName, function, args, startTime).Info()
+}
+
+func testPutObjectWithContentLanguage() {
+	// initialize logging params
+	objectName := "test-object"
+	startTime := time.Now()
+	testName := getFuncName()
+	function := "PutObject(bucketName, objectName, reader, size, opts)"
+	args := map[string]interface{}{
+		"bucketName": "",
+		"objectName": objectName,
+		"size":       -1,
+		"opts":       "",
+	}
+
+	// Seed random based on current time.
+	rand.Seed(time.Now().Unix())
+
+	// Instantiate new minio client object.
+	c, err := minio.New(os.Getenv(serverEndpoint),
+		&minio.Options{
+			Creds:  credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Secure: mustParseBool(os.Getenv(enableHTTPS)),
+		})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
+		return
+	}
+
+	// Enable tracing, write to stderr.
+	// c.TraceOn(os.Stderr)
+
+	// Set user agent.
+	c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
+
+	// Generate a new random bucket name.
+	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+	args["bucketName"] = bucketName
+	// Make a new bucket.
+ err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + defer cleanupBucket(bucketName, c) + + data := []byte{} + _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(data), int64(0), minio.PutObjectOptions{ + ContentLanguage: "en", + }) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + + objInfo, err := c.StatObject(context.Background(), bucketName, objectName, minio.StatObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "StatObject failed", err) + return + } + + if objInfo.Metadata.Get("Content-Language") != "en" { + logError(testName, function, args, startTime, "", "Expected content-language 'en' doesn't match with StatObject return value", err) + return + } + + successLogger(testName, function, args, startTime).Info() +} + +// Test put object with streaming signature. +func testPutObjectStreaming() { + // initialize logging params + objectName := "test-object" + startTime := time.Now() + testName := getFuncName() + function := "PutObject(bucketName, objectName, reader,size,opts)" + args := map[string]interface{}{ + "bucketName": "", + "objectName": objectName, + "size": -1, + "opts": "", + } + + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object. + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + // Make a new bucket. + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + defer cleanupBucket(bucketName, c) + + // Upload an object. + sizes := []int64{0, 64*1024 - 1, 64 * 1024} + + for _, size := range sizes { + data := newRandomReader(size, size) + ui, err := c.PutObject(context.Background(), bucketName, objectName, data, int64(size), minio.PutObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObjectStreaming failed", err) + return + } + + if ui.Size != size { + logError(testName, function, args, startTime, "", "PutObjectStreaming result has unexpected size", nil) + return + } + + objInfo, err := c.StatObject(context.Background(), bucketName, objectName, minio.StatObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "StatObject failed", err) + return + } + if objInfo.Size != size { + logError(testName, function, args, startTime, "", "Unexpected size", err) + return + } + + } + + successLogger(testName, function, args, startTime).Info() +} + +// Test get object seeker from the end, using whence set to '2'. 
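+// whence '2' corresponds to io.SeekEnd: a negative offset addresses the
+// object's trailing bytes, which is what this test exercises.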
+func testGetObjectSeekEnd() {
+	// initialize logging params
+	startTime := time.Now()
+	testName := getFuncName()
+	function := "GetObject(bucketName, objectName)"
+	args := map[string]interface{}{}
+
+	// Seed random based on current time.
+	rand.Seed(time.Now().Unix())
+
+	// Instantiate new minio client object.
+	c, err := minio.New(os.Getenv(serverEndpoint),
+		&minio.Options{
+			Creds:  credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Secure: mustParseBool(os.Getenv(enableHTTPS)),
+		})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
+		return
+	}
+
+	// Enable tracing, write to stderr.
+	// c.TraceOn(os.Stderr)
+
+	// Set user agent.
+	c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
+
+	// Generate a new random bucket name.
+	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+	args["bucketName"] = bucketName
+
+	// Make a new bucket.
+	err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+		return
+	}
+
+	defer cleanupBucket(bucketName, c)
+
+	// Generate 33K of data.
+	bufSize := dataFileMap["datafile-33-kB"]
+	reader := getDataReader("datafile-33-kB")
+	defer reader.Close()
+
+	// Save the data
+	objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+	args["objectName"] = objectName
+
+	buf, err := ioutil.ReadAll(reader)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "ReadAll failed", err)
+		return
+	}
+
+	_, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "PutObject failed", err)
+		return
+	}
+
+	// Read the data back
+	r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "GetObject failed", err)
+		return
+	}
+
+	st, err := r.Stat()
+	if err != nil {
+		logError(testName, function, args, startTime, "", "Stat failed", err)
+		return
+	}
+
+	if st.Size != int64(bufSize) {
+		logError(testName, function, args, startTime, "", fmt.Sprintf("Number of bytes read does not match, expected %d got %d", bufSize, st.Size), err)
+		return
+	}
+
+	pos, err := r.Seek(-100, 2)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "Object Seek failed", err)
+		return
+	}
+	if pos != st.Size-100 {
+		logError(testName, function, args, startTime, "", "Incorrect position", err)
+		return
+	}
+	buf2 := make([]byte, 100)
+	m, err := readFull(r, buf2)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "Error reading through readFull", err)
+		return
+	}
+	if m != len(buf2) {
+		logError(testName, function, args, startTime, "", fmt.Sprintf("Number of bytes don't match, expected %d got %d", len(buf2), m), err)
+		return
+	}
+	hexBuf1 := fmt.Sprintf("%02x", buf[len(buf)-100:])
+	hexBuf2 := fmt.Sprintf("%02x", buf2[:m])
+	if hexBuf1 != hexBuf2 {
+		logError(testName, function, args, startTime, "", "Values at same index don't match", err)
+		return
+	}
+	pos, err = r.Seek(-100, 2)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "Object Seek failed", err)
+		return
+	}
+	if pos != st.Size-100 {
+		logError(testName, function, args, startTime, "", "Incorrect position", err)
+		return
+	}
+	if err = r.Close(); err != nil {
+		logError(testName, function, args, startTime, "", "ObjectClose failed", err)
+		return
+	}
+
+	successLogger(testName, function, args, startTime).Info()
+}
+
+// Test get object reader to not throw error on being closed twice.
+func testGetObjectClosedTwice() {
+	// initialize logging params
+	startTime := time.Now()
+	testName := getFuncName()
+	function := "GetObject(bucketName, objectName)"
+	args := map[string]interface{}{}
+
+	// Seed random based on current time.
+	rand.Seed(time.Now().Unix())
+
+	// Instantiate new minio client object.
+	c, err := minio.New(os.Getenv(serverEndpoint),
+		&minio.Options{
+			Creds:  credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Secure: mustParseBool(os.Getenv(enableHTTPS)),
+		})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
+		return
+	}
+
+	// Enable tracing, write to stderr.
+	// c.TraceOn(os.Stderr)
+
+	// Set user agent.
+	c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
+
+	// Generate a new random bucket name.
+	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+	args["bucketName"] = bucketName
+
+	// Make a new bucket.
+	err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+		return
+	}
+
+	defer cleanupBucket(bucketName, c)
+
+	// Generate 33K of data.
+	bufSize := dataFileMap["datafile-33-kB"]
+	reader := getDataReader("datafile-33-kB")
+	defer reader.Close()
+
+	// Save the data
+	objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+	args["objectName"] = objectName
+
+	_, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "PutObject failed", err)
+		return
+	}
+
+	// Read the data back
+	r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "GetObject failed", err)
+		return
+	}
+
+	st, err := r.Stat()
+	if err != nil {
+		logError(testName, function, args, startTime, "", "Stat failed", err)
+		return
+	}
+	if st.Size != int64(bufSize) {
+		logError(testName, function, args, startTime, "", fmt.Sprintf("Number of bytes in stat does not match, expected %d got %d", bufSize, st.Size), err)
+		return
+	}
+	if err := crcMatchesName(r, "datafile-33-kB"); err != nil {
+		logError(testName, function, args, startTime, "", "data CRC check failed", err)
+		return
+	}
+	if err := r.Close(); err != nil {
+		logError(testName, function, args, startTime, "", "Object Close failed", err)
+		return
+	}
+	if err := r.Close(); err == nil {
+		logError(testName, function, args, startTime, "", "Object already closed, expected an error on second Close", err)
+		return
+	}
+
+	successLogger(testName, function, args, startTime).Info()
+}
+
+// Test RemoveObjects request where context cancels after timeout
+func testRemoveObjectsContext() {
+	// Initialize logging params.
+	startTime := time.Now()
+	testName := getFuncName()
+	function := "RemoveObjects(ctx, bucketName, objectsCh)"
+	args := map[string]interface{}{
+		"bucketName": "",
+	}
+
+	// Seed random based on current time.
+	rand.Seed(time.Now().Unix())
+
+	// Instantiate new minio client.
+ c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + // Enable tracing, write to stdout. + // c.TraceOn(os.Stderr) + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + defer cleanupBucket(bucketName, c) + + // Generate put data. + r := bytes.NewReader(bytes.Repeat([]byte("a"), 8)) + + // Multi remove of 20 objects. + nrObjects := 20 + objectsCh := make(chan minio.ObjectInfo) + go func() { + defer close(objectsCh) + for i := 0; i < nrObjects; i++ { + objectName := "sample" + strconv.Itoa(i) + ".txt" + info, err := c.PutObject(context.Background(), bucketName, objectName, r, 8, + minio.PutObjectOptions{ContentType: "application/octet-stream"}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + continue + } + objectsCh <- minio.ObjectInfo{ + Key: info.Key, + VersionID: info.VersionID, + } + } + }() + // Set context to cancel in 1 nanosecond. + ctx, cancel := context.WithTimeout(context.Background(), 1*time.Nanosecond) + args["ctx"] = ctx + defer cancel() + + // Call RemoveObjects API with short timeout. + errorCh := c.RemoveObjects(ctx, bucketName, objectsCh, minio.RemoveObjectsOptions{}) + // Check for error. + select { + case r := <-errorCh: + if r.Err == nil { + logError(testName, function, args, startTime, "", "RemoveObjects should fail on short timeout", err) + return + } + } + // Set context with longer timeout. + ctx, cancel = context.WithTimeout(context.Background(), 1*time.Hour) + args["ctx"] = ctx + defer cancel() + // Perform RemoveObjects with the longer timeout. Expect the removals to succeed. + errorCh = c.RemoveObjects(ctx, bucketName, objectsCh, minio.RemoveObjectsOptions{}) + select { + case r, more := <-errorCh: + if more || r.Err != nil { + logError(testName, function, args, startTime, "", "Unexpected error", r.Err) + return + } + } + + successLogger(testName, function, args, startTime).Info() +} + +// Test removing multiple objects with Remove API +func testRemoveMultipleObjects() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "RemoveObjects(bucketName, objectsCh)" + args := map[string]interface{}{ + "bucketName": "", + } + + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object. + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + + // Enable tracing, write to stdout. + // c.TraceOn(os.Stderr) + + // Generate a new random bucket name. 
+	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+	args["bucketName"] = bucketName
+
+	// Make a new bucket.
+	err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+		return
+	}
+
+	defer cleanupBucket(bucketName, c)
+
+	r := bytes.NewReader(bytes.Repeat([]byte("a"), 8))
+
+	// Multi remove of 200 objects
+	nrObjects := 200
+
+	objectsCh := make(chan minio.ObjectInfo)
+
+	go func() {
+		defer close(objectsCh)
+		// Upload objects and send them to objectsCh
+		for i := 0; i < nrObjects; i++ {
+			objectName := "sample" + strconv.Itoa(i) + ".txt"
+			info, err := c.PutObject(context.Background(), bucketName, objectName, r, 8,
+				minio.PutObjectOptions{ContentType: "application/octet-stream"})
+			if err != nil {
+				logError(testName, function, args, startTime, "", "PutObject failed", err)
+				continue
+			}
+			objectsCh <- minio.ObjectInfo{
+				Key:       info.Key,
+				VersionID: info.VersionID,
+			}
+		}
+	}()
+
+	// Call RemoveObjects API
+	errorCh := c.RemoveObjects(context.Background(), bucketName, objectsCh, minio.RemoveObjectsOptions{})
+
+	// Check if errorCh doesn't receive any error
+	select {
+	case r, more := <-errorCh:
+		if more {
+			logError(testName, function, args, startTime, "", "Unexpected error", r.Err)
+			return
+		}
+	}
+
+	successLogger(testName, function, args, startTime).Info()
+}
+
+// Test removing multiple objects and check for results
+func testRemoveMultipleObjectsWithResult() {
+	// initialize logging params
+	startTime := time.Now()
+	testName := getFuncName()
+	function := "RemoveObjects(bucketName, objectsCh)"
+	args := map[string]interface{}{
+		"bucketName": "",
+	}
+
+	// Seed random based on current time.
+	rand.Seed(time.Now().Unix())
+
+	// Instantiate new minio client object.
+	c, err := minio.New(os.Getenv(serverEndpoint),
+		&minio.Options{
+			Creds:  credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Secure: mustParseBool(os.Getenv(enableHTTPS)),
+		})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
+		return
+	}
+
+	// Set user agent.
+	c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
+
+	// Enable tracing, write to stdout.
+	// c.TraceOn(os.Stderr)
+
+	// Generate a new random bucket name.
+	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+	args["bucketName"] = bucketName
+
+	// Make a new bucket.
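+	// Object locking can only be enabled when the bucket is created; the
+	// Governance retention applied to some objects below depends on it.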
+ err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1", ObjectLocking: true}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + defer cleanupVersionedBucket(bucketName, c) + + r := bytes.NewReader(bytes.Repeat([]byte("a"), 8)) + + nrObjects := 10 + nrLockedObjects := 5 + + objectsCh := make(chan minio.ObjectInfo) + + go func() { + defer close(objectsCh) + // Upload objects and send them to objectsCh + for i := 0; i < nrObjects; i++ { + objectName := "sample" + strconv.Itoa(i) + ".txt" + info, err := c.PutObject(context.Background(), bucketName, objectName, r, 8, + minio.PutObjectOptions{ContentType: "application/octet-stream"}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + if i < nrLockedObjects { + // t := time.Date(2130, time.April, 25, 14, 0, 0, 0, time.UTC) + t := time.Now().Add(5 * time.Minute) + m := minio.RetentionMode(minio.Governance) + opts := minio.PutObjectRetentionOptions{ + GovernanceBypass: false, + RetainUntilDate: &t, + Mode: &m, + VersionID: info.VersionID, + } + err = c.PutObjectRetention(context.Background(), bucketName, objectName, opts) + if err != nil { + logError(testName, function, args, startTime, "", "Error setting retention", err) + return + } + } + + objectsCh <- minio.ObjectInfo{ + Key: info.Key, + VersionID: info.VersionID, + } + } + }() + + // Call RemoveObjects API + resultCh := c.RemoveObjectsWithResult(context.Background(), bucketName, objectsCh, minio.RemoveObjectsOptions{}) + + var foundNil, foundErr int + + for { + // Check if errorCh doesn't receive any error + select { + case deleteRes, ok := <-resultCh: + if !ok { + goto out + } + if deleteRes.ObjectName == "" { + logError(testName, function, args, startTime, "", "Unexpected object name", nil) + return + } + if deleteRes.ObjectVersionID == "" { + logError(testName, function, args, startTime, "", "Unexpected object version ID", nil) + return + } + + if deleteRes.Err == nil { + foundNil++ + } else { + foundErr++ + } + } + } +out: + if foundNil+foundErr != nrObjects { + logError(testName, function, args, startTime, "", "Unexpected number of results", nil) + return + } + + if foundNil != nrObjects-nrLockedObjects { + logError(testName, function, args, startTime, "", "Unexpected number of nil errors", nil) + return + } + + if foundErr != nrLockedObjects { + logError(testName, function, args, startTime, "", "Unexpected number of errors", nil) + return + } + + successLogger(testName, function, args, startTime).Info() +} + +// Tests FPutObject of a big file to trigger multipart +func testFPutObjectMultipart() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "FPutObject(bucketName, objectName, fileName, opts)" + args := map[string]interface{}{ + "bucketName": "", + "objectName": "", + "fileName": "", + "opts": "", + } + + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object. + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. 
+	c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
+
+	// Generate a new random bucket name.
+	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+	args["bucketName"] = bucketName
+
+	// Make a new bucket.
+	err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+		return
+	}
+
+	defer cleanupBucket(bucketName, c)
+
+	// Upload a 129 MB file so the upload spans multiple parts.
+	fileName := getMintDataDirFilePath("datafile-129-MB")
+	if fileName == "" {
+		// Make a temp file with 129 MB of data.
+		file, err := ioutil.TempFile(os.TempDir(), "FPutObjectTest")
+		if err != nil {
+			logError(testName, function, args, startTime, "", "TempFile creation failed", err)
+			return
+		}
+		if _, err = io.Copy(file, getDataReader("datafile-129-MB")); err != nil {
+			logError(testName, function, args, startTime, "", "Copy failed", err)
+			return
+		}
+		if err = file.Close(); err != nil {
+			logError(testName, function, args, startTime, "", "File Close failed", err)
+			return
+		}
+		fileName = file.Name()
+		args["fileName"] = fileName
+	}
+	totalSize := dataFileMap["datafile-129-MB"]
+	// Set base object name
+	objectName := bucketName + "FPutObject" + "-standard"
+	args["objectName"] = objectName
+
+	objectContentType := "testapplication/octet-stream"
+	args["objectContentType"] = objectContentType
+
+	// Perform standard FPutObject with contentType provided (Expecting application/octet-stream)
+	_, err = c.FPutObject(context.Background(), bucketName, objectName, fileName, minio.PutObjectOptions{ContentType: objectContentType})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "FPutObject failed", err)
+		return
+	}
+
+	r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "GetObject failed", err)
+		return
+	}
+	objInfo, err := r.Stat()
+	if err != nil {
+		logError(testName, function, args, startTime, "", "Unexpected error", err)
+		return
+	}
+	if objInfo.Size != int64(totalSize) {
+		logError(testName, function, args, startTime, "", fmt.Sprintf("Number of bytes does not match, expected %d got %d", totalSize, objInfo.Size), err)
+		return
+	}
+	if objInfo.ContentType != objectContentType && objInfo.ContentType != "application/octet-stream" {
+		logError(testName, function, args, startTime, "", "ContentType doesn't match", err)
+		return
+	}
+
+	successLogger(testName, function, args, startTime).Info()
+}
+
+// Tests FPutObject with null contentType (default = application/octet-stream)
+func testFPutObject() {
+	// initialize logging params
+	startTime := time.Now()
+	testName := getFuncName()
+	function := "FPutObject(bucketName, objectName, fileName, opts)"
+
+	args := map[string]interface{}{
+		"bucketName": "",
+		"objectName": "",
+		"fileName":   "",
+		"opts":       "",
+	}
+
+	// Seed random based on current time.
+	rand.Seed(time.Now().Unix())
+
+	// Instantiate new minio client object.
+ c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + location := "us-east-1" + + // Make a new bucket. + args["bucketName"] = bucketName + args["location"] = location + function = "MakeBucket(bucketName, location)" + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: location}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + defer cleanupBucket(bucketName, c) + + // Upload 3 parts worth of data to use all 3 of multiparts 'workers' and have an extra part. + // Use different data in part for multipart tests to check parts are uploaded in correct order. + fName := getMintDataDirFilePath("datafile-129-MB") + if fName == "" { + // Make a temp file with minPartSize bytes of data. + file, err := ioutil.TempFile(os.TempDir(), "FPutObjectTest") + if err != nil { + logError(testName, function, args, startTime, "", "TempFile creation failed", err) + return + } + + // Upload 3 parts to utilize all 3 'workers' in multipart and still have a part to upload. + if _, err = io.Copy(file, getDataReader("datafile-129-MB")); err != nil { + logError(testName, function, args, startTime, "", "File copy failed", err) + return + } + // Close the file pro-actively for windows. 
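+		// Windows keeps open files locked, so the handle must be closed
+		// here before FPutObject can re-open the file by name below.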
+		if err = file.Close(); err != nil {
+			logError(testName, function, args, startTime, "", "File close failed", err)
+			return
+		}
+		defer os.Remove(file.Name())
+		fName = file.Name()
+	}
+
+	// Set base object name
+	function = "FPutObject(bucketName, objectName, fileName, opts)"
+	objectName := bucketName + "FPutObject"
+	args["objectName"] = objectName + "-standard"
+	args["fileName"] = fName
+	args["opts"] = minio.PutObjectOptions{ContentType: "application/octet-stream"}
+
+	// Perform standard FPutObject with contentType provided (Expecting application/octet-stream)
+	ui, err := c.FPutObject(context.Background(), bucketName, objectName+"-standard", fName, minio.PutObjectOptions{ContentType: "application/octet-stream"})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "FPutObject failed", err)
+		return
+	}
+
+	if ui.Size != int64(dataFileMap["datafile-129-MB"]) {
+		logError(testName, function, args, startTime, "", "FPutObject returned an unexpected upload size", err)
+		return
+	}
+
+	// Perform FPutObject with no contentType provided (Expecting application/octet-stream)
+	args["objectName"] = objectName + "-Octet"
+	_, err = c.FPutObject(context.Background(), bucketName, objectName+"-Octet", fName, minio.PutObjectOptions{})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "FPutObject failed", err)
+		return
+	}
+
+	srcFile, err := os.Open(fName)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "File open failed", err)
+		return
+	}
+	defer srcFile.Close()
+	// Add extension to temp file name
+	tmpFile, err := os.Create(fName + ".gtar")
+	if err != nil {
+		logError(testName, function, args, startTime, "", "File create failed", err)
+		return
+	}
+	_, err = io.Copy(tmpFile, srcFile)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "File copy failed", err)
+		return
+	}
+	tmpFile.Close()
+
+	// Perform FPutObject with no contentType provided (Expecting application/x-gtar)
+	args["objectName"] = objectName + "-GTar"
+	args["opts"] = minio.PutObjectOptions{}
+	_, err = c.FPutObject(context.Background(), bucketName, objectName+"-GTar", fName+".gtar", minio.PutObjectOptions{})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "FPutObject failed", err)
+		return
+	}
+
+	// Check headers
+	function = "StatObject(bucketName, objectName, opts)"
+	args["objectName"] = objectName + "-standard"
+	rStandard, err := c.StatObject(context.Background(), bucketName, objectName+"-standard", minio.StatObjectOptions{})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "StatObject failed", err)
+		return
+	}
+	if rStandard.ContentType != "application/octet-stream" {
+		logError(testName, function, args, startTime, "", "ContentType does not match, expected application/octet-stream, got "+rStandard.ContentType, err)
+		return
+	}
+
+	function = "StatObject(bucketName, objectName, opts)"
+	args["objectName"] = objectName + "-Octet"
+	rOctet, err := c.StatObject(context.Background(), bucketName, objectName+"-Octet", minio.StatObjectOptions{})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "StatObject failed", err)
+		return
+	}
+	if rOctet.ContentType != "application/octet-stream" {
+		logError(testName, function, args, startTime, "", "ContentType does not match, expected application/octet-stream, got "+rOctet.ContentType, err)
+		return
+	}
+
+	function = "StatObject(bucketName, objectName, opts)"
+	args["objectName"] = objectName + "-GTar"
+	rGTar, err :=
c.StatObject(context.Background(), bucketName, objectName+"-GTar", minio.StatObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "StatObject failed", err) + return + } + if rGTar.ContentType != "application/x-gtar" && rGTar.ContentType != "application/octet-stream" && rGTar.ContentType != "application/x-tar" { + logError(testName, function, args, startTime, "", "ContentType does not match, expected application/x-tar or application/octet-stream, got "+rGTar.ContentType, err) + return + } + + os.Remove(fName + ".gtar") + successLogger(testName, function, args, startTime).Info() +} + +// Tests FPutObject request when context cancels after timeout +func testFPutObjectContext() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "FPutObject(bucketName, objectName, fileName, opts)" + args := map[string]interface{}{ + "bucketName": "", + "objectName": "", + "fileName": "", + "opts": "", + } + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object. + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + defer cleanupBucket(bucketName, c) + + // Upload 1 parts worth of data to use multipart upload. + // Use different data in part for multipart tests to check parts are uploaded in correct order. + fName := getMintDataDirFilePath("datafile-1-MB") + if fName == "" { + // Make a temp file with 1 MiB bytes of data. + file, err := ioutil.TempFile(os.TempDir(), "FPutObjectContextTest") + if err != nil { + logError(testName, function, args, startTime, "", "TempFile creation failed", err) + return + } + + // Upload 1 parts to trigger multipart upload + if _, err = io.Copy(file, getDataReader("datafile-1-MB")); err != nil { + logError(testName, function, args, startTime, "", "File copy failed", err) + return + } + // Close the file pro-actively for windows. 
+ if err = file.Close(); err != nil { + logError(testName, function, args, startTime, "", "File close failed", err) + return + } + defer os.Remove(file.Name()) + fName = file.Name() + } + + // Set base object name + objectName := bucketName + "FPutObjectContext" + args["objectName"] = objectName + ctx, cancel := context.WithTimeout(context.Background(), 1*time.Nanosecond) + args["ctx"] = ctx + defer cancel() + + // Perform FPutObject with contentType provided (Expecting application/octet-stream) + _, err = c.FPutObject(ctx, bucketName, objectName+"-Shorttimeout", fName, minio.PutObjectOptions{ContentType: "application/octet-stream"}) + if err == nil { + logError(testName, function, args, startTime, "", "FPutObject should fail on short timeout", err) + return + } + ctx, cancel = context.WithTimeout(context.Background(), 1*time.Hour) + defer cancel() + // Perform FPutObject with a long timeout. Expect the put object to succeed + _, err = c.FPutObject(ctx, bucketName, objectName+"-Longtimeout", fName, minio.PutObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "FPutObject shouldn't fail on long timeout", err) + return + } + + _, err = c.StatObject(context.Background(), bucketName, objectName+"-Longtimeout", minio.StatObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "StatObject failed", err) + return + } + + successLogger(testName, function, args, startTime).Info() +} + +// Tests FPutObject request when context cancels after timeout +func testFPutObjectContextV2() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "FPutObjectContext(ctx, bucketName, objectName, fileName, opts)" + args := map[string]interface{}{ + "bucketName": "", + "objectName": "", + "opts": "minio.PutObjectOptions{ContentType:objectContentType}", + } + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object. + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + defer cleanupBucket(bucketName, c) + + // Upload 1 parts worth of data to use multipart upload. + // Use different data in part for multipart tests to check parts are uploaded in correct order. + fName := getMintDataDirFilePath("datafile-1-MB") + if fName == "" { + // Make a temp file with 1 MiB bytes of data. 
+ file, err := ioutil.TempFile(os.TempDir(), "FPutObjectContextTest") + if err != nil { + logError(testName, function, args, startTime, "", "Temp file creation failed", err) + return + } + + // Upload 1 parts to trigger multipart upload + if _, err = io.Copy(file, getDataReader("datafile-1-MB")); err != nil { + logError(testName, function, args, startTime, "", "File copy failed", err) + return + } + + // Close the file pro-actively for windows. + if err = file.Close(); err != nil { + logError(testName, function, args, startTime, "", "File close failed", err) + return + } + defer os.Remove(file.Name()) + fName = file.Name() + } + + // Set base object name + objectName := bucketName + "FPutObjectContext" + args["objectName"] = objectName + + ctx, cancel := context.WithTimeout(context.Background(), 1*time.Nanosecond) + args["ctx"] = ctx + defer cancel() + + // Perform FPutObject with contentType provided (Expecting application/octet-stream) + _, err = c.FPutObject(ctx, bucketName, objectName+"-Shorttimeout", fName, minio.PutObjectOptions{ContentType: "application/octet-stream"}) + if err == nil { + logError(testName, function, args, startTime, "", "FPutObject should fail on short timeout", err) + return + } + ctx, cancel = context.WithTimeout(context.Background(), 1*time.Hour) + defer cancel() + // Perform FPutObject with a long timeout. Expect the put object to succeed + _, err = c.FPutObject(ctx, bucketName, objectName+"-Longtimeout", fName, minio.PutObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "FPutObject shouldn't fail on longer timeout", err) + return + } + + _, err = c.StatObject(context.Background(), bucketName, objectName+"-Longtimeout", minio.StatObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "StatObject failed", err) + return + } + + successLogger(testName, function, args, startTime).Info() +} + +// Test validates putObject with context to see if request cancellation is honored. +func testPutObjectContext() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "PutObject(ctx, bucketName, objectName, fileName, opts)" + args := map[string]interface{}{ + "ctx": "", + "bucketName": "", + "objectName": "", + "opts": "", + } + + // Instantiate new minio client object. + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + + // Make a new bucket. 
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket call failed", err) + return + } + + defer cleanupBucket(bucketName, c) + + bufSize := dataFileMap["datafile-33-kB"] + reader := getDataReader("datafile-33-kB") + defer reader.Close() + objectName := fmt.Sprintf("test-file-%v", rand.Uint32()) + args["objectName"] = objectName + + ctx, cancel := context.WithTimeout(context.Background(), 1*time.Nanosecond) + cancel() + args["ctx"] = ctx + args["opts"] = minio.PutObjectOptions{ContentType: "binary/octet-stream"} + + _, err = c.PutObject(ctx, bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) + if err == nil { + logError(testName, function, args, startTime, "", "PutObject should fail on short timeout", err) + return + } + + ctx, cancel = context.WithTimeout(context.Background(), 1*time.Hour) + args["ctx"] = ctx + + defer cancel() + reader = getDataReader("datafile-33-kB") + defer reader.Close() + _, err = c.PutObject(ctx, bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject with long timeout failed", err) + return + } + + successLogger(testName, function, args, startTime).Info() +} + +// Tests get object with s3zip extensions. +func testGetObjectS3Zip() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "GetObject(bucketName, objectName)" + args := map[string]interface{}{"x-minio-extract": true} + + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object. + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + defer func() { + // Delete all objects and buckets + if err = cleanupBucket(bucketName, c); err != nil { + logError(testName, function, args, startTime, "", "CleanupBucket failed", err) + return + } + }() + + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + ".zip" + args["objectName"] = objectName + + var zipFile bytes.Buffer + zw := zip.NewWriter(&zipFile) + rng := rand.New(rand.NewSource(0xc0cac01a)) + const nFiles = 500 + for i := 0; i <= nFiles; i++ { + if i == nFiles { + // Make one large, compressible file. 
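+			// Reassigning the loop index both sizes this entry (1 MB of
+			// zero bytes, left unfilled by rng) and ends the loop after
+			// this iteration.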
+			i = 1000000
+		}
+		b := make([]byte, i)
+		if i < nFiles {
+			rng.Read(b)
+		}
+		wc, err := zw.Create(fmt.Sprintf("test/small/file-%d.bin", i))
+		if err != nil {
+			logError(testName, function, args, startTime, "", "zw.Create failed", err)
+			return
+		}
+		wc.Write(b)
+	}
+	err = zw.Close()
+	if err != nil {
+		logError(testName, function, args, startTime, "", "zw.Close failed", err)
+		return
+	}
+	buf := zipFile.Bytes()
+
+	// Save the data
+	_, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "PutObject failed", err)
+		return
+	}
+
+	// Read the data back
+	r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "GetObject failed", err)
+		return
+	}
+
+	st, err := r.Stat()
+	if err != nil {
+		logError(testName, function, args, startTime, "", "Stat object failed", err)
+		return
+	}
+
+	if st.Size != int64(len(buf)) {
+		logError(testName, function, args, startTime, "", fmt.Sprintf("Number of bytes does not match, expected %d, got %d", len(buf), st.Size), err)
+		return
+	}
+	r.Close()
+
+	zr, err := zip.NewReader(bytes.NewReader(buf), int64(len(buf)))
+	if err != nil {
+		logError(testName, function, args, startTime, "", "zip.NewReader failed", err)
+		return
+	}
+	lOpts := minio.ListObjectsOptions{}
+	lOpts.Set("x-minio-extract", "true")
+	lOpts.Prefix = objectName + "/"
+	lOpts.Recursive = true
+	list := c.ListObjects(context.Background(), bucketName, lOpts)
+	listed := map[string]minio.ObjectInfo{}
+	for item := range list {
+		if item.Err != nil {
+			break
+		}
+		listed[item.Key] = item
+	}
+	if len(listed) == 0 {
+		// Assume we are running against non-minio.
+ args["SKIPPED"] = true + ignoredLog(testName, function, args, startTime, "s3zip does not appear to be present").Info() + return + } + + for _, file := range zr.File { + if file.FileInfo().IsDir() { + continue + } + args["zipfile"] = file.Name + zfr, err := file.Open() + if err != nil { + logError(testName, function, args, startTime, "", "file.Open failed", err) + return + } + want, err := ioutil.ReadAll(zfr) + if err != nil { + logError(testName, function, args, startTime, "", "fzip file read failed", err) + return + } + + opts := minio.GetObjectOptions{} + opts.Set("x-minio-extract", "true") + key := path.Join(objectName, file.Name) + r, err = c.GetObject(context.Background(), bucketName, key, opts) + if err != nil { + terr := minio.ToErrorResponse(err) + if terr.StatusCode != http.StatusNotFound { + logError(testName, function, args, startTime, "", "GetObject failed", err) + } + return + } + got, err := ioutil.ReadAll(r) + if err != nil { + logError(testName, function, args, startTime, "", "ReadAll failed", err) + return + } + r.Close() + if !bytes.Equal(want, got) { + logError(testName, function, args, startTime, "", "Content mismatch", err) + return + } + oi, ok := listed[key] + if !ok { + logError(testName, function, args, startTime, "", "Object Missing", fmt.Errorf("%s not present in listing", key)) + return + } + if int(oi.Size) != len(got) { + logError(testName, function, args, startTime, "", "Object Size Incorrect", fmt.Errorf("listing %d, read %d", oi.Size, len(got))) + return + } + delete(listed, key) + } + delete(args, "zipfile") + if len(listed) > 0 { + logError(testName, function, args, startTime, "", "Extra listed objects", fmt.Errorf("left over: %v", listed)) + return + } + successLogger(testName, function, args, startTime).Info() +} + +// Tests get object ReaderSeeker interface methods. +func testGetObjectReadSeekFunctional() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "GetObject(bucketName, objectName)" + args := map[string]interface{}{} + + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object. + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + defer func() { + // Delete all objects and buckets + if err = cleanupBucket(bucketName, c); err != nil { + logError(testName, function, args, startTime, "", "CleanupBucket failed", err) + return + } + }() + + // Generate 33K of data. 
+	bufSize := dataFileMap["datafile-33-kB"]
+	reader := getDataReader("datafile-33-kB")
+	defer reader.Close()
+
+	objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+	args["objectName"] = objectName
+
+	buf, err := ioutil.ReadAll(reader)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "ReadAll failed", err)
+		return
+	}
+
+	// Save the data
+	_, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "PutObject failed", err)
+		return
+	}
+
+	// Read the data back
+	r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "GetObject failed", err)
+		return
+	}
+
+	st, err := r.Stat()
+	if err != nil {
+		logError(testName, function, args, startTime, "", "Stat object failed", err)
+		return
+	}
+
+	if st.Size != int64(bufSize) {
+		logError(testName, function, args, startTime, "", fmt.Sprintf("Number of bytes does not match, expected %d, got %d", bufSize, st.Size), err)
+		return
+	}
+
+	// This following function helps us to compare data from the reader after seek
+	// with the data from the original buffer
+	cmpData := func(r io.Reader, start, end int) {
+		if end-start == 0 {
+			return
+		}
+		buffer := bytes.NewBuffer([]byte{})
+		if _, err := io.CopyN(buffer, r, int64(bufSize)); err != nil {
+			if err != io.EOF {
+				logError(testName, function, args, startTime, "", "CopyN failed", err)
+				return
+			}
+		}
+		if !bytes.Equal(buf[start:end], buffer.Bytes()) {
+			logError(testName, function, args, startTime, "", "Incorrect read bytes v/s original buffer", err)
+			return
+		}
+	}
+
+	// Generic seek error for errors other than io.EOF
+	seekErr := errors.New("seek error")
+
+	testCases := []struct {
+		offset    int64
+		whence    int
+		pos       int64
+		err       error
+		shouldCmp bool
+		start     int
+		end       int
+	}{
+		// Start from offset 0, fetch data and compare
+		{0, 0, 0, nil, true, 0, 0},
+		// Start from offset 2048, fetch data and compare
+		{2048, 0, 2048, nil, true, 2048, bufSize},
+		// Start from offset larger than possible
+		{int64(bufSize) + 1024, 0, 0, seekErr, false, 0, 0},
+		// Move to offset 0 without comparing
+		{0, 0, 0, nil, false, 0, 0},
+		// Move one step forward and compare
+		{1, 1, 1, nil, true, 1, bufSize},
+		// Move larger than possible
+		{int64(bufSize), 1, 0, seekErr, false, 0, 0},
+		// Provide negative offset with CUR_SEEK
+		{int64(-1), 1, 0, seekErr, false, 0, 0},
+		// Test with whence SEEK_END and with positive offset
+		{1024, 2, int64(bufSize) - 1024, io.EOF, true, 0, 0},
+		// Test with whence SEEK_END and with negative offset
+		{-1024, 2, int64(bufSize) - 1024, nil, true, bufSize - 1024, bufSize},
+		// Test with whence SEEK_END and with large negative offset
+		{-int64(bufSize) * 2, 2, 0, seekErr, true, 0, 0},
+	}
+
+	for i, testCase := range testCases {
+		// Perform seek operation
+		n, err := r.Seek(testCase.offset, testCase.whence)
+		// We expect an error
+		if testCase.err == seekErr && err == nil {
+			logError(testName, function, args, startTime, "", fmt.Sprintf("Test %d, expected an error but Seek returned none", i+1), err)
+			return
+		}
+		// We expect a specific error
+		if testCase.err != seekErr && testCase.err != err {
+			logError(testName, function, args, startTime, "", fmt.Sprintf("Test %d, unexpected err value: expected: %v, found: %v", i+1, testCase.err, err), err)
+			return
+		}
+		// If we expect an error go to the next loop
+		if testCase.err != nil {
+			continue
+		}
+		// Check the returned seek pos
+		if n != testCase.pos {
+			logError(testName, function, args, startTime, "", fmt.Sprintf("Test %d, number of bytes seeked does not match, expected %d, got %d", i+1, testCase.pos, n), err)
+			return
+		}
+		// Compare only if shouldCmp is activated
+		if testCase.shouldCmp {
+			cmpData(r, testCase.start, testCase.end)
+		}
+	}
+	successLogger(testName, function, args, startTime).Info()
+}
+
+// Tests get object ReaderAt interface methods.
+func testGetObjectReadAtFunctional() {
+	// initialize logging params
+	startTime := time.Now()
+	testName := getFuncName()
+	function := "GetObject(bucketName, objectName)"
+	args := map[string]interface{}{}
+
+	// Seed random based on current time.
+	rand.Seed(time.Now().Unix())
+
+	// Instantiate new minio client object.
+	c, err := minio.New(os.Getenv(serverEndpoint),
+		&minio.Options{
+			Creds:  credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Secure: mustParseBool(os.Getenv(enableHTTPS)),
+		})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
+		return
+	}
+
+	// Enable tracing, write to stderr.
+	// c.TraceOn(os.Stderr)
+
+	// Set user agent.
+	c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
+
+	// Generate a new random bucket name.
+	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+	args["bucketName"] = bucketName
+
+	// Make a new bucket.
+	err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+		return
+	}
+
+	defer cleanupBucket(bucketName, c)
+
+	// Generate 33K of data.
+	bufSize := dataFileMap["datafile-33-kB"]
+	reader := getDataReader("datafile-33-kB")
+	defer reader.Close()
+
+	objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+	args["objectName"] = objectName
+
+	buf, err := ioutil.ReadAll(reader)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "ReadAll failed", err)
+		return
+	}
+
+	// Save the data
+	_, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "PutObject failed", err)
+		return
+	}
+
+	// read the data back
+	r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "GetObject failed", err)
+		return
+	}
+	offset := int64(2048)
+
+	// read directly
+	buf1 := make([]byte, 512)
+	buf2 := make([]byte, 512)
+	buf3 := make([]byte, 512)
+	buf4 := make([]byte, 512)
+
+	// Test readAt before stat is called such that objectInfo doesn't change.
+	m, err := r.ReadAt(buf1, offset)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "ReadAt failed", err)
+		return
+	}
+	if m != len(buf1) {
+		logError(testName, function, args, startTime, "", fmt.Sprintf("ReadAt read shorter bytes before reaching EOF, expected %d, got %d", len(buf1), m), err)
+		return
+	}
+	if !bytes.Equal(buf1, buf[offset:offset+512]) {
+		logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err)
+		return
+	}
+	offset += 512
+
+	st, err := r.Stat()
+	if err != nil {
+		logError(testName, function, args, startTime, "", "Stat failed", err)
+		return
+	}
+
+	if st.Size != int64(bufSize) {
+		logError(testName, function, args, startTime, "", fmt.Sprintf("Number of bytes in stat does not match, expected %d, got %d", bufSize, st.Size), err)
+		return
+	}
+
+	m, err = r.ReadAt(buf2, offset)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "ReadAt failed", err)
+		return
+	}
+	if m != len(buf2) {
+		logError(testName, function, args, startTime, "", fmt.Sprintf("ReadAt read shorter bytes before reaching EOF, expected %d, got %d", len(buf2), m), err)
+		return
+	}
+	if !bytes.Equal(buf2, buf[offset:offset+512]) {
+		logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err)
+		return
+	}
+
+	offset += 512
+	m, err = r.ReadAt(buf3, offset)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "ReadAt failed", err)
+		return
+	}
+	if m != len(buf3) {
+		logError(testName, function, args, startTime, "", fmt.Sprintf("ReadAt read shorter bytes before reaching EOF, expected %d, got %d", len(buf3), m), err)
+		return
+	}
+	if !bytes.Equal(buf3, buf[offset:offset+512]) {
+		logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err)
+		return
+	}
+	offset += 512
+	m, err = r.ReadAt(buf4, offset)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "ReadAt failed", err)
+		return
+	}
+	if m != len(buf4) {
+		logError(testName, function, args, startTime, "", fmt.Sprintf("ReadAt read shorter bytes before reaching EOF, expected %d, got %d", len(buf4), m), err)
+		return
+	}
+	if !bytes.Equal(buf4, buf[offset:offset+512]) {
+		logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err)
+		return
+	}
+
+	buf5 := make([]byte, len(buf))
+	// Read the whole object.
+	m, err = r.ReadAt(buf5, 0)
+	if err != nil {
+		if err != io.EOF {
+			logError(testName, function, args, startTime, "", "ReadAt failed", err)
+			return
+		}
+	}
+	if m != len(buf5) {
+		logError(testName, function, args, startTime, "", fmt.Sprintf("ReadAt read shorter bytes before reaching EOF, expected %d, got %d", len(buf5), m), err)
+		return
+	}
+	if !bytes.Equal(buf, buf5) {
+		logError(testName, function, args, startTime, "", "Incorrect data read in GetObject compared to what was previously uploaded", err)
+		return
+	}
+
+	buf6 := make([]byte, len(buf)+1)
+	// Read the whole object and beyond.
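+	// With a buffer one byte longer than the object, ReadAt must stop at io.EOF
+	// rather than report any other error.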
+	_, err = r.ReadAt(buf6, 0)
+	if err != nil {
+		if err != io.EOF {
+			logError(testName, function, args, startTime, "", "ReadAt failed", err)
+			return
+		}
+	}
+
+	successLogger(testName, function, args, startTime).Info()
+}
+
+// Reproduces issue https://github.com/minio/minio-go/issues/1137
+func testGetObjectReadAtWhenEOFWasReached() {
+	// initialize logging params
+	startTime := time.Now()
+	testName := getFuncName()
+	function := "GetObject(bucketName, objectName)"
+	args := map[string]interface{}{}
+
+	// Seed random based on current time.
+	rand.Seed(time.Now().Unix())
+
+	// Instantiate new minio client object.
+	c, err := minio.New(os.Getenv(serverEndpoint),
+		&minio.Options{
+			Creds:  credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Secure: mustParseBool(os.Getenv(enableHTTPS)),
+		})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
+		return
+	}
+
+	// Enable tracing, write to stderr.
+	// c.TraceOn(os.Stderr)
+
+	// Set user agent.
+	c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
+
+	// Generate a new random bucket name.
+	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+	args["bucketName"] = bucketName
+
+	// Make a new bucket.
+	err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+		return
+	}
+
+	defer cleanupBucket(bucketName, c)
+
+	// Generate 33K of data.
+	bufSize := dataFileMap["datafile-33-kB"]
+	reader := getDataReader("datafile-33-kB")
+	defer reader.Close()
+
+	objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+	args["objectName"] = objectName
+
+	buf, err := ioutil.ReadAll(reader)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "ReadAll failed", err)
+		return
+	}
+
+	// Save the data
+	_, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "PutObject failed", err)
+		return
+	}
+
+	// read the data back
+	r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "GetObject failed", err)
+		return
+	}
+
+	// read directly
+	buf1 := make([]byte, len(buf))
+	buf2 := make([]byte, 512)
+
+	m, err := r.Read(buf1)
+	if err != nil {
+		if err != io.EOF {
+			logError(testName, function, args, startTime, "", "Read failed", err)
+			return
+		}
+	}
+	if m != len(buf1) {
+		logError(testName, function, args, startTime, "", fmt.Sprintf("Read read shorter bytes before reaching EOF, expected %d, got %d", len(buf1), m), err)
+		return
+	}
+	if !bytes.Equal(buf1, buf) {
+		logError(testName, function, args, startTime, "", "Incorrect Read data", err)
+		return
+	}
+
+	st, err := r.Stat()
+	if err != nil {
+		logError(testName, function, args, startTime, "", "Stat failed", err)
+		return
+	}
+
+	if st.Size != int64(bufSize) {
+		logError(testName, function, args, startTime, "", fmt.Sprintf("Number of bytes in stat does not match, expected %d, got %d", bufSize, st.Size), err)
+		return
+	}
+
+	m, err = r.ReadAt(buf2, 512)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "ReadAt failed", err)
+		return
+	}
+	if m != len(buf2) {
+		logError(testName, function, args, startTime, "", fmt.Sprintf("ReadAt read shorter bytes before reaching EOF, expected %d, got %d", len(buf2), m), err)
+		return
+	}
+	if !bytes.Equal(buf2, buf[512:1024]) {
+		logError(testName, function, args, startTime, "", "Incorrect ReadAt data", err)
+		return
+	}
+
+	successLogger(testName, function, args, startTime).Info()
+}
+
+// Test Presigned Post Policy
+func testPresignedPostPolicy() {
+	// initialize logging params
+	startTime := time.Now()
+	testName := getFuncName()
+	function := "PresignedPostPolicy(policy)"
+	args := map[string]interface{}{
+		"policy": "",
+	}
+
+	// Seed random based on current time.
+	rand.Seed(time.Now().Unix())
+
+	// Instantiate new minio client object
+	c, err := minio.New(os.Getenv(serverEndpoint),
+		&minio.Options{
+			Creds:  credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Secure: mustParseBool(os.Getenv(enableHTTPS)),
+		})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
+		return
+	}
+
+	// Enable tracing, write to stderr.
+	// c.TraceOn(os.Stderr)
+
+	// Set user agent.
+	c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
+
+	// Generate a new random bucket name.
+	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+
+	// Make a new bucket in 'us-east-1' (source bucket).
+	err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+		return
+	}
+
+	defer cleanupBucket(bucketName, c)
+
+	// Generate 33K of data.
+	reader := getDataReader("datafile-33-kB")
+	defer reader.Close()
+
+	objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+	// Azure requires the key to not start with a number
+	metadataKey := randString(60, rand.NewSource(time.Now().UnixNano()), "user")
+	metadataValue := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+
+	buf, err := ioutil.ReadAll(reader)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "ReadAll failed", err)
+		return
+	}
+
+	// Save the data
+	_, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "PutObject failed", err)
+		return
+	}
+
+	policy := minio.NewPostPolicy()
+
+	if err := policy.SetBucket(""); err == nil {
+		logError(testName, function, args, startTime, "", "SetBucket did not fail for invalid conditions", err)
+		return
+	}
+	if err := policy.SetKey(""); err == nil {
+		logError(testName, function, args, startTime, "", "SetKey did not fail for invalid conditions", err)
+		return
+	}
+	if err := policy.SetKeyStartsWith(""); err == nil {
+		logError(testName, function, args, startTime, "", "SetKeyStartsWith did not fail for invalid conditions", err)
+		return
+	}
+	if err := policy.SetExpires(time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC)); err == nil {
+		logError(testName, function, args, startTime, "", "SetExpires did not fail for invalid conditions", err)
+		return
+	}
+	if err := policy.SetContentType(""); err == nil {
+		logError(testName, function, args, startTime, "", "SetContentType did not fail for invalid conditions", err)
+		return
+	}
+	if err := policy.SetContentTypeStartsWith(""); err == nil {
+		logError(testName, function, args, startTime, "", "SetContentTypeStartsWith did not fail for invalid conditions", err)
+		return
+ } + if err := policy.SetContentLengthRange(1024*1024, 1024); err == nil { + logError(testName, function, args, startTime, "", "SetContentLengthRange did not fail for invalid conditions", err) + return + } + if err := policy.SetUserMetadata("", ""); err == nil { + logError(testName, function, args, startTime, "", "SetUserMetadata did not fail for invalid conditions", err) + return + } + + policy.SetBucket(bucketName) + policy.SetKey(objectName) + policy.SetExpires(time.Now().UTC().AddDate(0, 0, 10)) // expires in 10 days + policy.SetContentType("binary/octet-stream") + policy.SetContentLengthRange(10, 1024*1024) + policy.SetUserMetadata(metadataKey, metadataValue) + args["policy"] = policy.String() + + presignedPostPolicyURL, formData, err := c.PresignedPostPolicy(context.Background(), policy) + if err != nil { + logError(testName, function, args, startTime, "", "PresignedPostPolicy failed", err) + return + } + + var formBuf bytes.Buffer + writer := multipart.NewWriter(&formBuf) + for k, v := range formData { + writer.WriteField(k, v) + } + + // Get a 33KB file to upload and test if set post policy works + filePath := getMintDataDirFilePath("datafile-33-kB") + if filePath == "" { + // Make a temp file with 33 KB data. + file, err := ioutil.TempFile(os.TempDir(), "PresignedPostPolicyTest") + if err != nil { + logError(testName, function, args, startTime, "", "TempFile creation failed", err) + return + } + if _, err = io.Copy(file, getDataReader("datafile-33-kB")); err != nil { + logError(testName, function, args, startTime, "", "Copy failed", err) + return + } + if err = file.Close(); err != nil { + logError(testName, function, args, startTime, "", "File Close failed", err) + return + } + filePath = file.Name() + } + + // add file to post request + f, err := os.Open(filePath) + defer f.Close() + if err != nil { + logError(testName, function, args, startTime, "", "File open failed", err) + return + } + w, err := writer.CreateFormFile("file", filePath) + if err != nil { + logError(testName, function, args, startTime, "", "CreateFormFile failed", err) + return + } + + _, err = io.Copy(w, f) + if err != nil { + logError(testName, function, args, startTime, "", "Copy failed", err) + return + } + writer.Close() + + transport, err := minio.DefaultTransport(mustParseBool(os.Getenv(enableHTTPS))) + if err != nil { + logError(testName, function, args, startTime, "", "DefaultTransport failed", err) + return + } + + httpClient := &http.Client{ + // Setting a sensible time out of 30secs to wait for response + // headers. Request is pro-actively canceled after 30secs + // with no response. 
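+		// Note: http.Client.Timeout bounds the entire exchange, including
+		// reading the response body, not just the connection setup.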
+ Timeout: 30 * time.Second, + Transport: transport, + } + + req, err := http.NewRequest(http.MethodPost, presignedPostPolicyURL.String(), bytes.NewReader(formBuf.Bytes())) + if err != nil { + logError(testName, function, args, startTime, "", "Http request failed", err) + return + } + + req.Header.Set("Content-Type", writer.FormDataContentType()) + + // make post request with correct form data + res, err := httpClient.Do(req) + if err != nil { + logError(testName, function, args, startTime, "", "Http request failed", err) + return + } + defer res.Body.Close() + if res.StatusCode != http.StatusNoContent { + logError(testName, function, args, startTime, "", "Http request failed", errors.New(res.Status)) + return + } + + // expected path should be absolute path of the object + var scheme string + if mustParseBool(os.Getenv(enableHTTPS)) { + scheme = "https://" + } else { + scheme = "http://" + } + + expectedLocation := scheme + os.Getenv(serverEndpoint) + "/" + bucketName + "/" + objectName + expectedLocationBucketDNS := scheme + bucketName + "." + os.Getenv(serverEndpoint) + "/" + objectName + + if val, ok := res.Header["Location"]; ok { + if val[0] != expectedLocation && val[0] != expectedLocationBucketDNS { + logError(testName, function, args, startTime, "", "Location in header response is incorrect", err) + return + } + } else { + logError(testName, function, args, startTime, "", "Location not found in header response", err) + return + } + + successLogger(testName, function, args, startTime).Info() +} + +// Tests copy object +func testCopyObject() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "CopyObject(dst, src)" + args := map[string]interface{}{} + + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + + // Make a new bucket in 'us-east-1' (source bucket). + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + defer cleanupBucket(bucketName, c) + + // Make a new bucket in 'us-east-1' (destination bucket). + err = c.MakeBucket(context.Background(), bucketName+"-copy", minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + defer cleanupBucket(bucketName+"-copy", c) + + // Generate 33K of data. 
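+	// This fixture is the copy source; the CRC checks further down verify that
+	// both the source and the copied object still match it byte for byte.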
+	bufSize := dataFileMap["datafile-33-kB"]
+	reader := getDataReader("datafile-33-kB")
+
+	// Save the data
+	objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+	_, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "PutObject failed", err)
+		return
+	}
+
+	r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "GetObject failed", err)
+		return
+	}
+	// Check the various fields of source object against destination object.
+	objInfo, err := r.Stat()
+	if err != nil {
+		logError(testName, function, args, startTime, "", "Stat failed", err)
+		return
+	}
+
+	// Copy Source
+	src := minio.CopySrcOptions{
+		Bucket: bucketName,
+		Object: objectName,
+		// Set copy conditions.
+		MatchETag:          objInfo.ETag,
+		MatchModifiedSince: time.Date(2014, time.April, 0, 0, 0, 0, 0, time.UTC),
+	}
+	args["src"] = src
+
+	dst := minio.CopyDestOptions{
+		Bucket: bucketName + "-copy",
+		Object: objectName + "-copy",
+	}
+
+	// Perform the Copy
+	if _, err = c.CopyObject(context.Background(), dst, src); err != nil {
+		logError(testName, function, args, startTime, "", "CopyObject failed", err)
+		return
+	}
+
+	// Source object
+	r, err = c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "GetObject failed", err)
+		return
+	}
+
+	// Destination object
+	readerCopy, err := c.GetObject(context.Background(), bucketName+"-copy", objectName+"-copy", minio.GetObjectOptions{})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "GetObject failed", err)
+		return
+	}
+
+	// Check the various fields of source object against destination object.
+	objInfo, err = r.Stat()
+	if err != nil {
+		logError(testName, function, args, startTime, "", "Stat failed", err)
+		return
+	}
+	objInfoCopy, err := readerCopy.Stat()
+	if err != nil {
+		logError(testName, function, args, startTime, "", "Stat failed", err)
+		return
+	}
+	if objInfo.Size != objInfoCopy.Size {
+		logError(testName, function, args, startTime, "", fmt.Sprintf("Number of bytes does not match, expected %d, got %d", objInfoCopy.Size, objInfo.Size), err)
+		return
+	}
+
+	if err := crcMatchesName(r, "datafile-33-kB"); err != nil {
+		logError(testName, function, args, startTime, "", "data CRC check failed", err)
+		return
+	}
+	if err := crcMatchesName(readerCopy, "datafile-33-kB"); err != nil {
+		logError(testName, function, args, startTime, "", "copy data CRC check failed", err)
+		return
+	}
+	// Close all the get readers before proceeding with CopyObject operations.
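+	// An open reader holds on to its HTTP connection until fully read or closed,
+	// which can interfere with the conditional copy requests issued next.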
+ r.Close() + readerCopy.Close() + + // CopyObject again but with wrong conditions + src = minio.CopySrcOptions{ + Bucket: bucketName, + Object: objectName, + MatchUnmodifiedSince: time.Date(2014, time.April, 0, 0, 0, 0, 0, time.UTC), + NoMatchETag: objInfo.ETag, + } + + // Perform the Copy which should fail + _, err = c.CopyObject(context.Background(), dst, src) + if err == nil { + logError(testName, function, args, startTime, "", "CopyObject did not fail for invalid conditions", err) + return + } + + src = minio.CopySrcOptions{ + Bucket: bucketName, + Object: objectName, + } + + dst = minio.CopyDestOptions{ + Bucket: bucketName, + Object: objectName, + ReplaceMetadata: true, + UserMetadata: map[string]string{ + "Copy": "should be same", + }, + } + args["dst"] = dst + args["src"] = src + + _, err = c.CopyObject(context.Background(), dst, src) + if err != nil { + logError(testName, function, args, startTime, "", "CopyObject shouldn't fail", err) + return + } + + oi, err := c.StatObject(context.Background(), bucketName, objectName, minio.StatObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "StatObject failed", err) + return + } + + stOpts := minio.StatObjectOptions{} + stOpts.SetMatchETag(oi.ETag) + objInfo, err = c.StatObject(context.Background(), bucketName, objectName, stOpts) + if err != nil { + logError(testName, function, args, startTime, "", "CopyObject ETag should match and not fail", err) + return + } + + if objInfo.Metadata.Get("x-amz-meta-copy") != "should be same" { + logError(testName, function, args, startTime, "", "CopyObject modified metadata should match", err) + return + } + + successLogger(testName, function, args, startTime).Info() +} + +// Tests SSE-C get object ReaderSeeker interface methods. +func testSSECEncryptedGetObjectReadSeekFunctional() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "GetObject(bucketName, objectName)" + args := map[string]interface{}{} + + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object. + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + defer func() { + // Delete all objects and buckets + if err = cleanupBucket(bucketName, c); err != nil { + logError(testName, function, args, startTime, "", "CleanupBucket failed", err) + return + } + }() + + // Generate 129MiB of data. 
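+	// SSE-C note: the customer key derived via encrypt.DefaultPBKDF for PutObject
+	// below must be presented again on every GetObject of this object.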
+ bufSize := dataFileMap["datafile-129-MB"] + reader := getDataReader("datafile-129-MB") + defer reader.Close() + + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + args["objectName"] = objectName + + buf, err := ioutil.ReadAll(reader) + if err != nil { + logError(testName, function, args, startTime, "", "ReadAll failed", err) + return + } + + // Save the data + _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ + ContentType: "binary/octet-stream", + ServerSideEncryption: encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+objectName)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + + // Read the data back + r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{ + ServerSideEncryption: encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+objectName)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject failed", err) + return + } + defer r.Close() + + st, err := r.Stat() + if err != nil { + logError(testName, function, args, startTime, "", "Stat object failed", err) + return + } + + if st.Size != int64(bufSize) { + logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(int64(bufSize))+", got "+string(st.Size), err) + return + } + + // This following function helps us to compare data from the reader after seek + // with the data from the original buffer + cmpData := func(r io.Reader, start, end int) { + if end-start == 0 { + return + } + buffer := bytes.NewBuffer([]byte{}) + if _, err := io.CopyN(buffer, r, int64(bufSize)); err != nil { + if err != io.EOF { + logError(testName, function, args, startTime, "", "CopyN failed", err) + return + } + } + if !bytes.Equal(buf[start:end], buffer.Bytes()) { + logError(testName, function, args, startTime, "", "Incorrect read bytes v/s original buffer", err) + return + } + } + + testCases := []struct { + offset int64 + whence int + pos int64 + err error + shouldCmp bool + start int + end int + }{ + // Start from offset 0, fetch data and compare + {0, 0, 0, nil, true, 0, 0}, + // Start from offset 2048, fetch data and compare + {2048, 0, 2048, nil, true, 2048, bufSize}, + // Start from offset larger than possible + {int64(bufSize) + 1024, 0, 0, io.EOF, false, 0, 0}, + // Move to offset 0 without comparing + {0, 0, 0, nil, false, 0, 0}, + // Move one step forward and compare + {1, 1, 1, nil, true, 1, bufSize}, + // Move larger than possible + {int64(bufSize), 1, 0, io.EOF, false, 0, 0}, + // Provide negative offset with CUR_SEEK + {int64(-1), 1, 0, fmt.Errorf("Negative position not allowed for 1"), false, 0, 0}, + // Test with whence SEEK_END and with positive offset + {1024, 2, 0, io.EOF, false, 0, 0}, + // Test with whence SEEK_END and with negative offset + {-1024, 2, int64(bufSize) - 1024, nil, true, bufSize - 1024, bufSize}, + // Test with whence SEEK_END and with large negative offset + {-int64(bufSize) * 2, 2, 0, fmt.Errorf("Seeking at negative offset not allowed for 2"), false, 0, 0}, + // Test with invalid whence + {0, 3, 0, fmt.Errorf("Invalid whence 3"), false, 0, 0}, + } + + for i, testCase := range testCases { + // Perform seek operation + n, err := r.Seek(testCase.offset, testCase.whence) + if err != nil && testCase.err == nil { + // We expected success. 
+ logError(testName, function, args, startTime, "", + fmt.Sprintf("Test %d, unexpected err value: expected: %s, found: %s", i+1, testCase.err, err), err) + return + } + if err == nil && testCase.err != nil { + // We expected failure, but got success. + logError(testName, function, args, startTime, "", + fmt.Sprintf("Test %d, unexpected err value: expected: %s, found: %s", i+1, testCase.err, err), err) + return + } + if err != nil && testCase.err != nil { + if err.Error() != testCase.err.Error() { + // We expect a specific error + logError(testName, function, args, startTime, "", + fmt.Sprintf("Test %d, unexpected err value: expected: %s, found: %s", i+1, testCase.err, err), err) + return + } + } + // Check the returned seek pos + if n != testCase.pos { + logError(testName, function, args, startTime, "", + fmt.Sprintf("Test %d, number of bytes seeked does not match, expected %d, got %d", i+1, testCase.pos, n), err) + return + } + // Compare only if shouldCmp is activated + if testCase.shouldCmp { + cmpData(r, testCase.start, testCase.end) + } + } + + successLogger(testName, function, args, startTime).Info() +} + +// Tests SSE-S3 get object ReaderSeeker interface methods. +func testSSES3EncryptedGetObjectReadSeekFunctional() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "GetObject(bucketName, objectName)" + args := map[string]interface{}{} + + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object. + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + defer func() { + // Delete all objects and buckets + if err = cleanupBucket(bucketName, c); err != nil { + logError(testName, function, args, startTime, "", "CleanupBucket failed", err) + return + } + }() + + // Generate 129MiB of data. 
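+	// SSE-S3 note: the server manages the key, so the GetObject call below
+	// decrypts transparently with no encryption options supplied.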
+ bufSize := dataFileMap["datafile-129-MB"] + reader := getDataReader("datafile-129-MB") + defer reader.Close() + + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + args["objectName"] = objectName + + buf, err := ioutil.ReadAll(reader) + if err != nil { + logError(testName, function, args, startTime, "", "ReadAll failed", err) + return + } + + // Save the data + _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ + ContentType: "binary/octet-stream", + ServerSideEncryption: encrypt.NewSSE(), + }) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + + // Read the data back + r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject failed", err) + return + } + defer r.Close() + + st, err := r.Stat() + if err != nil { + logError(testName, function, args, startTime, "", "Stat object failed", err) + return + } + + if st.Size != int64(bufSize) { + logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(int64(bufSize))+", got "+string(st.Size), err) + return + } + + // This following function helps us to compare data from the reader after seek + // with the data from the original buffer + cmpData := func(r io.Reader, start, end int) { + if end-start == 0 { + return + } + buffer := bytes.NewBuffer([]byte{}) + if _, err := io.CopyN(buffer, r, int64(bufSize)); err != nil { + if err != io.EOF { + logError(testName, function, args, startTime, "", "CopyN failed", err) + return + } + } + if !bytes.Equal(buf[start:end], buffer.Bytes()) { + logError(testName, function, args, startTime, "", "Incorrect read bytes v/s original buffer", err) + return + } + } + + testCases := []struct { + offset int64 + whence int + pos int64 + err error + shouldCmp bool + start int + end int + }{ + // Start from offset 0, fetch data and compare + {0, 0, 0, nil, true, 0, 0}, + // Start from offset 2048, fetch data and compare + {2048, 0, 2048, nil, true, 2048, bufSize}, + // Start from offset larger than possible + {int64(bufSize) + 1024, 0, 0, io.EOF, false, 0, 0}, + // Move to offset 0 without comparing + {0, 0, 0, nil, false, 0, 0}, + // Move one step forward and compare + {1, 1, 1, nil, true, 1, bufSize}, + // Move larger than possible + {int64(bufSize), 1, 0, io.EOF, false, 0, 0}, + // Provide negative offset with CUR_SEEK + {int64(-1), 1, 0, fmt.Errorf("Negative position not allowed for 1"), false, 0, 0}, + // Test with whence SEEK_END and with positive offset + {1024, 2, 0, io.EOF, false, 0, 0}, + // Test with whence SEEK_END and with negative offset + {-1024, 2, int64(bufSize) - 1024, nil, true, bufSize - 1024, bufSize}, + // Test with whence SEEK_END and with large negative offset + {-int64(bufSize) * 2, 2, 0, fmt.Errorf("Seeking at negative offset not allowed for 2"), false, 0, 0}, + // Test with invalid whence + {0, 3, 0, fmt.Errorf("Invalid whence 3"), false, 0, 0}, + } + + for i, testCase := range testCases { + // Perform seek operation + n, err := r.Seek(testCase.offset, testCase.whence) + if err != nil && testCase.err == nil { + // We expected success. 
+ logError(testName, function, args, startTime, "", + fmt.Sprintf("Test %d, unexpected err value: expected: %s, found: %s", i+1, testCase.err, err), err) + return + } + if err == nil && testCase.err != nil { + // We expected failure, but got success. + logError(testName, function, args, startTime, "", + fmt.Sprintf("Test %d, unexpected err value: expected: %s, found: %s", i+1, testCase.err, err), err) + return + } + if err != nil && testCase.err != nil { + if err.Error() != testCase.err.Error() { + // We expect a specific error + logError(testName, function, args, startTime, "", + fmt.Sprintf("Test %d, unexpected err value: expected: %s, found: %s", i+1, testCase.err, err), err) + return + } + } + // Check the returned seek pos + if n != testCase.pos { + logError(testName, function, args, startTime, "", + fmt.Sprintf("Test %d, number of bytes seeked does not match, expected %d, got %d", i+1, testCase.pos, n), err) + return + } + // Compare only if shouldCmp is activated + if testCase.shouldCmp { + cmpData(r, testCase.start, testCase.end) + } + } + + successLogger(testName, function, args, startTime).Info() +} + +// Tests SSE-C get object ReaderAt interface methods. +func testSSECEncryptedGetObjectReadAtFunctional() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "GetObject(bucketName, objectName)" + args := map[string]interface{}{} + + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object. + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + defer cleanupBucket(bucketName, c) + + // Generate 129MiB of data. 
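+	// As in the SSE-C seek test above, every read of this object must carry the
+	// same PBKDF-derived customer key used at upload time.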
+ bufSize := dataFileMap["datafile-129-MB"] + reader := getDataReader("datafile-129-MB") + defer reader.Close() + + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + args["objectName"] = objectName + + buf, err := ioutil.ReadAll(reader) + if err != nil { + logError(testName, function, args, startTime, "", "ReadAll failed", err) + return + } + + // Save the data + _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ + ContentType: "binary/octet-stream", + ServerSideEncryption: encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+objectName)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + + // read the data back + r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{ + ServerSideEncryption: encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+objectName)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + defer r.Close() + + offset := int64(2048) + + // read directly + buf1 := make([]byte, 512) + buf2 := make([]byte, 512) + buf3 := make([]byte, 512) + buf4 := make([]byte, 512) + + // Test readAt before stat is called such that objectInfo doesn't change. + m, err := r.ReadAt(buf1, offset) + if err != nil { + logError(testName, function, args, startTime, "", "ReadAt failed", err) + return + } + if m != len(buf1) { + logError(testName, function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf1))+", got "+string(m), err) + return + } + if !bytes.Equal(buf1, buf[offset:offset+512]) { + logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err) + return + } + offset += 512 + + st, err := r.Stat() + if err != nil { + logError(testName, function, args, startTime, "", "Stat failed", err) + return + } + + if st.Size != int64(bufSize) { + logError(testName, function, args, startTime, "", "Number of bytes in stat does not match, expected "+string(int64(bufSize))+", got "+string(st.Size), err) + return + } + + m, err = r.ReadAt(buf2, offset) + if err != nil { + logError(testName, function, args, startTime, "", "ReadAt failed", err) + return + } + if m != len(buf2) { + logError(testName, function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf2))+", got "+string(m), err) + return + } + if !bytes.Equal(buf2, buf[offset:offset+512]) { + logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err) + return + } + offset += 512 + m, err = r.ReadAt(buf3, offset) + if err != nil { + logError(testName, function, args, startTime, "", "ReadAt failed", err) + return + } + if m != len(buf3) { + logError(testName, function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf3))+", got "+string(m), err) + return + } + if !bytes.Equal(buf3, buf[offset:offset+512]) { + logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err) + return + } + offset += 512 + m, err = r.ReadAt(buf4, offset) + if err != nil { + logError(testName, function, args, startTime, "", "ReadAt failed", err) + return + } + if m != len(buf4) { + logError(testName, function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected 
"+string(len(buf4))+", got "+string(m), err) + return + } + if !bytes.Equal(buf4, buf[offset:offset+512]) { + logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err) + return + } + + buf5 := make([]byte, len(buf)) + // Read the whole object. + m, err = r.ReadAt(buf5, 0) + if err != nil { + if err != io.EOF { + logError(testName, function, args, startTime, "", "ReadAt failed", err) + return + } + } + if m != len(buf5) { + logError(testName, function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf5))+", got "+string(m), err) + return + } + if !bytes.Equal(buf, buf5) { + logError(testName, function, args, startTime, "", "Incorrect data read in GetObject, than what was previously uploaded", err) + return + } + + buf6 := make([]byte, len(buf)+1) + // Read the whole object and beyond. + _, err = r.ReadAt(buf6, 0) + if err != nil { + if err != io.EOF { + logError(testName, function, args, startTime, "", "ReadAt failed", err) + return + } + } + + successLogger(testName, function, args, startTime).Info() +} + +// Tests SSE-S3 get object ReaderAt interface methods. +func testSSES3EncryptedGetObjectReadAtFunctional() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "GetObject(bucketName, objectName)" + args := map[string]interface{}{} + + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object. + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + defer cleanupBucket(bucketName, c) + + // Generate 129MiB of data. 
+ bufSize := dataFileMap["datafile-129-MB"] + reader := getDataReader("datafile-129-MB") + defer reader.Close() + + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + args["objectName"] = objectName + + buf, err := ioutil.ReadAll(reader) + if err != nil { + logError(testName, function, args, startTime, "", "ReadAll failed", err) + return + } + + // Save the data + _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ + ContentType: "binary/octet-stream", + ServerSideEncryption: encrypt.NewSSE(), + }) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + + // read the data back + r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + defer r.Close() + + offset := int64(2048) + + // read directly + buf1 := make([]byte, 512) + buf2 := make([]byte, 512) + buf3 := make([]byte, 512) + buf4 := make([]byte, 512) + + // Test readAt before stat is called such that objectInfo doesn't change. + m, err := r.ReadAt(buf1, offset) + if err != nil { + logError(testName, function, args, startTime, "", "ReadAt failed", err) + return + } + if m != len(buf1) { + logError(testName, function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf1))+", got "+string(m), err) + return + } + if !bytes.Equal(buf1, buf[offset:offset+512]) { + logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err) + return + } + offset += 512 + + st, err := r.Stat() + if err != nil { + logError(testName, function, args, startTime, "", "Stat failed", err) + return + } + + if st.Size != int64(bufSize) { + logError(testName, function, args, startTime, "", "Number of bytes in stat does not match, expected "+string(int64(bufSize))+", got "+string(st.Size), err) + return + } + + m, err = r.ReadAt(buf2, offset) + if err != nil { + logError(testName, function, args, startTime, "", "ReadAt failed", err) + return + } + if m != len(buf2) { + logError(testName, function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf2))+", got "+string(m), err) + return + } + if !bytes.Equal(buf2, buf[offset:offset+512]) { + logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err) + return + } + offset += 512 + m, err = r.ReadAt(buf3, offset) + if err != nil { + logError(testName, function, args, startTime, "", "ReadAt failed", err) + return + } + if m != len(buf3) { + logError(testName, function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf3))+", got "+string(m), err) + return + } + if !bytes.Equal(buf3, buf[offset:offset+512]) { + logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err) + return + } + offset += 512 + m, err = r.ReadAt(buf4, offset) + if err != nil { + logError(testName, function, args, startTime, "", "ReadAt failed", err) + return + } + if m != len(buf4) { + logError(testName, function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf4))+", got "+string(m), err) + return + } + if !bytes.Equal(buf4, buf[offset:offset+512]) { + logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt 
from same offset", err) + return + } + + buf5 := make([]byte, len(buf)) + // Read the whole object. + m, err = r.ReadAt(buf5, 0) + if err != nil { + if err != io.EOF { + logError(testName, function, args, startTime, "", "ReadAt failed", err) + return + } + } + if m != len(buf5) { + logError(testName, function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf5))+", got "+string(m), err) + return + } + if !bytes.Equal(buf, buf5) { + logError(testName, function, args, startTime, "", "Incorrect data read in GetObject, than what was previously uploaded", err) + return + } + + buf6 := make([]byte, len(buf)+1) + // Read the whole object and beyond. + _, err = r.ReadAt(buf6, 0) + if err != nil { + if err != io.EOF { + logError(testName, function, args, startTime, "", "ReadAt failed", err) + return + } + } + + successLogger(testName, function, args, startTime).Info() +} + +// testSSECEncryptionPutGet tests encryption with customer provided encryption keys +func testSSECEncryptionPutGet() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "PutEncryptedObject(bucketName, objectName, reader, sse)" + args := map[string]interface{}{ + "bucketName": "", + "objectName": "", + "sse": "", + } + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. 
+ err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + defer cleanupBucket(bucketName, c) + + testCases := []struct { + buf []byte + }{ + {buf: bytes.Repeat([]byte("F"), 1)}, + {buf: bytes.Repeat([]byte("F"), 15)}, + {buf: bytes.Repeat([]byte("F"), 16)}, + {buf: bytes.Repeat([]byte("F"), 17)}, + {buf: bytes.Repeat([]byte("F"), 31)}, + {buf: bytes.Repeat([]byte("F"), 32)}, + {buf: bytes.Repeat([]byte("F"), 33)}, + {buf: bytes.Repeat([]byte("F"), 1024)}, + {buf: bytes.Repeat([]byte("F"), 1024*2)}, + {buf: bytes.Repeat([]byte("F"), 1024*1024)}, + } + + const password = "correct horse battery staple" // https://xkcd.com/936/ + + for i, testCase := range testCases { + // Generate a random object name + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + args["objectName"] = objectName + + // Secured object + sse := encrypt.DefaultPBKDF([]byte(password), []byte(bucketName+objectName)) + args["sse"] = sse + + // Put encrypted data + _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(testCase.buf), int64(len(testCase.buf)), minio.PutObjectOptions{ServerSideEncryption: sse}) + if err != nil { + logError(testName, function, args, startTime, "", "PutEncryptedObject failed", err) + return + } + + // Read the data back + r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{ServerSideEncryption: sse}) + if err != nil { + logError(testName, function, args, startTime, "", "GetEncryptedObject failed", err) + return + } + defer r.Close() + + // Compare the sent object with the received one + recvBuffer := bytes.NewBuffer([]byte{}) + if _, err = io.Copy(recvBuffer, r); err != nil { + logError(testName, function, args, startTime, "", "Test "+string(i+1)+", error: "+err.Error(), err) + return + } + if recvBuffer.Len() != len(testCase.buf) { + logError(testName, function, args, startTime, "", "Test "+string(i+1)+", Number of bytes of received object does not match, expected "+string(len(testCase.buf))+", got "+string(recvBuffer.Len()), err) + return + } + if !bytes.Equal(testCase.buf, recvBuffer.Bytes()) { + logError(testName, function, args, startTime, "", "Test "+string(i+1)+", Encrypted sent is not equal to decrypted, expected "+string(testCase.buf)+", got "+string(recvBuffer.Bytes()), err) + return + } + + successLogger(testName, function, args, startTime).Info() + + } + + successLogger(testName, function, args, startTime).Info() +} + +// TestEncryptionFPut tests encryption with customer specified encryption keys +func testSSECEncryptionFPut() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "FPutEncryptedObject(bucketName, objectName, filePath, contentType, sse)" + args := map[string]interface{}{ + "bucketName": "", + "objectName": "", + "filePath": "", + "contentType": "", + "sse": "", + } + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + // Enable tracing, write to stderr. 
+ // c.TraceOn(os.Stderr)
+
+ // Set user agent.
+ c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+ args["bucketName"] = bucketName
+
+ // Make a new bucket.
+ err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+ return
+ }
+
+ defer cleanupBucket(bucketName, c)
+
+ // Object custom metadata
+ customContentType := "custom/contenttype"
+ args["metadata"] = customContentType
+
+ testCases := []struct {
+ buf []byte
+ }{
+ {buf: bytes.Repeat([]byte("F"), 0)},
+ {buf: bytes.Repeat([]byte("F"), 1)},
+ {buf: bytes.Repeat([]byte("F"), 15)},
+ {buf: bytes.Repeat([]byte("F"), 16)},
+ {buf: bytes.Repeat([]byte("F"), 17)},
+ {buf: bytes.Repeat([]byte("F"), 31)},
+ {buf: bytes.Repeat([]byte("F"), 32)},
+ {buf: bytes.Repeat([]byte("F"), 33)},
+ {buf: bytes.Repeat([]byte("F"), 1024)},
+ {buf: bytes.Repeat([]byte("F"), 1024*2)},
+ {buf: bytes.Repeat([]byte("F"), 1024*1024)},
+ }
+
+ const password = "correct horse battery staple" // https://xkcd.com/936/
+ for i, testCase := range testCases {
+ // Generate a random object name
+ objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+ args["objectName"] = objectName
+
+ // Secured object
+ sse := encrypt.DefaultPBKDF([]byte(password), []byte(bucketName+objectName))
+ args["sse"] = sse
+
+ // Generate a random file name.
+ fileName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+ file, err := os.Create(fileName)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "file create failed", err)
+ return
+ }
+ _, err = file.Write(testCase.buf)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "file write failed", err)
+ return
+ }
+ file.Close()
+ // Put encrypted data
+ if _, err = c.FPutObject(context.Background(), bucketName, objectName, fileName, minio.PutObjectOptions{ServerSideEncryption: sse}); err != nil {
+ logError(testName, function, args, startTime, "", "FPutEncryptedObject failed", err)
+ return
+ }
+
+ // Read the data back
+ r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{ServerSideEncryption: sse})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "GetEncryptedObject failed", err)
+ return
+ }
+ defer r.Close()
+
+ // Compare the sent object with the received one
+ recvBuffer := bytes.NewBuffer([]byte{})
+ if _, err = io.Copy(recvBuffer, r); err != nil {
+ logError(testName, function, args, startTime, "", "Test "+fmt.Sprintf("%d", i+1)+", error: "+err.Error(), err)
+ return
+ }
+ if recvBuffer.Len() != len(testCase.buf) {
+ logError(testName, function, args, startTime, "", "Test "+fmt.Sprintf("%d", i+1)+", Number of bytes of received object does not match, expected "+fmt.Sprintf("%d", len(testCase.buf))+", got "+fmt.Sprintf("%d", recvBuffer.Len()), err)
+ return
+ }
+ if !bytes.Equal(testCase.buf, recvBuffer.Bytes()) {
+ logError(testName, function, args, startTime, "", "Test "+fmt.Sprintf("%d", i+1)+", Encrypted sent is not equal to decrypted, expected "+string(testCase.buf)+", got "+string(recvBuffer.Bytes()), err)
+ return
+ }
+
+ os.Remove(fileName)
+ }
+
+ successLogger(testName, function, args, startTime).Info()
+}
+
+// testSSES3EncryptionPutGet tests SSE-S3 encryption
+func testSSES3EncryptionPutGet() {
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "PutEncryptedObject(bucketName, objectName, reader, sse)"
+ args := map[string]interface{}{
+ "bucketName": "",
+ "objectName": "",
+ "sse": "",
+ }
+ // Seed random based on current time.
+ rand.Seed(time.Now().Unix())
+
+ // Instantiate new minio client object
+ c, err := minio.New(os.Getenv(serverEndpoint),
+ &minio.Options{
+ Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+ Secure: mustParseBool(os.Getenv(enableHTTPS)),
+ })
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
+ return
+ }
+
+ // Enable tracing, write to stderr.
+ // c.TraceOn(os.Stderr)
+
+ // Set user agent.
+ c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+ args["bucketName"] = bucketName
+
+ // Make a new bucket.
+ err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+ return
+ }
+
+ defer cleanupBucket(bucketName, c)
+
+ testCases := []struct {
+ buf []byte
+ }{
+ {buf: bytes.Repeat([]byte("F"), 1)},
+ {buf: bytes.Repeat([]byte("F"), 15)},
+ {buf: bytes.Repeat([]byte("F"), 16)},
+ {buf: bytes.Repeat([]byte("F"), 17)},
+ {buf: bytes.Repeat([]byte("F"), 31)},
+ {buf: bytes.Repeat([]byte("F"), 32)},
+ {buf: bytes.Repeat([]byte("F"), 33)},
+ {buf: bytes.Repeat([]byte("F"), 1024)},
+ {buf: bytes.Repeat([]byte("F"), 1024*2)},
+ {buf: bytes.Repeat([]byte("F"), 1024*1024)},
+ }
+
+ for i, testCase := range testCases {
+ // Generate a random object name
+ objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+ args["objectName"] = objectName
+
+ // Secured object
+ sse := encrypt.NewSSE()
+ args["sse"] = sse
+
+ // Put encrypted data
+ _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(testCase.buf), int64(len(testCase.buf)), minio.PutObjectOptions{ServerSideEncryption: sse})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PutEncryptedObject failed", err)
+ return
+ }
+
+ // Read the data back without any encryption headers
+ r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "GetEncryptedObject failed", err)
+ return
+ }
+ defer r.Close()
+
+ // Compare the sent object with the received one
+ recvBuffer := bytes.NewBuffer([]byte{})
+ if _, err = io.Copy(recvBuffer, r); err != nil {
+ logError(testName, function, args, startTime, "", "Test "+fmt.Sprintf("%d", i+1)+", error: "+err.Error(), err)
+ return
+ }
+ if recvBuffer.Len() != len(testCase.buf) {
+ logError(testName, function, args, startTime, "", "Test "+fmt.Sprintf("%d", i+1)+", Number of bytes of received object does not match, expected "+fmt.Sprintf("%d", len(testCase.buf))+", got "+fmt.Sprintf("%d", recvBuffer.Len()), err)
+ return
+ }
+ if !bytes.Equal(testCase.buf, recvBuffer.Bytes()) {
+ logError(testName, function, args, startTime, "", "Test "+fmt.Sprintf("%d", i+1)+", Encrypted sent is not equal to decrypted, expected "+string(testCase.buf)+", got "+string(recvBuffer.Bytes()), err)
+ return
+ }
+
+ successLogger(testName, function, args, startTime).Info()
+
+ }
+
+ successLogger(testName, function, args, startTime).Info()
+}
+
+// testSSES3EncryptionFPut tests server-side encryption with server-managed keys (SSE-S3)
+func testSSES3EncryptionFPut() {
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "FPutEncryptedObject(bucketName, objectName, filePath, contentType, sse)"
+ args := map[string]interface{}{
+ "bucketName": "",
+ "objectName": "",
+ "filePath": "",
+ "contentType": "",
+ "sse": "",
+ }
+ // Seed random based on current time.
+ rand.Seed(time.Now().Unix())
+
+ // Instantiate new minio client object
+ c, err := minio.New(os.Getenv(serverEndpoint),
+ &minio.Options{
+ Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+ Secure: mustParseBool(os.Getenv(enableHTTPS)),
+ })
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
+ return
+ }
+
+ // Enable tracing, write to stderr.
+ // c.TraceOn(os.Stderr)
+
+ // Set user agent.
+ c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+ args["bucketName"] = bucketName
+
+ // Make a new bucket.
+ err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+ return
+ }
+
+ defer cleanupBucket(bucketName, c)
+
+ // Object custom metadata
+ customContentType := "custom/contenttype"
+ args["metadata"] = customContentType
+
+ testCases := []struct {
+ buf []byte
+ }{
+ {buf: bytes.Repeat([]byte("F"), 0)},
+ {buf: bytes.Repeat([]byte("F"), 1)},
+ {buf: bytes.Repeat([]byte("F"), 15)},
+ {buf: bytes.Repeat([]byte("F"), 16)},
+ {buf: bytes.Repeat([]byte("F"), 17)},
+ {buf: bytes.Repeat([]byte("F"), 31)},
+ {buf: bytes.Repeat([]byte("F"), 32)},
+ {buf: bytes.Repeat([]byte("F"), 33)},
+ {buf: bytes.Repeat([]byte("F"), 1024)},
+ {buf: bytes.Repeat([]byte("F"), 1024*2)},
+ {buf: bytes.Repeat([]byte("F"), 1024*1024)},
+ }
+
+ for i, testCase := range testCases {
+ // Generate a random object name
+ objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+ args["objectName"] = objectName
+
+ // Secured object
+ sse := encrypt.NewSSE()
+ args["sse"] = sse
+
+ // Generate a random file name.
+ fileName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+ file, err := os.Create(fileName)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "file create failed", err)
+ return
+ }
+ _, err = file.Write(testCase.buf)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "file write failed", err)
+ return
+ }
+ file.Close()
+ // Put encrypted data
+ if _, err = c.FPutObject(context.Background(), bucketName, objectName, fileName, minio.PutObjectOptions{ServerSideEncryption: sse}); err != nil {
+ logError(testName, function, args, startTime, "", "FPutEncryptedObject failed", err)
+ return
+ }
+
+ // Read the data back
+ r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "GetEncryptedObject failed", err)
+ return
+ }
+ defer r.Close()
+
+ // Compare the sent object with the received one
+ recvBuffer := bytes.NewBuffer([]byte{})
+ if _, err = io.Copy(recvBuffer, r); err != nil {
+ logError(testName, function, args, startTime, "", "Test "+fmt.Sprintf("%d", i+1)+", error: "+err.Error(), err)
+ return
+ }
+ if recvBuffer.Len() != len(testCase.buf) {
+ logError(testName, function, args, startTime, "", "Test "+fmt.Sprintf("%d", i+1)+", Number of bytes of received object does not match, expected "+fmt.Sprintf("%d", len(testCase.buf))+", got "+fmt.Sprintf("%d", recvBuffer.Len()), err)
+ return
+ }
+ if !bytes.Equal(testCase.buf, recvBuffer.Bytes()) {
+ logError(testName, function, args, startTime, "", "Test "+fmt.Sprintf("%d", i+1)+", Encrypted sent is not equal to decrypted, expected "+string(testCase.buf)+", got "+string(recvBuffer.Bytes()), err)
+ return
+ }
+
+ os.Remove(fileName)
+ }
+
+ successLogger(testName, function, args, startTime).Info()
+}
+
+func testBucketNotification() {
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "SetBucketNotification(bucketName)"
+ args := map[string]interface{}{
+ "bucketName": "",
+ }
+
+ if os.Getenv("NOTIFY_BUCKET") == "" ||
+ os.Getenv("NOTIFY_SERVICE") == "" ||
+ os.Getenv("NOTIFY_REGION") == "" ||
+ os.Getenv("NOTIFY_ACCOUNTID") == "" ||
+ os.Getenv("NOTIFY_RESOURCE") == "" {
+ ignoredLog(testName, function, args, startTime, "Skipped notification test as it is not configured").Info()
+ return
+ }
+
+ // Seed random based on current time.
+ rand.Seed(time.Now().Unix())
+
+ c, err := minio.New(os.Getenv(serverEndpoint),
+ &minio.Options{
+ Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+ Secure: mustParseBool(os.Getenv(enableHTTPS)),
+ })
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
+ return
+ }
+
+ // Enable to debug
+ // c.TraceOn(os.Stderr)
+
+ // Set user agent.
+ c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + + bucketName := os.Getenv("NOTIFY_BUCKET") + args["bucketName"] = bucketName + + topicArn := notification.NewArn("aws", os.Getenv("NOTIFY_SERVICE"), os.Getenv("NOTIFY_REGION"), os.Getenv("NOTIFY_ACCOUNTID"), os.Getenv("NOTIFY_RESOURCE")) + queueArn := notification.NewArn("aws", "dummy-service", "dummy-region", "dummy-accountid", "dummy-resource") + + topicConfig := notification.NewConfig(topicArn) + topicConfig.AddEvents(notification.ObjectCreatedAll, notification.ObjectRemovedAll) + topicConfig.AddFilterSuffix("jpg") + + queueConfig := notification.NewConfig(queueArn) + queueConfig.AddEvents(notification.ObjectCreatedAll) + queueConfig.AddFilterPrefix("photos/") + + config := notification.Configuration{} + config.AddTopic(topicConfig) + + // Add the same topicConfig again, should have no effect + // because it is duplicated + config.AddTopic(topicConfig) + if len(config.TopicConfigs) != 1 { + logError(testName, function, args, startTime, "", "Duplicate entry added", err) + return + } + + // Add and remove a queue config + config.AddQueue(queueConfig) + config.RemoveQueueByArn(queueArn) + + err = c.SetBucketNotification(context.Background(), bucketName, config) + if err != nil { + logError(testName, function, args, startTime, "", "SetBucketNotification failed", err) + return + } + + config, err = c.GetBucketNotification(context.Background(), bucketName) + if err != nil { + logError(testName, function, args, startTime, "", "GetBucketNotification failed", err) + return + } + + if len(config.TopicConfigs) != 1 { + logError(testName, function, args, startTime, "", "Topic config is empty", err) + return + } + + if config.TopicConfigs[0].Filter.S3Key.FilterRules[0].Value != "jpg" { + logError(testName, function, args, startTime, "", "Couldn't get the suffix", err) + return + } + + err = c.RemoveAllBucketNotification(context.Background(), bucketName) + if err != nil { + logError(testName, function, args, startTime, "", "RemoveAllBucketNotification failed", err) + return + } + + // Delete all objects and buckets + if err = cleanupBucket(bucketName, c); err != nil { + logError(testName, function, args, startTime, "", "CleanupBucket failed", err) + return + } + + successLogger(testName, function, args, startTime).Info() +} + +// Tests comprehensive list of all methods. +func testFunctional() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "testFunctional()" + functionAll := "" + args := map[string]interface{}{} + + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, nil, startTime, "", "MinIO client object creation failed", err) + return + } + + // Enable to debug + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + + // Make a new bucket. 
+ function = "MakeBucket(bucketName, region)" + functionAll = "MakeBucket(bucketName, region)" + args["bucketName"] = bucketName + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + + defer cleanupBucket(bucketName, c) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + // Generate a random file name. + fileName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + file, err := os.Create(fileName) + if err != nil { + logError(testName, function, args, startTime, "", "File creation failed", err) + return + } + for i := 0; i < 3; i++ { + buf := make([]byte, rand.Intn(1<<19)) + _, err = file.Write(buf) + if err != nil { + logError(testName, function, args, startTime, "", "File write failed", err) + return + } + } + file.Close() + + // Verify if bucket exits and you have access. + var exists bool + function = "BucketExists(bucketName)" + functionAll += ", " + function + args = map[string]interface{}{ + "bucketName": bucketName, + } + exists, err = c.BucketExists(context.Background(), bucketName) + + if err != nil { + logError(testName, function, args, startTime, "", "BucketExists failed", err) + return + } + if !exists { + logError(testName, function, args, startTime, "", "Could not find the bucket", err) + return + } + + // Asserting the default bucket policy. + function = "GetBucketPolicy(ctx, bucketName)" + functionAll += ", " + function + args = map[string]interface{}{ + "bucketName": bucketName, + } + nilPolicy, err := c.GetBucketPolicy(context.Background(), bucketName) + if err != nil { + logError(testName, function, args, startTime, "", "GetBucketPolicy failed", err) + return + } + if nilPolicy != "" { + logError(testName, function, args, startTime, "", "policy should be set to nil", err) + return + } + + // Set the bucket policy to 'public readonly'. + function = "SetBucketPolicy(bucketName, readOnlyPolicy)" + functionAll += ", " + function + + readOnlyPolicy := `{"Version":"2012-10-17","Statement":[{"Effect":"Allow","Principal":{"AWS":["*"]},"Action":["s3:ListBucket"],"Resource":["arn:aws:s3:::` + bucketName + `"]}]}` + args = map[string]interface{}{ + "bucketName": bucketName, + "bucketPolicy": readOnlyPolicy, + } + + err = c.SetBucketPolicy(context.Background(), bucketName, readOnlyPolicy) + if err != nil { + logError(testName, function, args, startTime, "", "SetBucketPolicy failed", err) + return + } + // should return policy `readonly`. + function = "GetBucketPolicy(ctx, bucketName)" + functionAll += ", " + function + args = map[string]interface{}{ + "bucketName": bucketName, + } + _, err = c.GetBucketPolicy(context.Background(), bucketName) + if err != nil { + logError(testName, function, args, startTime, "", "GetBucketPolicy failed", err) + return + } + + // Make the bucket 'public writeonly'. + function = "SetBucketPolicy(bucketName, writeOnlyPolicy)" + functionAll += ", " + function + + writeOnlyPolicy := `{"Version":"2012-10-17","Statement":[{"Effect":"Allow","Principal":{"AWS":["*"]},"Action":["s3:ListBucketMultipartUploads"],"Resource":["arn:aws:s3:::` + bucketName + `"]}]}` + args = map[string]interface{}{ + "bucketName": bucketName, + "bucketPolicy": writeOnlyPolicy, + } + err = c.SetBucketPolicy(context.Background(), bucketName, writeOnlyPolicy) + + if err != nil { + logError(testName, function, args, startTime, "", "SetBucketPolicy failed", err) + return + } + // should return policy `writeonly`. 
+ function = "GetBucketPolicy(ctx, bucketName)" + functionAll += ", " + function + args = map[string]interface{}{ + "bucketName": bucketName, + } + + _, err = c.GetBucketPolicy(context.Background(), bucketName) + if err != nil { + logError(testName, function, args, startTime, "", "GetBucketPolicy failed", err) + return + } + + // Make the bucket 'public read/write'. + function = "SetBucketPolicy(bucketName, readWritePolicy)" + functionAll += ", " + function + + readWritePolicy := `{"Version":"2012-10-17","Statement":[{"Effect":"Allow","Principal":{"AWS":["*"]},"Action":["s3:ListBucket","s3:ListBucketMultipartUploads"],"Resource":["arn:aws:s3:::` + bucketName + `"]}]}` + + args = map[string]interface{}{ + "bucketName": bucketName, + "bucketPolicy": readWritePolicy, + } + err = c.SetBucketPolicy(context.Background(), bucketName, readWritePolicy) + + if err != nil { + logError(testName, function, args, startTime, "", "SetBucketPolicy failed", err) + return + } + // should return policy `readwrite`. + function = "GetBucketPolicy(bucketName)" + functionAll += ", " + function + args = map[string]interface{}{ + "bucketName": bucketName, + } + _, err = c.GetBucketPolicy(context.Background(), bucketName) + if err != nil { + logError(testName, function, args, startTime, "", "GetBucketPolicy failed", err) + return + } + + // List all buckets. + function = "ListBuckets()" + functionAll += ", " + function + args = nil + buckets, err := c.ListBuckets(context.Background()) + + if len(buckets) == 0 { + logError(testName, function, args, startTime, "", "Found bucket list to be empty", err) + return + } + if err != nil { + logError(testName, function, args, startTime, "", "ListBuckets failed", err) + return + } + + // Verify if previously created bucket is listed in list buckets. + bucketFound := false + for _, bucket := range buckets { + if bucket.Name == bucketName { + bucketFound = true + } + } + + // If bucket not found error out. + if !bucketFound { + logError(testName, function, args, startTime, "", "Bucket: "+bucketName+" not found", err) + return + } + + objectName := bucketName + "unique" + + // Generate data + buf := bytes.Repeat([]byte("f"), 1<<19) + + function = "PutObject(bucketName, objectName, reader, contentType)" + functionAll += ", " + function + args = map[string]interface{}{ + "bucketName": bucketName, + "objectName": objectName, + "contentType": "", + } + + _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + + args = map[string]interface{}{ + "bucketName": bucketName, + "objectName": objectName + "-nolength", + "contentType": "binary/octet-stream", + } + + _, err = c.PutObject(context.Background(), bucketName, objectName+"-nolength", bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + + // Instantiate a done channel to close all listing. + doneCh := make(chan struct{}) + defer close(doneCh) + + objFound := false + isRecursive := true // Recursive is true. 
+ + function = "ListObjects(bucketName, objectName, isRecursive, doneCh)" + functionAll += ", " + function + args = map[string]interface{}{ + "bucketName": bucketName, + "objectName": objectName, + "isRecursive": isRecursive, + } + + for obj := range c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{UseV1: true, Prefix: objectName, Recursive: true}) { + if obj.Key == objectName { + objFound = true + break + } + } + if !objFound { + logError(testName, function, args, startTime, "", "Object "+objectName+" not found", err) + return + } + + objFound = false + isRecursive = true // Recursive is true. + function = "ListObjects()" + functionAll += ", " + function + args = map[string]interface{}{ + "bucketName": bucketName, + "objectName": objectName, + "isRecursive": isRecursive, + } + + for obj := range c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{Prefix: objectName, Recursive: isRecursive}) { + if obj.Key == objectName { + objFound = true + break + } + } + if !objFound { + logError(testName, function, args, startTime, "", "Object "+objectName+" not found", err) + return + } + + incompObjNotFound := true + + function = "ListIncompleteUploads(bucketName, objectName, isRecursive, doneCh)" + functionAll += ", " + function + args = map[string]interface{}{ + "bucketName": bucketName, + "objectName": objectName, + "isRecursive": isRecursive, + } + + for objIncompl := range c.ListIncompleteUploads(context.Background(), bucketName, objectName, isRecursive) { + if objIncompl.Key != "" { + incompObjNotFound = false + break + } + } + if !incompObjNotFound { + logError(testName, function, args, startTime, "", "Unexpected dangling incomplete upload found", err) + return + } + + function = "GetObject(bucketName, objectName)" + functionAll += ", " + function + args = map[string]interface{}{ + "bucketName": bucketName, + "objectName": objectName, + } + newReader, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject failed", err) + return + } + + newReadBytes, err := ioutil.ReadAll(newReader) + if err != nil { + logError(testName, function, args, startTime, "", "ReadAll failed", err) + return + } + + if !bytes.Equal(newReadBytes, buf) { + logError(testName, function, args, startTime, "", "GetObject bytes mismatch", err) + return + } + newReader.Close() + + function = "FGetObject(bucketName, objectName, fileName)" + functionAll += ", " + function + args = map[string]interface{}{ + "bucketName": bucketName, + "objectName": objectName, + "fileName": fileName + "-f", + } + err = c.FGetObject(context.Background(), bucketName, objectName, fileName+"-f", minio.GetObjectOptions{}) + + if err != nil { + logError(testName, function, args, startTime, "", "FGetObject failed", err) + return + } + + function = "PresignedHeadObject(bucketName, objectName, expires, reqParams)" + functionAll += ", " + function + args = map[string]interface{}{ + "bucketName": bucketName, + "objectName": "", + "expires": 3600 * time.Second, + } + if _, err = c.PresignedHeadObject(context.Background(), bucketName, "", 3600*time.Second, nil); err == nil { + logError(testName, function, args, startTime, "", "PresignedHeadObject success", err) + return + } + + // Generate presigned HEAD object url. 
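+ // A presigned URL carries the signature and expiry in its query string,
+ // so whoever holds the URL can perform that one operation without
+ // credentials until it expires. The empty object name above is expected
+ // to fail client-side validation before any request is sent.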
+ function = "PresignedHeadObject(bucketName, objectName, expires, reqParams)" + functionAll += ", " + function + args = map[string]interface{}{ + "bucketName": bucketName, + "objectName": objectName, + "expires": 3600 * time.Second, + } + presignedHeadURL, err := c.PresignedHeadObject(context.Background(), bucketName, objectName, 3600*time.Second, nil) + if err != nil { + logError(testName, function, args, startTime, "", "PresignedHeadObject failed", err) + return + } + + transport, err := minio.DefaultTransport(mustParseBool(os.Getenv(enableHTTPS))) + if err != nil { + logError(testName, function, args, startTime, "", "DefaultTransport failed", err) + return + } + + httpClient := &http.Client{ + // Setting a sensible time out of 30secs to wait for response + // headers. Request is pro-actively canceled after 30secs + // with no response. + Timeout: 30 * time.Second, + Transport: transport, + } + + req, err := http.NewRequest(http.MethodHead, presignedHeadURL.String(), nil) + if err != nil { + logError(testName, function, args, startTime, "", "PresignedHeadObject request was incorrect", err) + return + } + + // Verify if presigned url works. + resp, err := httpClient.Do(req) + if err != nil { + logError(testName, function, args, startTime, "", "PresignedHeadObject response incorrect", err) + return + } + if resp.StatusCode != http.StatusOK { + logError(testName, function, args, startTime, "", "PresignedHeadObject response incorrect, status "+string(resp.StatusCode), err) + return + } + if resp.Header.Get("ETag") == "" { + logError(testName, function, args, startTime, "", "PresignedHeadObject response incorrect", err) + return + } + resp.Body.Close() + + function = "PresignedGetObject(bucketName, objectName, expires, reqParams)" + functionAll += ", " + function + args = map[string]interface{}{ + "bucketName": bucketName, + "objectName": "", + "expires": 3600 * time.Second, + } + _, err = c.PresignedGetObject(context.Background(), bucketName, "", 3600*time.Second, nil) + if err == nil { + logError(testName, function, args, startTime, "", "PresignedGetObject success", err) + return + } + + // Generate presigned GET object url. + function = "PresignedGetObject(bucketName, objectName, expires, reqParams)" + functionAll += ", " + function + args = map[string]interface{}{ + "bucketName": bucketName, + "objectName": objectName, + "expires": 3600 * time.Second, + } + presignedGetURL, err := c.PresignedGetObject(context.Background(), bucketName, objectName, 3600*time.Second, nil) + if err != nil { + logError(testName, function, args, startTime, "", "PresignedGetObject failed", err) + return + } + + // Verify if presigned url works. 
+ req, err = http.NewRequest(http.MethodGet, presignedGetURL.String(), nil)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PresignedGetObject request incorrect", err)
+ return
+ }
+
+ resp, err = httpClient.Do(req)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PresignedGetObject response incorrect", err)
+ return
+ }
+ if resp.StatusCode != http.StatusOK {
+ logError(testName, function, args, startTime, "", "PresignedGetObject response incorrect, status "+fmt.Sprintf("%d", resp.StatusCode), err)
+ return
+ }
+ newPresignedBytes, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PresignedGetObject response incorrect", err)
+ return
+ }
+ resp.Body.Close()
+ if !bytes.Equal(newPresignedBytes, buf) {
+ logError(testName, function, args, startTime, "", "PresignedGetObject response incorrect", err)
+ return
+ }
+
+ // Set request parameters.
+ reqParams := make(url.Values)
+ reqParams.Set("response-content-disposition", "attachment; filename=\"test.txt\"")
+ args = map[string]interface{}{
+ "bucketName": bucketName,
+ "objectName": objectName,
+ "expires": 3600 * time.Second,
+ "reqParams": reqParams,
+ }
+ presignedGetURL, err = c.PresignedGetObject(context.Background(), bucketName, objectName, 3600*time.Second, reqParams)
+
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PresignedGetObject failed", err)
+ return
+ }
+
+ // Verify if presigned url works.
+ req, err = http.NewRequest(http.MethodGet, presignedGetURL.String(), nil)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PresignedGetObject request incorrect", err)
+ return
+ }
+
+ resp, err = httpClient.Do(req)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PresignedGetObject response incorrect", err)
+ return
+ }
+ if resp.StatusCode != http.StatusOK {
+ logError(testName, function, args, startTime, "", "PresignedGetObject response incorrect, status "+fmt.Sprintf("%d", resp.StatusCode), err)
+ return
+ }
+ newPresignedBytes, err = ioutil.ReadAll(resp.Body)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PresignedGetObject response incorrect", err)
+ return
+ }
+ resp.Body.Close()
+ if !bytes.Equal(newPresignedBytes, buf) {
+ logError(testName, function, args, startTime, "", "Bytes mismatch for presigned GET URL", err)
+ return
+ }
+ if resp.Header.Get("Content-Disposition") != "attachment; filename=\"test.txt\"" {
+ logError(testName, function, args, startTime, "", "wrong Content-Disposition received "+resp.Header.Get("Content-Disposition"), err)
+ return
+ }
+
+ function = "PresignedPutObject(bucketName, objectName, expires)"
+ functionAll += ", " + function
+ args = map[string]interface{}{
+ "bucketName": bucketName,
+ "objectName": "",
+ "expires": 3600 * time.Second,
+ }
+ _, err = c.PresignedPutObject(context.Background(), bucketName, "", 3600*time.Second)
+ if err == nil {
+ logError(testName, function, args, startTime, "", "PresignedPutObject success", err)
+ return
+ }
+
+ function = "PresignedPutObject(bucketName, objectName, expires)"
+ functionAll += ", " + function
+ args = map[string]interface{}{
+ "bucketName": bucketName,
+ "objectName": objectName + "-presigned",
+ "expires": 3600 * time.Second,
+ }
+ presignedPutURL, err := c.PresignedPutObject(context.Background(), bucketName, objectName+"-presigned", 3600*time.Second)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PresignedPutObject failed", err)
+ return
+ }
+
+ buf = bytes.Repeat([]byte("g"), 1<<19)
+
+ req, err = http.NewRequest(http.MethodPut, presignedPutURL.String(), bytes.NewReader(buf))
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Couldn't make HTTP request with PresignedPutObject URL", err)
+ return
+ }
+
+ resp, err = httpClient.Do(req)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PresignedPutObject failed", err)
+ return
+ }
+ resp.Body.Close()
+
+ newReader, err = c.GetObject(context.Background(), bucketName, objectName+"-presigned", minio.GetObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "GetObject after PresignedPutObject failed", err)
+ return
+ }
+
+ newReadBytes, err = ioutil.ReadAll(newReader)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "ReadAll after GetObject failed", err)
+ return
+ }
+
+ if !bytes.Equal(newReadBytes, buf) {
+ logError(testName, function, args, startTime, "", "Bytes mismatch", err)
+ return
+ }
+
+ function = "PresignHeader(method, bucketName, objectName, expires, reqParams, extraHeaders)"
+ functionAll += ", " + function
+ presignExtraHeaders := map[string][]string{
+ "mysecret": {"abcxxx"},
+ }
+ args = map[string]interface{}{
+ "method": "PUT",
+ "bucketName": bucketName,
+ "objectName": objectName + "-presign-custom",
+ "expires": 3600 * time.Second,
+ "extraHeaders": presignExtraHeaders,
+ }
+ presignedURL, err := c.PresignHeader(context.Background(), "PUT", bucketName, objectName+"-presign-custom", 3600*time.Second, nil, presignExtraHeaders)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Presigned failed", err)
+ return
+ }
+
+ // Generate data more than 32K
+ buf = bytes.Repeat([]byte("1"), rand.Intn(1<<10)+32*1024)
+
+ req, err = http.NewRequest(http.MethodPut, presignedURL.String(), bytes.NewReader(buf))
+ if err != nil {
+ logError(testName, function, args, startTime, "", "HTTP request to Presigned URL failed", err)
+ return
+ }
+
+ req.Header.Add("mysecret", "abcxxx")
+ resp, err = httpClient.Do(req)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "HTTP request to Presigned URL failed", err)
+ return
+ }
+ resp.Body.Close()
+
+ // Download the uploaded object to verify
+ args = map[string]interface{}{
+ "bucketName": bucketName,
+ "objectName": objectName + "-presign-custom",
+ }
+ newReader, err = c.GetObject(context.Background(), bucketName, objectName+"-presign-custom", minio.GetObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "GetObject of uploaded custom-presigned object failed", err)
+ return
+ }
+
+ newReadBytes, err = ioutil.ReadAll(newReader)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "ReadAll failed during get on custom-presigned put object", err)
+ return
+ }
+ newReader.Close()
+
+ if !bytes.Equal(newReadBytes, buf) {
+ logError(testName, function, args, startTime, "", "Bytes mismatch on custom-presigned object upload verification", err)
+ return
+ }
+
+ function = "RemoveObject(bucketName, objectName)"
+ functionAll += ", " + function
+ args = map[string]interface{}{
+ "bucketName": bucketName,
+ "objectName": objectName,
+ }
+ err = c.RemoveObject(context.Background(), bucketName, objectName, minio.RemoveObjectOptions{})
+
+ if err != nil {
+ logError(testName, function, args, startTime, "", "RemoveObject failed", err)
+ return
+ }
+ args["objectName"] = objectName + "-f"
+ err = c.RemoveObject(context.Background(), bucketName, objectName+"-f", minio.RemoveObjectOptions{})
+
+ if err != nil {
+ logError(testName, function, args, startTime, "", "RemoveObject failed", err)
+ return
+ }
+
+ args["objectName"] = objectName + "-nolength"
+ err = c.RemoveObject(context.Background(), bucketName, objectName+"-nolength", minio.RemoveObjectOptions{})
+
+ if err != nil {
+ logError(testName, function, args, startTime, "", "RemoveObject failed", err)
+ return
+ }
+
+ args["objectName"] = objectName + "-presigned"
+ err = c.RemoveObject(context.Background(), bucketName, objectName+"-presigned", minio.RemoveObjectOptions{})
+
+ if err != nil {
+ logError(testName, function, args, startTime, "", "RemoveObject failed", err)
+ return
+ }
+
+ args["objectName"] = objectName + "-presign-custom"
+ err = c.RemoveObject(context.Background(), bucketName, objectName+"-presign-custom", minio.RemoveObjectOptions{})
+
+ if err != nil {
+ logError(testName, function, args, startTime, "", "RemoveObject failed", err)
+ return
+ }
+
+ function = "RemoveBucket(bucketName)"
+ functionAll += ", " + function
+ args = map[string]interface{}{
+ "bucketName": bucketName,
+ }
+ err = c.RemoveBucket(context.Background(), bucketName)
+
+ if err != nil {
+ logError(testName, function, args, startTime, "", "RemoveBucket failed", err)
+ return
+ }
+ err = c.RemoveBucket(context.Background(), bucketName)
+ if err == nil {
+ logError(testName, function, args, startTime, "", "RemoveBucket did not fail for non-existent bucket", err)
+ return
+ }
+ if err.Error() != "The specified bucket does not exist" {
+ logError(testName, function, args, startTime, "", "RemoveBucket failed", err)
+ return
+ }
+
+ os.Remove(fileName)
+ os.Remove(fileName + "-f")
+ successLogger(testName, functionAll, args, startTime).Info()
+}
+
+// Test for validating GetObject Reader* methods functioning when the
+// object is modified in the object store.
+func testGetObjectModified() {
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "GetObject(bucketName, objectName)"
+ args := map[string]interface{}{}
+
+ // Instantiate new minio client object.
+ c, err := minio.New(os.Getenv(serverEndpoint),
+ &minio.Options{
+ Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+ Secure: mustParseBool(os.Getenv(enableHTTPS)),
+ })
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
+ return
+ }
+
+ // Enable tracing, write to stderr.
+ // c.TraceOn(os.Stderr)
+
+ // Set user agent.
+ c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
+
+ // Make a new bucket.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+ args["bucketName"] = bucketName
+
+ err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+ return
+ }
+
+ defer cleanupBucket(bucketName, c)
+
+ // Upload an object.
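+ // The Object returned by GetObject below issues its HTTP requests lazily
+ // and caches the ETag seen on the first read; later requests are
+ // preconditioned on that ETag, which is what the overwrite check in this
+ // test relies on.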
+ objectName := "myobject" + args["objectName"] = objectName + content := "helloworld" + _, err = c.PutObject(context.Background(), bucketName, objectName, strings.NewReader(content), int64(len(content)), minio.PutObjectOptions{ContentType: "application/text"}) + if err != nil { + logError(testName, function, args, startTime, "", "Failed to upload "+objectName+", to bucket "+bucketName, err) + return + } + + defer c.RemoveObject(context.Background(), bucketName, objectName, minio.RemoveObjectOptions{}) + + reader, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "Failed to GetObject "+objectName+", from bucket "+bucketName, err) + return + } + defer reader.Close() + + // Read a few bytes of the object. + b := make([]byte, 5) + n, err := reader.ReadAt(b, 0) + if err != nil { + logError(testName, function, args, startTime, "", "Failed to read object "+objectName+", from bucket "+bucketName+" at an offset", err) + return + } + + // Upload different contents to the same object while object is being read. + newContent := "goodbyeworld" + _, err = c.PutObject(context.Background(), bucketName, objectName, strings.NewReader(newContent), int64(len(newContent)), minio.PutObjectOptions{ContentType: "application/text"}) + if err != nil { + logError(testName, function, args, startTime, "", "Failed to upload "+objectName+", to bucket "+bucketName, err) + return + } + + // Confirm that a Stat() call in between doesn't change the Object's cached etag. + _, err = reader.Stat() + expectedError := "At least one of the pre-conditions you specified did not hold" + if err.Error() != expectedError { + logError(testName, function, args, startTime, "", "Expected Stat to fail with error "+expectedError+", but received "+err.Error(), err) + return + } + + // Read again only to find object contents have been modified since last read. + _, err = reader.ReadAt(b, int64(n)) + if err.Error() != expectedError { + logError(testName, function, args, startTime, "", "Expected ReadAt to fail with error "+expectedError+", but received "+err.Error(), err) + return + } + + successLogger(testName, function, args, startTime).Info() +} + +// Test validates putObject to upload a file seeked at a given offset. +func testPutObjectUploadSeekedObject() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "PutObject(bucketName, objectName, fileToUpload, contentType)" + args := map[string]interface{}{ + "bucketName": "", + "objectName": "", + "fileToUpload": "", + "contentType": "binary/octet-stream", + } + + // Instantiate new minio client object. + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + + // Make a new bucket. 
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+ args["bucketName"] = bucketName
+
+ err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+ return
+ }
+ defer cleanupBucket(bucketName, c)
+
+ var tempfile *os.File
+
+ if fileName := getMintDataDirFilePath("datafile-100-kB"); fileName != "" {
+ tempfile, err = os.Open(fileName)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "File open failed", err)
+ return
+ }
+ args["fileToUpload"] = fileName
+ } else {
+ tempfile, err = ioutil.TempFile("", "minio-go-upload-test-")
+ if err != nil {
+ logError(testName, function, args, startTime, "", "TempFile create failed", err)
+ return
+ }
+ args["fileToUpload"] = tempfile.Name()
+
+ // Generate 100kB data
+ if _, err = io.Copy(tempfile, getDataReader("datafile-100-kB")); err != nil {
+ logError(testName, function, args, startTime, "", "File copy failed", err)
+ return
+ }
+
+ defer os.Remove(tempfile.Name())
+
+ // Seek back to the beginning of the file.
+ tempfile.Seek(0, 0)
+ }
+ length := 100 * humanize.KiByte
+ objectName := fmt.Sprintf("test-file-%v", rand.Uint32())
+ args["objectName"] = objectName
+
+ offset := length / 2
+ if _, err = tempfile.Seek(int64(offset), 0); err != nil {
+ logError(testName, function, args, startTime, "", "TempFile seek failed", err)
+ return
+ }
+
+ _, err = c.PutObject(context.Background(), bucketName, objectName, tempfile, int64(length-offset), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PutObject failed", err)
+ return
+ }
+ tempfile.Close()
+
+ obj, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "GetObject failed", err)
+ return
+ }
+ defer obj.Close()
+
+ n, err := obj.Seek(int64(offset), 0)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Seek failed", err)
+ return
+ }
+ if n != int64(offset) {
+ logError(testName, function, args, startTime, "", fmt.Sprintf("Invalid offset returned, expected %d got %d", int64(offset), n), err)
+ return
+ }
+
+ _, err = c.PutObject(context.Background(), bucketName, objectName+"getobject", obj, int64(length-offset), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PutObject failed", err)
+ return
+ }
+ st, err := c.StatObject(context.Background(), bucketName, objectName+"getobject", minio.StatObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "StatObject failed", err)
+ return
+ }
+ if st.Size != int64(length-offset) {
+ logError(testName, function, args, startTime, "", fmt.Sprintf("Invalid size returned, expected %d got %d", int64(length-offset), st.Size), err)
+ return
+ }
+
+ successLogger(testName, function, args, startTime).Info()
+}
+
+// Tests bucket re-create errors.
+func testMakeBucketErrorV2() {
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "MakeBucket(bucketName, region)"
+ args := map[string]interface{}{
+ "bucketName": "",
+ "region": "eu-west-1",
+ }
+
+ // Seed random based on current time.
+ rand.Seed(time.Now().Unix())
+
+ // Instantiate new minio client object.
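+ // The *V2 variants below exercise AWS Signature Version 2 request
+ // signing via credentials.NewStaticV2; the client API surface is
+ // otherwise identical to the V4 tests above.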
+ c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + region := "eu-west-1" + args["bucketName"] = bucketName + args["region"] = region + + // Make a new bucket in 'eu-west-1'. + if err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: region}); err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + defer cleanupBucket(bucketName, c) + + if err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: region}); err == nil { + logError(testName, function, args, startTime, "", "MakeBucket did not fail for existing bucket name", err) + return + } + // Verify valid error response from server. + if minio.ToErrorResponse(err).Code != "BucketAlreadyExists" && + minio.ToErrorResponse(err).Code != "BucketAlreadyOwnedByYou" { + logError(testName, function, args, startTime, "", "Invalid error returned by server", err) + return + } + + successLogger(testName, function, args, startTime).Info() +} + +// Test get object reader to not throw error on being closed twice. +func testGetObjectClosedTwiceV2() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "MakeBucket(bucketName, region)" + args := map[string]interface{}{ + "bucketName": "", + "region": "eu-west-1", + } + + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object. + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + defer cleanupBucket(bucketName, c) + + // Generate 33K of data. 
+ bufSize := dataFileMap["datafile-33-kB"]
+ reader := getDataReader("datafile-33-kB")
+ defer reader.Close()
+
+ // Save the data
+ objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+ args["objectName"] = objectName
+
+ _, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PutObject failed", err)
+ return
+ }
+
+ // Read the data back
+ r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "GetObject failed", err)
+ return
+ }
+
+ st, err := r.Stat()
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Stat failed", err)
+ return
+ }
+
+ if st.Size != int64(bufSize) {
+ logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+fmt.Sprintf("%d", bufSize)+" got "+fmt.Sprintf("%d", st.Size), err)
+ return
+ }
+ if err := r.Close(); err != nil {
+ logError(testName, function, args, startTime, "", "Object Close failed", err)
+ return
+ }
+ if err := r.Close(); err == nil {
+ logError(testName, function, args, startTime, "", "Object is already closed, should return error", err)
+ return
+ }
+
+ successLogger(testName, function, args, startTime).Info()
+}
+
+// Tests FPutObject hidden contentType setting
+func testFPutObjectV2() {
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "FPutObject(bucketName, objectName, fileName, opts)"
+ args := map[string]interface{}{
+ "bucketName": "",
+ "objectName": "",
+ "fileName": "",
+ "opts": "",
+ }
+
+ // Seed random based on current time.
+ rand.Seed(time.Now().Unix())
+
+ // Instantiate new minio client object.
+ c, err := minio.New(os.Getenv(serverEndpoint),
+ &minio.Options{
+ Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+ Secure: mustParseBool(os.Getenv(enableHTTPS)),
+ })
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err)
+ return
+ }
+
+ // Enable tracing, write to stderr.
+ // c.TraceOn(os.Stderr)
+
+ // Set user agent.
+ c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+ args["bucketName"] = bucketName
+
+ // Make a new bucket.
+ err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+ return
+ }
+
+ defer cleanupBucket(bucketName, c)
+
+ // Make a temp file with 11*1024*1024 bytes of data.
+ file, err := ioutil.TempFile(os.TempDir(), "FPutObjectTest")
+ if err != nil {
+ logError(testName, function, args, startTime, "", "TempFile creation failed", err)
+ return
+ }
+
+ r := bytes.NewReader(bytes.Repeat([]byte("b"), 11*1024*1024))
+ n, err := io.CopyN(file, r, 11*1024*1024)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Copy failed", err)
+ return
+ }
+ if n != int64(11*1024*1024) {
+ logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+fmt.Sprintf("%d", int64(11*1024*1024))+" got "+fmt.Sprintf("%d", n), err)
+ return
+ }
+
+ // Close the file pro-actively for windows.
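+ // Windows keeps an exclusive lock on open files, so the Rename and
+ // Remove calls further down would fail if the handle stayed open.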
+ err = file.Close()
+ if err != nil {
+ logError(testName, function, args, startTime, "", "File close failed", err)
+ return
+ }
+
+ // Set base object name
+ objectName := bucketName + "FPutObject"
+ args["objectName"] = objectName
+ args["fileName"] = file.Name()
+
+ // Perform standard FPutObject with contentType provided (Expecting application/octet-stream)
+ _, err = c.FPutObject(context.Background(), bucketName, objectName+"-standard", file.Name(), minio.PutObjectOptions{ContentType: "application/octet-stream"})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "FPutObject failed", err)
+ return
+ }
+
+ // Perform FPutObject with no contentType provided (Expecting application/octet-stream)
+ args["objectName"] = objectName + "-Octet"
+ args["contentType"] = ""
+
+ _, err = c.FPutObject(context.Background(), bucketName, objectName+"-Octet", file.Name(), minio.PutObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "FPutObject failed", err)
+ return
+ }
+
+ // Add extension to temp file name
+ fileName := file.Name()
+ err = os.Rename(fileName, fileName+".gtar")
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Rename failed", err)
+ return
+ }
+
+ // Perform FPutObject with no contentType provided (Expecting application/x-gtar)
+ args["objectName"] = objectName + "-GTar"
+ args["contentType"] = ""
+ args["fileName"] = fileName + ".gtar"
+
+ _, err = c.FPutObject(context.Background(), bucketName, objectName+"-GTar", fileName+".gtar", minio.PutObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "FPutObject failed", err)
+ return
+ }
+
+ // Check headers and sizes
+ rStandard, err := c.StatObject(context.Background(), bucketName, objectName+"-standard", minio.StatObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "StatObject failed", err)
+ return
+ }
+
+ if rStandard.Size != 11*1024*1024 {
+ logError(testName, function, args, startTime, "", "Unexpected size", nil)
+ return
+ }
+
+ if rStandard.ContentType != "application/octet-stream" {
+ logError(testName, function, args, startTime, "", "Content-Type headers mismatched, expected: application/octet-stream , got "+rStandard.ContentType, err)
+ return
+ }
+
+ rOctet, err := c.StatObject(context.Background(), bucketName, objectName+"-Octet", minio.StatObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "StatObject failed", err)
+ return
+ }
+ if rOctet.ContentType != "application/octet-stream" {
+ logError(testName, function, args, startTime, "", "Content-Type headers mismatched, expected: application/octet-stream , got "+rOctet.ContentType, err)
+ return
+ }
+
+ if rOctet.Size != 11*1024*1024 {
+ logError(testName, function, args, startTime, "", "Unexpected size", nil)
+ return
+ }
+
+ rGTar, err := c.StatObject(context.Background(), bucketName, objectName+"-GTar", minio.StatObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "StatObject failed", err)
+ return
+ }
+ if rGTar.Size != 11*1024*1024 {
+ logError(testName, function, args, startTime, "", "Unexpected size", nil)
+ return
+ }
+ if rGTar.ContentType != "application/x-gtar" && rGTar.ContentType != "application/octet-stream" && rGTar.ContentType != "application/x-tar" {
+ logError(testName, function, args, startTime, "", "Content-Type headers mismatched, expected one of: application/x-gtar, application/x-tar, application/octet-stream , got "+rGTar.ContentType, err)
+ return
+ }
+
+ os.Remove(fileName + ".gtar")
+ successLogger(testName, function, args, startTime).Info()
+}
+
+// Tests various bucket supported formats.
+func testMakeBucketRegionsV2() {
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "MakeBucket(bucketName, region)"
+ args := map[string]interface{}{
+ "bucketName": "",
+ "region": "eu-west-1",
+ }
+
+ // Seed random based on current time.
+ rand.Seed(time.Now().Unix())
+
+ // Instantiate new minio client object.
+ c, err := minio.New(os.Getenv(serverEndpoint),
+ &minio.Options{
+ Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+ Secure: mustParseBool(os.Getenv(enableHTTPS)),
+ })
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err)
+ return
+ }
+
+ // Enable tracing, write to stderr.
+ // c.TraceOn(os.Stderr)
+
+ // Set user agent.
+ c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+ args["bucketName"] = bucketName
+
+ // Make a new bucket in 'eu-west-1'.
+ if err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "eu-west-1"}); err != nil {
+ logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+ return
+ }
+
+ if err = cleanupBucket(bucketName, c); err != nil {
+ logError(testName, function, args, startTime, "", "CleanupBucket failed while removing bucket recursively", err)
+ return
+ }
+
+ // Make a new bucket with '.' in its name, in 'us-west-2'. This
+ // request is internally staged into a path style instead of
+ // virtual host style.
+ if err = c.MakeBucket(context.Background(), bucketName+".withperiod", minio.MakeBucketOptions{Region: "us-west-2"}); err != nil {
+ args["bucketName"] = bucketName + ".withperiod"
+ args["region"] = "us-west-2"
+ logError(testName, function, args, startTime, "", "MakeBucket test with a bucket name with period, '.', failed", err)
+ return
+ }
+
+ // Delete all objects and buckets
+ if err = cleanupBucket(bucketName+".withperiod", c); err != nil {
+ logError(testName, function, args, startTime, "", "CleanupBucket failed while removing bucket recursively", err)
+ return
+ }
+
+ successLogger(testName, function, args, startTime).Info()
+}
+
+// Tests get object ReaderSeeker interface methods.
+func testGetObjectReadSeekFunctionalV2() {
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "GetObject(bucketName, objectName)"
+ args := map[string]interface{}{}
+
+ // Seed random based on current time.
+ rand.Seed(time.Now().Unix())
+
+ // Instantiate new minio client object.
+ c, err := minio.New(os.Getenv(serverEndpoint),
+ &minio.Options{
+ Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+ Secure: mustParseBool(os.Getenv(enableHTTPS)),
+ })
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err)
+ return
+ }
+
+ // Enable tracing, write to stderr.
+ // c.TraceOn(os.Stderr)
+
+ // Set user agent.
+ c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+ args["bucketName"] = bucketName
+
+ // Make a new bucket.
+ err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+ return
+ }
+
+ defer cleanupBucket(bucketName, c)
+
+ // Generate 33K of data.
+ bufSize := dataFileMap["datafile-33-kB"]
+ reader := getDataReader("datafile-33-kB")
+ defer reader.Close()
+
+ objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+ args["objectName"] = objectName
+
+ buf, err := ioutil.ReadAll(reader)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "ReadAll failed", err)
+ return
+ }
+
+ // Save the data.
+ _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PutObject failed", err)
+ return
+ }
+
+ // Read the data back
+ r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "GetObject failed", err)
+ return
+ }
+ defer r.Close()
+
+ st, err := r.Stat()
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Stat failed", err)
+ return
+ }
+
+ if st.Size != int64(bufSize) {
+ logError(testName, function, args, startTime, "", "Number of bytes in stat does not match, expected "+fmt.Sprintf("%d", int64(bufSize))+" got "+fmt.Sprintf("%d", st.Size), err)
+ return
+ }
+
+ offset := int64(2048)
+ n, err := r.Seek(offset, 0)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Seek failed", err)
+ return
+ }
+ if n != offset {
+ logError(testName, function, args, startTime, "", "Number of seeked bytes does not match, expected "+fmt.Sprintf("%d", offset)+" got "+fmt.Sprintf("%d", n), err)
+ return
+ }
+ n, err = r.Seek(0, 1)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Seek failed", err)
+ return
+ }
+ if n != offset {
+ logError(testName, function, args, startTime, "", "Number of seeked bytes does not match, expected "+fmt.Sprintf("%d", offset)+" got "+fmt.Sprintf("%d", n), err)
+ return
+ }
+ _, err = r.Seek(offset, 2)
+ if err == nil {
+ logError(testName, function, args, startTime, "", "Seek on positive offset for whence '2' should error out", err)
+ return
+ }
+ n, err = r.Seek(-offset, 2)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Seek failed", err)
+ return
+ }
+ if n != st.Size-offset {
+ logError(testName, function, args, startTime, "", "Number of seeked bytes does not match, expected "+fmt.Sprintf("%d", st.Size-offset)+" got "+fmt.Sprintf("%d", n), err)
+ return
+ }
+
+ var buffer1 bytes.Buffer
+ if _, err = io.CopyN(&buffer1, r, st.Size); err != nil {
+ if err != io.EOF {
+ logError(testName, function, args, startTime, "", "Copy failed", err)
+ return
+ }
+ }
+ if !bytes.Equal(buf[len(buf)-int(offset):], buffer1.Bytes()) {
+ logError(testName, function, args, startTime, "", "Incorrect read bytes v/s original buffer", err)
+ return
+ }
+
+ // Seek again and read again.
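+ // The whence values 0, 1 and 2 used in this test correspond to
+ // io.SeekStart, io.SeekCurrent and io.SeekEnd respectively.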
+ // Seek again and read again.
+ n, err = r.Seek(offset-1, 0)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Seek failed", err)
+ return
+ }
+ if n != (offset - 1) {
+ logError(testName, function, args, startTime, "", fmt.Sprintf("Seek position does not match, expected %d got %d", offset-1, n), err)
+ return
+ }
+
+ var buffer2 bytes.Buffer
+ if _, err = io.CopyN(&buffer2, r, st.Size); err != nil {
+ if err != io.EOF {
+ logError(testName, function, args, startTime, "", "Copy failed", err)
+ return
+ }
+ }
+ // Verify the remaining bytes.
+ if !bytes.Equal(buf[2047:], buffer2.Bytes()) {
+ logError(testName, function, args, startTime, "", "Incorrect read bytes v/s original buffer", err)
+ return
+ }
+
+ successLogger(testName, function, args, startTime).Info()
+}
+
+// Tests get object ReaderAt interface methods.
+func testGetObjectReadAtFunctionalV2() {
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "GetObject(bucketName, objectName)"
+ args := map[string]interface{}{}
+
+ // Seed random based on current time.
+ rand.Seed(time.Now().Unix())
+
+ // Instantiate new minio client object.
+ c, err := minio.New(os.Getenv(serverEndpoint),
+ &minio.Options{
+ Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+ Secure: mustParseBool(os.Getenv(enableHTTPS)),
+ })
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err)
+ return
+ }
+
+ // Enable tracing, write to stderr.
+ // c.TraceOn(os.Stderr)
+
+ // Set user agent.
+ c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+ args["bucketName"] = bucketName
+
+ // Make a new bucket.
+ err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+ return
+ }
+
+ defer cleanupBucket(bucketName, c)
+
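The ReadAt assertions that follow lean on the io.ReaderAt contract: unlike Read, ReadAt must return an error whenever it returns fewer than len(p) bytes, so a short count is only ever paired with io.EOF. A minimal sketch against an in-memory reader:

    package main

    import (
        "bytes"
        "fmt"
        "io"
    )

    func main() {
        r := bytes.NewReader([]byte("abcdefgh"))
        p := make([]byte, 4)

        // Fully inside the data: n == len(p) and err == nil.
        n, err := r.ReadAt(p, 2)
        fmt.Println(n, err, string(p[:n])) // 4 <nil> cdef

        // Crossing the end: n < len(p) and err == io.EOF.
        n, err = r.ReadAt(p, 6)
        fmt.Println(n, err == io.EOF, string(p[:n])) // 2 true gh
    }
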
+ // Generate 33K of data.
+ bufSize := dataFileMap["datafile-33-kB"]
+ reader := getDataReader("datafile-33-kB")
+ defer reader.Close()
+
+ objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+ args["objectName"] = objectName
+
+ buf, err := ioutil.ReadAll(reader)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "ReadAll failed", err)
+ return
+ }
+
+ // Save the data
+ _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PutObject failed", err)
+ return
+ }
+
+ // Read the data back
+ r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "GetObject failed", err)
+ return
+ }
+ defer r.Close()
+
+ st, err := r.Stat()
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Stat failed", err)
+ return
+ }
+
+ if st.Size != int64(bufSize) {
+ logError(testName, function, args, startTime, "", fmt.Sprintf("Number of bytes does not match, expected %d got %d", bufSize, st.Size), err)
+ return
+ }
+
+ offset := int64(2048)
+
+ // Read directly
+ buf2 := make([]byte, 512)
+ buf3 := make([]byte, 512)
+ buf4 := make([]byte, 512)
+
+ m, err := r.ReadAt(buf2, offset)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "ReadAt failed", err)
+ return
+ }
+ if m != len(buf2) {
+ logError(testName, function, args, startTime, "", fmt.Sprintf("ReadAt read fewer bytes before reaching EOF, expected %d got %d", len(buf2), m), err)
+ return
+ }
+ if !bytes.Equal(buf2, buf[offset:offset+512]) {
+ logError(testName, function, args, startTime, "", "Incorrect read bytes v/s original buffer", err)
+ return
+ }
+ offset += 512
+ m, err = r.ReadAt(buf3, offset)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "ReadAt failed", err)
+ return
+ }
+ if m != len(buf3) {
+ logError(testName, function, args, startTime, "", fmt.Sprintf("ReadAt read fewer bytes before reaching EOF, expected %d got %d", len(buf3), m), err)
+ return
+ }
+ if !bytes.Equal(buf3, buf[offset:offset+512]) {
+ logError(testName, function, args, startTime, "", "Incorrect read bytes v/s original buffer", err)
+ return
+ }
+ offset += 512
+ m, err = r.ReadAt(buf4, offset)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "ReadAt failed", err)
+ return
+ }
+ if m != len(buf4) {
+ logError(testName, function, args, startTime, "", fmt.Sprintf("ReadAt read fewer bytes before reaching EOF, expected %d got %d", len(buf4), m), err)
+ return
+ }
+ if !bytes.Equal(buf4, buf[offset:offset+512]) {
+ logError(testName, function, args, startTime, "", "Incorrect read bytes v/s original buffer", err)
+ return
+ }
+
+ buf5 := make([]byte, bufSize)
+ // Read the whole object.
+ m, err = r.ReadAt(buf5, 0)
+ if err != nil {
+ if err != io.EOF {
+ logError(testName, function, args, startTime, "", "ReadAt failed", err)
+ return
+ }
+ }
+ if m != len(buf5) {
+ logError(testName, function, args, startTime, "", fmt.Sprintf("ReadAt read fewer bytes before reaching EOF, expected %d got %d", len(buf5), m), err)
+ return
+ }
+ if !bytes.Equal(buf, buf5) {
+ logError(testName, function, args, startTime, "", "Incorrect data read in GetObject v/s what was previously uploaded", err)
+ return
+ }
+
+ buf6 := make([]byte, bufSize+1)
+ // Read the whole object and beyond.
+ _, err = r.ReadAt(buf6, 0) + if err != nil { + if err != io.EOF { + logError(testName, function, args, startTime, "", "ReadAt failed", err) + return + } + } + + successLogger(testName, function, args, startTime).Info() +} + +// Tests copy object +func testCopyObjectV2() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "CopyObject(destination, source)" + args := map[string]interface{}{} + + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + + // Make a new bucket in 'us-east-1' (source bucket). + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + defer cleanupBucket(bucketName, c) + + // Make a new bucket in 'us-east-1' (destination bucket). + err = c.MakeBucket(context.Background(), bucketName+"-copy", minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + defer cleanupBucket(bucketName+"-copy", c) + + // Generate 33K of data. + bufSize := dataFileMap["datafile-33-kB"] + reader := getDataReader("datafile-33-kB") + defer reader.Close() + + // Save the data + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + _, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + + r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject failed", err) + return + } + // Check the various fields of source object against destination object. + objInfo, err := r.Stat() + if err != nil { + logError(testName, function, args, startTime, "", "Stat failed", err) + return + } + r.Close() + + // Copy Source + src := minio.CopySrcOptions{ + Bucket: bucketName, + Object: objectName, + MatchModifiedSince: time.Date(2014, time.April, 0, 0, 0, 0, 0, time.UTC), + MatchETag: objInfo.ETag, + } + args["source"] = src + + // Set copy conditions. 
+ dst := minio.CopyDestOptions{
+ Bucket: bucketName + "-copy",
+ Object: objectName + "-copy",
+ }
+ args["destination"] = dst
+
+ // Perform the Copy
+ _, err = c.CopyObject(context.Background(), dst, src)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "CopyObject failed", err)
+ return
+ }
+
+ // Source object
+ r, err = c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "GetObject failed", err)
+ return
+ }
+ // Destination object
+ readerCopy, err := c.GetObject(context.Background(), bucketName+"-copy", objectName+"-copy", minio.GetObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "GetObject failed", err)
+ return
+ }
+ // Check the various fields of source object against destination object.
+ objInfo, err = r.Stat()
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Stat failed", err)
+ return
+ }
+ objInfoCopy, err := readerCopy.Stat()
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Stat failed", err)
+ return
+ }
+ if objInfo.Size != objInfoCopy.Size {
+ logError(testName, function, args, startTime, "", fmt.Sprintf("Number of bytes does not match, expected %d got %d", objInfoCopy.Size, objInfo.Size), err)
+ return
+ }
+
+ // Close all the readers.
+ r.Close()
+ readerCopy.Close()
+
+ // CopyObject again but with wrong conditions
+ src = minio.CopySrcOptions{
+ Bucket: bucketName,
+ Object: objectName,
+ MatchUnmodifiedSince: time.Date(2014, time.April, 0, 0, 0, 0, 0, time.UTC),
+ NoMatchETag: objInfo.ETag,
+ }
+
+ // Perform the Copy which should fail
+ _, err = c.CopyObject(context.Background(), dst, src)
+ if err == nil {
+ logError(testName, function, args, startTime, "", "CopyObject did not fail for invalid conditions", err)
+ return
+ }
+
+ successLogger(testName, function, args, startTime).Info()
+}
+
+func testComposeObjectErrorCasesWrapper(c *minio.Client) {
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "ComposeObject(destination, sourceList)"
+ args := map[string]interface{}{}
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+
+ // Make a new bucket in 'us-east-1' (source bucket).
+ err := c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+ return
+ }
+
+ defer cleanupBucket(bucketName, c)
+
+ // Test that more than 10K source objects cannot be
+ // concatenated.
+ srcArr := [10001]minio.CopySrcOptions{}
+ srcSlice := srcArr[:]
+ dst := minio.CopyDestOptions{
+ Bucket: bucketName,
+ Object: "object",
+ }
+
+ args["destination"] = dst
+ // Describe srcArr in args["sourceList"] rather than embedding it,
+ // to avoid logging 10,001 empty entries
+ args["sourceList"] = "source array of 10,001 elements"
+ if _, err := c.ComposeObject(context.Background(), dst, srcSlice...); err == nil {
+ logError(testName, function, args, startTime, "", "Expected error in ComposeObject", err)
+ return
+ } else if err.Error() != "There must be as least one and up to 10000 source objects." {
+ logError(testName, function, args, startTime, "", "Got unexpected error", err)
+ return
+ }
+
+ // Create a source with invalid offset spec and check that
+ // error is returned:
+ // 1. Create the source object.
+ const badSrcSize = 5 * 1024 * 1024 + buf := bytes.Repeat([]byte("1"), badSrcSize) + _, err = c.PutObject(context.Background(), bucketName, "badObject", bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + // 2. Set invalid range spec on the object (going beyond + // object size) + badSrc := minio.CopySrcOptions{ + Bucket: bucketName, + Object: "badObject", + MatchRange: true, + Start: 1, + End: badSrcSize, + } + + // 3. ComposeObject call should fail. + if _, err := c.ComposeObject(context.Background(), dst, badSrc); err == nil { + logError(testName, function, args, startTime, "", "ComposeObject expected to fail", err) + return + } else if !strings.Contains(err.Error(), "has invalid segment-to-copy") { + logError(testName, function, args, startTime, "", "Got invalid error", err) + return + } + + successLogger(testName, function, args, startTime).Info() +} + +// Test expected error cases +func testComposeObjectErrorCasesV2() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "ComposeObject(destination, sourceList)" + args := map[string]interface{}{} + + // Instantiate new minio client object + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) + return + } + + testComposeObjectErrorCasesWrapper(c) +} + +func testComposeMultipleSources(c *minio.Client) { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "ComposeObject(destination, sourceList)" + args := map[string]interface{}{ + "destination": "", + "sourceList": "", + } + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + // Make a new bucket in 'us-east-1' (source bucket). + err := c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + defer cleanupBucket(bucketName, c) + + // Upload a small source object + const srcSize = 1024 * 1024 * 5 + buf := bytes.Repeat([]byte("1"), srcSize) + _, err = c.PutObject(context.Background(), bucketName, "srcObject", bytes.NewReader(buf), int64(srcSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + + // We will append 10 copies of the object. + srcs := []minio.CopySrcOptions{} + for i := 0; i < 10; i++ { + srcs = append(srcs, minio.CopySrcOptions{ + Bucket: bucketName, + Object: "srcObject", + }) + } + + // make the last part very small + srcs[9].MatchRange = true + + args["sourceList"] = srcs + + dst := minio.CopyDestOptions{ + Bucket: bucketName, + Object: "dstObject", + } + args["destination"] = dst + + ui, err := c.ComposeObject(context.Background(), dst, srcs...) 
+ if err != nil {
+ logError(testName, function, args, startTime, "", "ComposeObject failed", err)
+ return
+ }
+
+ if ui.Size != 9*srcSize+1 {
+ logError(testName, function, args, startTime, "", "ComposeObject returned unexpected size", err)
+ return
+ }
+
+ objProps, err := c.StatObject(context.Background(), bucketName, "dstObject", minio.StatObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "StatObject failed", err)
+ return
+ }
+
+ if objProps.Size != 9*srcSize+1 {
+ logError(testName, function, args, startTime, "", fmt.Sprintf("Size mismatch: expected %d got %d", 9*srcSize+1, objProps.Size), err)
+ return
+ }
+
+ successLogger(testName, function, args, startTime).Info()
+}
+
+// Test concatenating multiple source objects with a V2 client
+func testCompose10KSourcesV2() {
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "ComposeObject(destination, sourceList)"
+ args := map[string]interface{}{}
+
+ // Instantiate new minio client object
+ c, err := minio.New(os.Getenv(serverEndpoint),
+ &minio.Options{
+ Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+ Secure: mustParseBool(os.Getenv(enableHTTPS)),
+ })
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err)
+ return
+ }
+
+ testComposeMultipleSources(c)
+}
+
+func testEncryptedEmptyObject() {
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "PutObject(bucketName, objectName, reader, objectSize, opts)"
+ args := map[string]interface{}{}
+
+ // Instantiate new minio client object
+ c, err := minio.New(os.Getenv(serverEndpoint),
+ &minio.Options{
+ Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+ Secure: mustParseBool(os.Getenv(enableHTTPS)),
+ })
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err)
+ return
+ }
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+ args["bucketName"] = bucketName
+ // Make a new bucket in 'us-east-1' (source bucket).
+ err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+ return
+ }
+
+ defer cleanupBucket(bucketName, c)
+
+ sse := encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+"object"))
+
+ // 1. create an sse-c encrypted object to copy by uploading
+ const srcSize = 0
+ var buf []byte // Empty buffer
+ args["objectName"] = "object"
+ _, err = c.PutObject(context.Background(), bucketName, "object", bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ServerSideEncryption: sse})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PutObject call failed", err)
+ return
+ }
+
+ // 2. Test CopyObject for an empty object
+ src := minio.CopySrcOptions{
+ Bucket: bucketName,
+ Object: "object",
+ Encryption: sse,
+ }
+
+ dst := minio.CopyDestOptions{
+ Bucket: bucketName,
+ Object: "new-object",
+ Encryption: sse,
+ }
+
+ if _, err = c.CopyObject(context.Background(), dst, src); err != nil {
+ function = "CopyObject(dst, src)"
+ logError(testName, function, map[string]interface{}{}, startTime, "", "CopyObject failed", err)
+ return
+ }
+
+ // 3. 
Test Key rotation + newSSE := encrypt.DefaultPBKDF([]byte("Don't Panic"), []byte(bucketName+"new-object")) + src = minio.CopySrcOptions{ + Bucket: bucketName, + Object: "new-object", + Encryption: sse, + } + + dst = minio.CopyDestOptions{ + Bucket: bucketName, + Object: "new-object", + Encryption: newSSE, + } + + if _, err = c.CopyObject(context.Background(), dst, src); err != nil { + function = "CopyObject(dst, src)" + logError(testName, function, map[string]interface{}{}, startTime, "", "CopyObject with key rotation failed", err) + return + } + + // 4. Download the object. + reader, err := c.GetObject(context.Background(), bucketName, "new-object", minio.GetObjectOptions{ServerSideEncryption: newSSE}) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject failed", err) + return + } + defer reader.Close() + + decBytes, err := ioutil.ReadAll(reader) + if err != nil { + logError(testName, function, map[string]interface{}{}, startTime, "", "ReadAll failed", err) + return + } + if !bytes.Equal(decBytes, buf) { + logError(testName, function, map[string]interface{}{}, startTime, "", "Downloaded object doesn't match the empty encrypted object", err) + return + } + + delete(args, "objectName") + successLogger(testName, function, args, startTime).Info() +} + +func testEncryptedCopyObjectWrapper(c *minio.Client, bucketName string, sseSrc, sseDst encrypt.ServerSide) { + // initialize logging params + startTime := time.Now() + testName := getFuncNameLoc(2) + function := "CopyObject(destination, source)" + args := map[string]interface{}{} + var srcEncryption, dstEncryption encrypt.ServerSide + + // Make a new bucket in 'us-east-1' (source bucket). + err := c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + defer cleanupBucket(bucketName, c) + + // 1. create an sse-c encrypted object to copy by uploading + const srcSize = 1024 * 1024 + buf := bytes.Repeat([]byte("abcde"), srcSize) // gives a buffer of 5MiB + _, err = c.PutObject(context.Background(), bucketName, "srcObject", bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ + ServerSideEncryption: sseSrc, + }) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject call failed", err) + return + } + + if sseSrc != nil && sseSrc.Type() != encrypt.S3 { + srcEncryption = sseSrc + } + + // 2. copy object and change encryption key + src := minio.CopySrcOptions{ + Bucket: bucketName, + Object: "srcObject", + Encryption: srcEncryption, + } + args["source"] = src + + dst := minio.CopyDestOptions{ + Bucket: bucketName, + Object: "dstObject", + Encryption: sseDst, + } + args["destination"] = dst + + _, err = c.CopyObject(context.Background(), dst, src) + if err != nil { + logError(testName, function, args, startTime, "", "CopyObject failed", err) + return + } + + if sseDst != nil && sseDst.Type() != encrypt.S3 { + dstEncryption = sseDst + } + // 3. 
get copied object and check if content is equal
+ coreClient := minio.Core{Client: c}
+ reader, _, _, err := coreClient.GetObject(context.Background(), bucketName, "dstObject", minio.GetObjectOptions{ServerSideEncryption: dstEncryption})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "GetObject failed", err)
+ return
+ }
+
+ decBytes, err := ioutil.ReadAll(reader)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "ReadAll failed", err)
+ return
+ }
+ if !bytes.Equal(decBytes, buf) {
+ logError(testName, function, args, startTime, "", "Downloaded object mismatched for encrypted object", err)
+ return
+ }
+ reader.Close()
+
+ // Test key rotation for source object in-place.
+ var newSSE encrypt.ServerSide
+ if sseSrc != nil && sseSrc.Type() == encrypt.SSEC {
+ newSSE = encrypt.DefaultPBKDF([]byte("Don't Panic"), []byte(bucketName+"srcObject")) // replace key
+ }
+ if sseSrc != nil && sseSrc.Type() == encrypt.S3 {
+ newSSE = encrypt.NewSSE()
+ }
+ if newSSE != nil {
+ dst = minio.CopyDestOptions{
+ Bucket: bucketName,
+ Object: "srcObject",
+ Encryption: newSSE,
+ }
+ args["destination"] = dst
+
+ _, err = c.CopyObject(context.Background(), dst, src)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "CopyObject failed", err)
+ return
+ }
+
+ // Get copied object and check if content is equal
+ reader, _, _, err = coreClient.GetObject(context.Background(), bucketName, "srcObject", minio.GetObjectOptions{ServerSideEncryption: newSSE})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "GetObject failed", err)
+ return
+ }
+
+ decBytes, err = ioutil.ReadAll(reader)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "ReadAll failed", err)
+ return
+ }
+ if !bytes.Equal(decBytes, buf) {
+ logError(testName, function, args, startTime, "", "Downloaded object mismatched for encrypted object", err)
+ return
+ }
+ reader.Close()
+
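The copy just performed is the usual SSE-C key-rotation idiom: copying an object onto itself with a new destination key re-encrypts it server-side, without the data ever leaving the server. A condensed sketch of the idiom in isolation, using this file's existing imports (bucket, object, oldKey and newKey are placeholders):

    // Rotate the SSE-C key of an object in place. c is a *minio.Client;
    // oldKey and newKey are encrypt.ServerSide values, e.g. from
    // encrypt.DefaultPBKDF. All identifiers here are illustrative.
    src := minio.CopySrcOptions{
        Bucket:     "bucket",
        Object:     "object",
        Encryption: encrypt.SSECopy(oldKey), // decrypt the source with the old key
    }
    dst := minio.CopyDestOptions{
        Bucket:     "bucket",
        Object:     "object", // same object: rotation happens in place
        Encryption: newKey,   // re-encrypt under the new key
    }
    if _, err := c.CopyObject(context.Background(), dst, src); err != nil {
        log.Fatalln(err)
    }
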
+ // Test in-place decryption.
+ dst = minio.CopyDestOptions{
+ Bucket: bucketName,
+ Object: "srcObject",
+ }
+ args["destination"] = dst
+
+ src = minio.CopySrcOptions{
+ Bucket: bucketName,
+ Object: "srcObject",
+ Encryption: newSSE,
+ }
+ args["source"] = src
+ _, err = c.CopyObject(context.Background(), dst, src)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "CopyObject Key rotation failed", err)
+ return
+ }
+ }
+
+ // Get copied decrypted object and check if content is equal
+ reader, _, _, err = coreClient.GetObject(context.Background(), bucketName, "srcObject", minio.GetObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "GetObject failed", err)
+ return
+ }
+ defer reader.Close()
+
+ decBytes, err = ioutil.ReadAll(reader)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "ReadAll failed", err)
+ return
+ }
+ if !bytes.Equal(decBytes, buf) {
+ logError(testName, function, args, startTime, "", "Downloaded object mismatched for encrypted object", err)
+ return
+ }
+
+ successLogger(testName, function, args, startTime).Info()
+}
+
+// Test encrypted copy object
+func testUnencryptedToSSECCopyObject() {
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "CopyObject(destination, source)"
+ args := map[string]interface{}{}
+
+ // Instantiate new minio client object
+ c, err := minio.New(os.Getenv(serverEndpoint),
+ &minio.Options{
+ Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+ Secure: mustParseBool(os.Getenv(enableHTTPS)),
+ })
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err)
+ return
+ }
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+
+ sseDst := encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+"dstObject"))
+ // c.TraceOn(os.Stderr)
+ testEncryptedCopyObjectWrapper(c, bucketName, nil, sseDst)
+}
+
+// Test encrypted copy object
+func testUnencryptedToSSES3CopyObject() {
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "CopyObject(destination, source)"
+ args := map[string]interface{}{}
+
+ // Instantiate new minio client object
+ c, err := minio.New(os.Getenv(serverEndpoint),
+ &minio.Options{
+ Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+ Secure: mustParseBool(os.Getenv(enableHTTPS)),
+ })
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err)
+ return
+ }
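encrypt.DefaultPBKDF, used throughout these cases, derives a deterministic SSE-C key from a password and a salt; salting with bucket+object, as the tests do, gives every object its own key. A short usage sketch with placeholder names, assuming this file's imports:

    // Derive a per-object SSE-C key from a password; the bucket+object
    // salt keeps keys distinct across objects. Names are placeholders.
    sse := encrypt.DefaultPBKDF([]byte("my secret password"), []byte("bucket"+"object"))

    // The same ServerSide value must accompany both the write and the read.
    _, err := c.PutObject(context.Background(), "bucket", "object",
        bytes.NewReader(data), int64(len(data)),
        minio.PutObjectOptions{ServerSideEncryption: sse})
    if err != nil {
        log.Fatalln(err)
    }
    obj, err := c.GetObject(context.Background(), "bucket", "object",
        minio.GetObjectOptions{ServerSideEncryption: sse})
    if err != nil {
        log.Fatalln(err)
    }
    defer obj.Close()
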
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+
+ var sseSrc encrypt.ServerSide
+ sseDst := encrypt.NewSSE()
+ // c.TraceOn(os.Stderr)
+ testEncryptedCopyObjectWrapper(c, bucketName, sseSrc, sseDst)
+}
+
+// Test encrypted copy object
+func testUnencryptedToUnencryptedCopyObject() {
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "CopyObject(destination, source)"
+ args := map[string]interface{}{}
+
+ // Instantiate new minio client object
+ c, err := minio.New(os.Getenv(serverEndpoint),
+ &minio.Options{
+ Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+ Secure: mustParseBool(os.Getenv(enableHTTPS)),
+ })
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err)
+ return
+ }
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+
+ var sseSrc, sseDst encrypt.ServerSide
+ // c.TraceOn(os.Stderr)
+ testEncryptedCopyObjectWrapper(c, bucketName, sseSrc, sseDst)
+}
+
+// Test encrypted copy object
+func testEncryptedSSECToSSECCopyObject() {
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "CopyObject(destination, source)"
+ args := map[string]interface{}{}
+
+ // Instantiate new minio client object
+ c, err := minio.New(os.Getenv(serverEndpoint),
+ &minio.Options{
+ Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+ Secure: mustParseBool(os.Getenv(enableHTTPS)),
+ })
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err)
+ return
+ }
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+
+ sseSrc := encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+"srcObject"))
+ sseDst := encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+"dstObject"))
+ // c.TraceOn(os.Stderr)
+ testEncryptedCopyObjectWrapper(c, bucketName, sseSrc, sseDst)
+}
+
+// Test encrypted copy object
+func testEncryptedSSECToSSES3CopyObject() {
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "CopyObject(destination, source)"
+ args := map[string]interface{}{}
+
+ // Instantiate new minio client object
+ c, err := minio.New(os.Getenv(serverEndpoint),
+ &minio.Options{
+ Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+ Secure: mustParseBool(os.Getenv(enableHTTPS)),
+ })
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err)
+ return
+ }
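The wrappers in this stretch differ only in which side of the copy is encrypted and how, so the same matrix could be expressed table-driven. A hypothetical condensation (not how this suite is actually organized), reusing the suite's own randString and testEncryptedCopyObjectWrapper helpers:

    // Hypothetical table-driven form of the encryption copy matrix.
    cases := []struct {
        name string
        src  func(bucket string) encrypt.ServerSide // nil => unencrypted
        dst  func(bucket string) encrypt.ServerSide
    }{
        {"plain->plain", nil, nil},
        {"plain->SSE-C", nil, func(b string) encrypt.ServerSide {
            return encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(b+"dstObject"))
        }},
        {"SSE-S3->SSE-S3",
            func(string) encrypt.ServerSide { return encrypt.NewSSE() },
            func(string) encrypt.ServerSide { return encrypt.NewSSE() }},
    }
    for _, tc := range cases {
        bucket := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
        var sseSrc, sseDst encrypt.ServerSide
        if tc.src != nil {
            sseSrc = tc.src(bucket)
        }
        if tc.dst != nil {
            sseDst = tc.dst(bucket)
        }
        testEncryptedCopyObjectWrapper(c, bucket, sseSrc, sseDst)
    }
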
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+
+ sseSrc := encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+"srcObject"))
+ sseDst := encrypt.NewSSE()
+ // c.TraceOn(os.Stderr)
+ testEncryptedCopyObjectWrapper(c, bucketName, sseSrc, sseDst)
+}
+
+// Test encrypted copy object
+func testEncryptedSSECToUnencryptedCopyObject() {
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "CopyObject(destination, source)"
+ args := map[string]interface{}{}
+
+ // Instantiate new minio client object
+ c, err := minio.New(os.Getenv(serverEndpoint),
+ &minio.Options{
+ Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+ Secure: mustParseBool(os.Getenv(enableHTTPS)),
+ })
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err)
+ return
+ }
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+
+ sseSrc := encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+"srcObject"))
+ var sseDst encrypt.ServerSide
+ // c.TraceOn(os.Stderr)
+ testEncryptedCopyObjectWrapper(c, bucketName, sseSrc, sseDst)
+}
+
+// Test encrypted copy object
+func testEncryptedSSES3ToSSECCopyObject() {
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "CopyObject(destination, source)"
+ args := map[string]interface{}{}
+
+ // Instantiate new minio client object
+ c, err := minio.New(os.Getenv(serverEndpoint),
+ &minio.Options{
+ Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+ Secure: mustParseBool(os.Getenv(enableHTTPS)),
+ })
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err)
+ return
+ }
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+
+ sseSrc := encrypt.NewSSE()
+ sseDst := encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+"dstObject"))
+ // c.TraceOn(os.Stderr)
+ testEncryptedCopyObjectWrapper(c, bucketName, sseSrc, sseDst)
+}
+
+// Test encrypted copy object
+func testEncryptedSSES3ToSSES3CopyObject() {
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "CopyObject(destination, source)"
+ args := map[string]interface{}{}
+
+ // Instantiate new minio client object
+ c, err := minio.New(os.Getenv(serverEndpoint),
+ &minio.Options{
+ Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+ Secure: mustParseBool(os.Getenv(enableHTTPS)),
+ })
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err)
+ return
+ }
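One subtlety hidden in testEncryptedCopyObjectWrapper: when the source of a copy is SSE-C encrypted, its key must travel in the X-Amz-Copy-Source-Server-Side-Encryption-Customer-* headers, which is what wrapping a key in encrypt.SSECopy produces (testDecryptedCopyObject further below uses the same wrapper). A sketch with placeholder names:

    // srcKey is the SSE-C key the source object was uploaded with.
    srcKey := encrypt.DefaultPBKDF([]byte("password"), []byte("bucket"+"srcObject"))

    src := minio.CopySrcOptions{
        Bucket: "bucket",
        Object: "srcObject",
        // SSECopy marks the key as applying to the copy source, so it is
        // marshalled into copy-source headers rather than destination headers.
        Encryption: encrypt.SSECopy(srcKey),
    }
    dst := minio.CopyDestOptions{
        Bucket: "bucket",
        Object: "dstObject", // lands unencrypted unless dst.Encryption is set
    }
    if _, err := c.CopyObject(context.Background(), dst, src); err != nil {
        log.Fatalln(err)
    }
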
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+
+ sseSrc := encrypt.NewSSE()
+ sseDst := encrypt.NewSSE()
+ // c.TraceOn(os.Stderr)
+ testEncryptedCopyObjectWrapper(c, bucketName, sseSrc, sseDst)
+}
+
+// Test encrypted copy object
+func testEncryptedSSES3ToUnencryptedCopyObject() {
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "CopyObject(destination, source)"
+ args := map[string]interface{}{}
+
+ // Instantiate new minio client object
+ c, err := minio.New(os.Getenv(serverEndpoint),
+ &minio.Options{
+ Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+ Secure: mustParseBool(os.Getenv(enableHTTPS)),
+ })
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err)
+ return
+ }
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+
+ sseSrc := encrypt.NewSSE()
+ var sseDst encrypt.ServerSide
+ // c.TraceOn(os.Stderr)
+ testEncryptedCopyObjectWrapper(c, bucketName, sseSrc, sseDst)
+}
+
+// Test encrypted copy object
+func testEncryptedCopyObjectV2() {
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "CopyObject(destination, source)"
+ args := map[string]interface{}{}
+
+ // Instantiate new minio client object
+ c, err := minio.New(os.Getenv(serverEndpoint),
+ &minio.Options{
+ Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+ Secure: mustParseBool(os.Getenv(enableHTTPS)),
+ })
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err)
+ return
+ }
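All of the SSE-C cases implicitly require TLS, since customer-provided keys travel in request headers and S3-compatible servers reject them over plain HTTP. A guard along these lines could skip them cleanly when HTTPS is disabled; this assumes the suite's ignoredLog helper behaves like its other loggers:

    // Skip SSE-C tests on plain-HTTP endpoints: servers refuse
    // customer-provided keys unless the connection is encrypted.
    if !mustParseBool(os.Getenv(enableHTTPS)) {
        ignoredLog(testName, function, args, startTime,
            "Skipped: SSE-C requires a TLS endpoint").Info()
        return
    }
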
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+
+ sseSrc := encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+"srcObject"))
+ sseDst := encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+"dstObject"))
+ // c.TraceOn(os.Stderr)
+ testEncryptedCopyObjectWrapper(c, bucketName, sseSrc, sseDst)
+}
+
+func testDecryptedCopyObject() {
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "CopyObject(destination, source)"
+ args := map[string]interface{}{}
+
+ // Instantiate new minio client object
+ c, err := minio.New(os.Getenv(serverEndpoint),
+ &minio.Options{
+ Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+ Secure: mustParseBool(os.Getenv(enableHTTPS)),
+ })
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err)
+ return
+ }
+
+ bucketName, objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-"), "object"
+ if err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}); err != nil {
+ logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+ return
+ }
+
+ defer cleanupBucket(bucketName, c)
+
+ encryption := encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+objectName))
+ _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(bytes.Repeat([]byte("a"), 1024*1024)), 1024*1024, minio.PutObjectOptions{
+ ServerSideEncryption: encryption,
+ })
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PutObject call failed", err)
+ return
+ }
+
+ src := minio.CopySrcOptions{
+ Bucket: bucketName,
+ Object: objectName,
+ Encryption: encrypt.SSECopy(encryption),
+ }
+ args["source"] = src
+
+ dst := minio.CopyDestOptions{
+ Bucket: bucketName,
+ Object: "decrypted-" + objectName,
+ }
+ args["destination"] = dst
+
+ if _, err = c.CopyObject(context.Background(), dst, src); err != nil {
+ logError(testName, function, args, startTime, "", "CopyObject failed", err)
+ return
+ }
+ if _, err = c.GetObject(context.Background(), bucketName, "decrypted-"+objectName, minio.GetObjectOptions{}); err != nil {
+ logError(testName, function, args, startTime, "", "GetObject failed", err)
+ return
+ }
+ successLogger(testName, function, args, startTime).Info()
+}
+
+func testSSECMultipartEncryptedToSSECCopyObjectPart() {
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "CopyObjectPart(destination, source)"
+ args := map[string]interface{}{}
+
+ // Instantiate new minio client object
+ client, err := minio.New(os.Getenv(serverEndpoint),
+ &minio.Options{
+ Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+ Secure: mustParseBool(os.Getenv(enableHTTPS)),
+ })
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err)
+ return
+ }
+
+ // Instantiate new core client object.
+ c := minio.Core{Client: client}
+
+ // Enable tracing, write to stderr.
+ // c.TraceOn(os.Stderr)
+
+ // Set user agent.
+ c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
+
+ // Make a new bucket.
+ err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + defer cleanupBucket(bucketName, client) + // Make a buffer with 6MB of data + buf := bytes.Repeat([]byte("abcdef"), 1024*1024) + + // Save the data + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + password := "correct horse battery staple" + srcencryption := encrypt.DefaultPBKDF([]byte(password), []byte(bucketName+objectName)) + + // Upload a 6MB object using multipart mechanism + uploadID, err := c.NewMultipartUpload(context.Background(), bucketName, objectName, minio.PutObjectOptions{ServerSideEncryption: srcencryption}) + if err != nil { + logError(testName, function, args, startTime, "", "NewMultipartUpload call failed", err) + return + } + + var completeParts []minio.CompletePart + + part, err := c.PutObjectPart(context.Background(), bucketName, objectName, uploadID, 1, bytes.NewReader(buf[:5*1024*1024]), 5*1024*1024, "", "", srcencryption) + if err != nil { + logError(testName, function, args, startTime, "", "PutObjectPart call failed", err) + return + } + completeParts = append(completeParts, minio.CompletePart{PartNumber: part.PartNumber, ETag: part.ETag}) + + part, err = c.PutObjectPart(context.Background(), bucketName, objectName, uploadID, 2, bytes.NewReader(buf[5*1024*1024:]), 1024*1024, "", "", srcencryption) + if err != nil { + logError(testName, function, args, startTime, "", "PutObjectPart call failed", err) + return + } + completeParts = append(completeParts, minio.CompletePart{PartNumber: part.PartNumber, ETag: part.ETag}) + + // Complete the multipart upload + _, err = c.CompleteMultipartUpload(context.Background(), bucketName, objectName, uploadID, completeParts, minio.PutObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "CompleteMultipartUpload call failed", err) + return + } + + // Stat the object and check its length matches + objInfo, err := c.StatObject(context.Background(), bucketName, objectName, minio.StatObjectOptions{ServerSideEncryption: srcencryption}) + if err != nil { + logError(testName, function, args, startTime, "", "StatObject call failed", err) + return + } + + destBucketName := bucketName + destObjectName := objectName + "-dest" + dstencryption := encrypt.DefaultPBKDF([]byte(password), []byte(destBucketName+destObjectName)) + + uploadID, err = c.NewMultipartUpload(context.Background(), destBucketName, destObjectName, minio.PutObjectOptions{ServerSideEncryption: dstencryption}) + if err != nil { + logError(testName, function, args, startTime, "", "NewMultipartUpload call failed", err) + return + } + + // Content of the destination object will be two copies of + // `objectName` concatenated, followed by first byte of + // `objectName`. 
+ metadata := make(map[string]string)
+ header := make(http.Header)
+ encrypt.SSECopy(srcencryption).Marshal(header)
+ dstencryption.Marshal(header)
+ for k, v := range header {
+ metadata[k] = v[0]
+ }
+
+ metadata["x-amz-copy-source-if-match"] = objInfo.ETag
+
+ // First of three parts
+ fstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 1, 0, -1, metadata)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err)
+ return
+ }
+
+ // Second of three parts
+ sndPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 2, 0, -1, metadata)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err)
+ return
+ }
+
+ // Last of three parts
+ lstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 3, 0, 1, metadata)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err)
+ return
+ }
+
+ // Complete the multipart upload
+ _, err = c.CompleteMultipartUpload(context.Background(), destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart}, minio.PutObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "CompleteMultipartUpload call failed", err)
+ return
+ }
+
+ // Stat the object and check its length matches
+ objInfo, err = c.StatObject(context.Background(), destBucketName, destObjectName, minio.StatObjectOptions{ServerSideEncryption: dstencryption})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "StatObject call failed", err)
+ return
+ }
+
+ if objInfo.Size != (6*1024*1024)*2+1 {
+ logError(testName, function, args, startTime, "", "Destination object has incorrect size!", err)
+ return
+ }
+
+ // Now we read the data back
+ getOpts := minio.GetObjectOptions{ServerSideEncryption: dstencryption}
+ getOpts.SetRange(0, 6*1024*1024-1)
+ r, _, _, err := c.GetObject(context.Background(), destBucketName, destObjectName, getOpts)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "GetObject call failed", err)
+ return
+ }
+ getBuf := make([]byte, 6*1024*1024)
+ _, err = readFull(r, getBuf)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Read buffer failed", err)
+ return
+ }
+ if !bytes.Equal(getBuf, buf) {
+ logError(testName, function, args, startTime, "", "Got unexpected data in first 6MB", err)
+ return
+ }
+
+ getOpts.SetRange(6*1024*1024, 0)
+ r, _, _, err = c.GetObject(context.Background(), destBucketName, destObjectName, getOpts)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "GetObject call failed", err)
+ return
+ }
+ getBuf = make([]byte, 6*1024*1024+1)
+ _, err = readFull(r, getBuf)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Read buffer failed", err)
+ return
+ }
+ if !bytes.Equal(getBuf[:6*1024*1024], buf) {
+ logError(testName, function, args, startTime, "", "Got unexpected data in second 6MB", err)
+ return
+ }
+ if getBuf[6*1024*1024] != buf[0] {
+ logError(testName, function, args, startTime, "", "Got unexpected data in last byte of copied object!", err)
+ return
+ }
+
+ successLogger(testName, function, args, startTime).Info()
+
+ // No need to remove destBucketName, it's the same as bucketName.
+}
+
+// Test Core CopyObjectPart implementation
+func testSSECEncryptedToSSECCopyObjectPart() {
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "CopyObjectPart(destination, source)"
+ args := map[string]interface{}{}
+
+ // Instantiate new minio client object
+ client, err := minio.New(os.Getenv(serverEndpoint),
+ &minio.Options{
+ Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+ Secure: mustParseBool(os.Getenv(enableHTTPS)),
+ })
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err)
+ return
+ }
+
+ // Instantiate new core client object.
+ c := minio.Core{Client: client}
+
+ // Enable tracing, write to stderr.
+ // c.TraceOn(os.Stderr)
+
+ // Set user agent.
+ c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
+
+ // Make a new bucket.
+ err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+ return
+ }
+ defer cleanupBucket(bucketName, client)
+ // Make a buffer with 5MB of data
+ buf := bytes.Repeat([]byte("abcde"), 1024*1024)
+
+ // Save the data
+ objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+ password := "correct horse battery staple"
+ srcencryption := encrypt.DefaultPBKDF([]byte(password), []byte(bucketName+objectName))
+ putmetadata := map[string]string{
+ "Content-Type": "binary/octet-stream",
+ }
+ opts := minio.PutObjectOptions{
+ UserMetadata: putmetadata,
+ ServerSideEncryption: srcencryption,
+ }
+ uploadInfo, err := c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), "", "", opts)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PutObject call failed", err)
+ return
+ }
+
+ st, err := c.StatObject(context.Background(), bucketName, objectName, minio.StatObjectOptions{ServerSideEncryption: srcencryption})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "StatObject call failed", err)
+ return
+ }
+
+ if st.Size != int64(len(buf)) {
+ logError(testName, function, args, startTime, "", fmt.Sprintf("Error: number of bytes does not match, want %v, got %v\n", len(buf), st.Size), err)
+ return
+ }
+
+ destBucketName := bucketName
+ destObjectName := objectName + "-dest"
+ dstencryption := encrypt.DefaultPBKDF([]byte(password), []byte(destBucketName+destObjectName))
+
+ uploadID, err := c.NewMultipartUpload(context.Background(), destBucketName, destObjectName, minio.PutObjectOptions{ServerSideEncryption: dstencryption})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "NewMultipartUpload call failed", err)
+ return
+ }
+
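Core.CopyObjectPart addresses the source as a (startOffset, length) byte range: (0, -1) below copies the whole source object into a part, while (0, 1) copies just its first byte, which is how the destination becomes two full copies plus one byte. The shape of a single call, with placeholder names:

    // (startOffset, length): (0, -1) => entire source object; (0, 1) => one byte.
    part, err := core.CopyObjectPart(context.Background(),
        "srcBucket", "srcObject", // copy source
        "dstBucket", "dstObject", // destination of the multipart upload
        uploadID,                 // from a prior NewMultipartUpload
        1,                        // part number
        0, -1,                    // range: whole object
        nil)                      // extra headers, e.g. SSE-C copy-source keys
    if err != nil {
        log.Fatalln(err)
    }
    _ = part // a minio.CompletePart to pass to CompleteMultipartUpload
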
+ // Content of the destination object will be two copies of
+ // `objectName` concatenated, followed by first byte of
+ // `objectName`.
+ metadata := make(map[string]string)
+ header := make(http.Header)
+ encrypt.SSECopy(srcencryption).Marshal(header)
+ dstencryption.Marshal(header)
+ for k, v := range header {
+ metadata[k] = v[0]
+ }
+
+ metadata["x-amz-copy-source-if-match"] = uploadInfo.ETag
+
+ // First of three parts
+ fstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 1, 0, -1, metadata)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err)
+ return
+ }
+
+ // Second of three parts
+ sndPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 2, 0, -1, metadata)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err)
+ return
+ }
+
+ // Last of three parts
+ lstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 3, 0, 1, metadata)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err)
+ return
+ }
+
+ // Complete the multipart upload
+ _, err = c.CompleteMultipartUpload(context.Background(), destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart}, minio.PutObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "CompleteMultipartUpload call failed", err)
+ return
+ }
+
+ // Stat the object and check its length matches
+ objInfo, err := c.StatObject(context.Background(), destBucketName, destObjectName, minio.StatObjectOptions{ServerSideEncryption: dstencryption})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "StatObject call failed", err)
+ return
+ }
+
+ if objInfo.Size != (5*1024*1024)*2+1 {
+ logError(testName, function, args, startTime, "", "Destination object has incorrect size!", err)
+ return
+ }
+
+ // Now we read the data back
+ getOpts := minio.GetObjectOptions{ServerSideEncryption: dstencryption}
+ getOpts.SetRange(0, 5*1024*1024-1)
+ r, _, _, err := c.GetObject(context.Background(), destBucketName, destObjectName, getOpts)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "GetObject call failed", err)
+ return
+ }
+ getBuf := make([]byte, 5*1024*1024)
+ _, err = readFull(r, getBuf)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Read buffer failed", err)
+ return
+ }
+ if !bytes.Equal(getBuf, buf) {
+ logError(testName, function, args, startTime, "", "Got unexpected data in first 5MB", err)
+ return
+ }
+
+ getOpts.SetRange(5*1024*1024, 0)
+ r, _, _, err = c.GetObject(context.Background(), destBucketName, destObjectName, getOpts)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "GetObject call failed", err)
+ return
+ }
+ getBuf = make([]byte, 5*1024*1024+1)
+ _, err = readFull(r, getBuf)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Read buffer failed", err)
+ return
+ }
+ if !bytes.Equal(getBuf[:5*1024*1024], buf) {
+ logError(testName, function, args, startTime, "", "Got unexpected data in second 5MB", err)
+ return
+ }
+ if getBuf[5*1024*1024] != buf[0] {
+ logError(testName, function, args, startTime, "", "Got unexpected data in last byte of copied object!", err)
+ return
+ }
+
+ successLogger(testName, function, args, startTime).Info()
+
+ // No need to remove destBucketName, it's the same as bucketName.
+}
+
+// Test Core CopyObjectPart implementation for SSEC encrypted to unencrypted copy
+func testSSECEncryptedToUnencryptedCopyPart() {
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "CopyObjectPart(destination, source)"
+ args := map[string]interface{}{}
+
+ // Instantiate new minio client object
+ client, err := minio.New(os.Getenv(serverEndpoint),
+ &minio.Options{
+ Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+ Secure: mustParseBool(os.Getenv(enableHTTPS)),
+ })
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err)
+ return
+ }
+
+ // Instantiate new core client object.
+ c := minio.Core{Client: client}
+
+ // Enable tracing, write to stderr.
+ // c.TraceOn(os.Stderr)
+
+ // Set user agent.
+ c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
+
+ // Make a new bucket.
+ err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+ return
+ }
+ defer cleanupBucket(bucketName, client)
+ // Make a buffer with 5MB of data
+ buf := bytes.Repeat([]byte("abcde"), 1024*1024)
+
+ // Save the data
+ objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+ password := "correct horse battery staple"
+ srcencryption := encrypt.DefaultPBKDF([]byte(password), []byte(bucketName+objectName))
+
+ opts := minio.PutObjectOptions{
+ UserMetadata: map[string]string{
+ "Content-Type": "binary/octet-stream",
+ },
+ ServerSideEncryption: srcencryption,
+ }
+ uploadInfo, err := c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), "", "", opts)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PutObject call failed", err)
+ return
+ }
+
+ st, err := c.StatObject(context.Background(), bucketName, objectName, minio.StatObjectOptions{ServerSideEncryption: srcencryption})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "StatObject call failed", err)
+ return
+ }
+
+ if st.Size != int64(len(buf)) {
+ logError(testName, function, args, startTime, "", fmt.Sprintf("Error: number of bytes does not match, want %v, got %v\n", len(buf), st.Size), err)
+ return
+ }
+
+ destBucketName := bucketName
+ destObjectName := objectName + "-dest"
+ var dstencryption encrypt.ServerSide
+
+ uploadID, err := c.NewMultipartUpload(context.Background(), destBucketName, destObjectName, minio.PutObjectOptions{ServerSideEncryption: dstencryption})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "NewMultipartUpload call failed", err)
+ return
+ }
+
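GetObjectOptions.SetRange, used below to read the copy back in two halves, is inclusive on both ends, and an end offset of 0 means "through the end of the object". A sketch:

    opts := minio.GetObjectOptions{}

    // First 5 MiB: bytes 0..5242879 inclusive ("Range: bytes=0-5242879").
    if err := opts.SetRange(0, 5*1024*1024-1); err != nil {
        log.Fatalln(err)
    }

    // From 5 MiB to EOF ("Range: bytes=5242880-"): end == 0 means "to the end".
    if err := opts.SetRange(5*1024*1024, 0); err != nil {
        log.Fatalln(err)
    }
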
+ // Content of the destination object will be two copies of
+ // `objectName` concatenated, followed by first byte of
+ // `objectName`.
+ metadata := make(map[string]string)
+ header := make(http.Header)
+ encrypt.SSECopy(srcencryption).Marshal(header)
+ for k, v := range header {
+ metadata[k] = v[0]
+ }
+
+ metadata["x-amz-copy-source-if-match"] = uploadInfo.ETag
+
+ // First of three parts
+ fstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 1, 0, -1, metadata)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err)
+ return
+ }
+
+ // Second of three parts
+ sndPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 2, 0, -1, metadata)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err)
+ return
+ }
+
+ // Last of three parts
+ lstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 3, 0, 1, metadata)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err)
+ return
+ }
+
+ // Complete the multipart upload
+ _, err = c.CompleteMultipartUpload(context.Background(), destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart}, minio.PutObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "CompleteMultipartUpload call failed", err)
+ return
+ }
+
+ // Stat the object and check its length matches
+ objInfo, err := c.StatObject(context.Background(), destBucketName, destObjectName, minio.StatObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "StatObject call failed", err)
+ return
+ }
+
+ if objInfo.Size != (5*1024*1024)*2+1 {
+ logError(testName, function, args, startTime, "", "Destination object has incorrect size!", err)
+ return
+ }
+
+ // Now we read the data back
+ getOpts := minio.GetObjectOptions{}
+ getOpts.SetRange(0, 5*1024*1024-1)
+ r, _, _, err := c.GetObject(context.Background(), destBucketName, destObjectName, getOpts)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "GetObject call failed", err)
+ return
+ }
+ getBuf := make([]byte, 5*1024*1024)
+ _, err = readFull(r, getBuf)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Read buffer failed", err)
+ return
+ }
+ if !bytes.Equal(getBuf, buf) {
+ logError(testName, function, args, startTime, "", "Got unexpected data in first 5MB", err)
+ return
+ }
+
+ getOpts.SetRange(5*1024*1024, 0)
+ r, _, _, err = c.GetObject(context.Background(), destBucketName, destObjectName, getOpts)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "GetObject call failed", err)
+ return
+ }
+ getBuf = make([]byte, 5*1024*1024+1)
+ _, err = readFull(r, getBuf)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Read buffer failed", err)
+ return
+ }
+ if !bytes.Equal(getBuf[:5*1024*1024], buf) {
+ logError(testName, function, args, startTime, "", "Got unexpected data in second 5MB", err)
+ return
+ }
+ if getBuf[5*1024*1024] != buf[0] {
+ logError(testName, function, args, startTime, "", "Got unexpected data in last byte of copied object!", err)
+ return
+ }
+
+ successLogger(testName, function, args, startTime).Info()
+
+ // No need to remove destBucketName, it's the same as bucketName.
+} + +// Test Core CopyObjectPart implementation for SSEC encrypted to SSE-S3 encrypted copy +func testSSECEncryptedToSSES3CopyObjectPart() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "CopyObjectPart(destination, source)" + args := map[string]interface{}{} + + // Instantiate new minio client object + client, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err) + return + } + + // Instantiate new core client object. + c := minio.Core{client} + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") + + // Make a new bucket. + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + defer cleanupBucket(bucketName, client) + // Make a buffer with 5MB of data + buf := bytes.Repeat([]byte("abcde"), 1024*1024) + + // Save the data + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + password := "correct horse battery staple" + srcencryption := encrypt.DefaultPBKDF([]byte(password), []byte(bucketName+objectName)) + putmetadata := map[string]string{ + "Content-Type": "binary/octet-stream", + } + opts := minio.PutObjectOptions{ + UserMetadata: putmetadata, + ServerSideEncryption: srcencryption, + } + + uploadInfo, err := c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), "", "", opts) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject call failed", err) + return + } + + st, err := c.StatObject(context.Background(), bucketName, objectName, minio.StatObjectOptions{ServerSideEncryption: srcencryption}) + if err != nil { + logError(testName, function, args, startTime, "", "StatObject call failed", err) + return + } + + if st.Size != int64(len(buf)) { + logError(testName, function, args, startTime, "", fmt.Sprintf("Error: number of bytes does not match, want %v, got %v\n", len(buf), st.Size), err) + return + } + + destBucketName := bucketName + destObjectName := objectName + "-dest" + dstencryption := encrypt.NewSSE() + + uploadID, err := c.NewMultipartUpload(context.Background(), destBucketName, destObjectName, minio.PutObjectOptions{ServerSideEncryption: dstencryption}) + if err != nil { + logError(testName, function, args, startTime, "", "NewMultipartUpload call failed", err) + return + } + + // Content of the destination object will be two copies of + // `objectName` concatenated, followed by first byte of + // `objectName`. 
+ metadata := make(map[string]string) + header := make(http.Header) + encrypt.SSECopy(srcencryption).Marshal(header) + dstencryption.Marshal(header) + + for k, v := range header { + metadata[k] = v[0] + } + + metadata["x-amz-copy-source-if-match"] = uploadInfo.ETag + + // First of three parts + fstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 1, 0, -1, metadata) + if err != nil { + logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) + return + } + + // Second of three parts + sndPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 2, 0, -1, metadata) + if err != nil { + logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) + return + } + + // Last of three parts + lstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 3, 0, 1, metadata) + if err != nil { + logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) + return + } + + // Complete the multipart upload + _, err = c.CompleteMultipartUpload(context.Background(), destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart}, minio.PutObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "CompleteMultipartUpload call failed", err) + return + } + + // Stat the object and check its length matches + objInfo, err := c.StatObject(context.Background(), destBucketName, destObjectName, minio.StatObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "StatObject call failed", err) + return + } + + if objInfo.Size != (5*1024*1024)*2+1 { + logError(testName, function, args, startTime, "", "Destination object has incorrect size!", err) + return + } + + // Now we read the data back + getOpts := minio.GetObjectOptions{} + getOpts.SetRange(0, 5*1024*1024-1) + r, _, _, err := c.GetObject(context.Background(), destBucketName, destObjectName, getOpts) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject call failed", err) + return + } + getBuf := make([]byte, 5*1024*1024) + _, err = readFull(r, getBuf) + if err != nil { + logError(testName, function, args, startTime, "", "Read buffer failed", err) + return + } + if !bytes.Equal(getBuf, buf) { + logError(testName, function, args, startTime, "", "Got unexpected data in first 5MB", err) + return + } + + getOpts.SetRange(5*1024*1024, 0) + r, _, _, err = c.GetObject(context.Background(), destBucketName, destObjectName, getOpts) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject call failed", err) + return + } + getBuf = make([]byte, 5*1024*1024+1) + _, err = readFull(r, getBuf) + if err != nil { + logError(testName, function, args, startTime, "", "Read buffer failed", err) + return + } + if !bytes.Equal(getBuf[:5*1024*1024], buf) { + logError(testName, function, args, startTime, "", "Got unexpected data in second 5MB", err) + return + } + if getBuf[5*1024*1024] != buf[0] { + logError(testName, function, args, startTime, "", "Got unexpected data in last byte of copied object!", err) + return + } + + successLogger(testName, function, args, startTime).Info() + + // Do not need to remove destBucketName its same as bucketName. 
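Each variant issues the same three CopyObjectPart calls. Judging by the size assertions that follow, a startOffset/length pair of (0, -1) copies the entire source object as one part, while (0, 1) copies only its first byte, which is how the destination ends up at exactly (5*1024*1024)*2+1 bytes. A sketch of that loop, assuming an existing Core client and an open multipart upload; all identifiers here are illustrative:

    package example

    import (
        "context"

        "github.com/minio/minio-go/v7"
    )

    // threePartCopy reproduces the layout used by these tests: two full copies
    // of the source object followed by its first byte.
    func threePartCopy(ctx context.Context, c minio.Core, srcBucket, srcObject,
        dstBucket, dstObject, uploadID string, metadata map[string]string,
    ) ([]minio.CompletePart, error) {
        spans := []struct{ off, length int64 }{{0, -1}, {0, -1}, {0, 1}}
        parts := make([]minio.CompletePart, 0, len(spans))
        for i, s := range spans {
            p, err := c.CopyObjectPart(ctx, srcBucket, srcObject, dstBucket,
                dstObject, uploadID, i+1, s.off, s.length, metadata)
            if err != nil {
                return nil, err
            }
            parts = append(parts, p)
        }
        return parts, nil
    }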
+} + +// Test Core CopyObjectPart implementation for unencrypted to SSEC encryption copy part +func testUnencryptedToSSECCopyObjectPart() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "CopyObjectPart(destination, source)" + args := map[string]interface{}{} + + // Instantiate new minio client object + client, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err) + return + } + + // Instantiate new core client object. + c := minio.Core{client} + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") + + // Make a new bucket. + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + defer cleanupBucket(bucketName, client) + // Make a buffer with 5MB of data + buf := bytes.Repeat([]byte("abcde"), 1024*1024) + + // Save the data + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + password := "correct horse battery staple" + putmetadata := map[string]string{ + "Content-Type": "binary/octet-stream", + } + opts := minio.PutObjectOptions{ + UserMetadata: putmetadata, + } + uploadInfo, err := c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), "", "", opts) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject call failed", err) + return + } + + st, err := c.StatObject(context.Background(), bucketName, objectName, minio.StatObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "StatObject call failed", err) + return + } + + if st.Size != int64(len(buf)) { + logError(testName, function, args, startTime, "", fmt.Sprintf("Error: number of bytes does not match, want %v, got %v\n", len(buf), st.Size), err) + return + } + + destBucketName := bucketName + destObjectName := objectName + "-dest" + dstencryption := encrypt.DefaultPBKDF([]byte(password), []byte(destBucketName+destObjectName)) + + uploadID, err := c.NewMultipartUpload(context.Background(), destBucketName, destObjectName, minio.PutObjectOptions{ServerSideEncryption: dstencryption}) + if err != nil { + logError(testName, function, args, startTime, "", "NewMultipartUpload call failed", err) + return + } + + // Content of the destination object will be two copies of + // `objectName` concatenated, followed by first byte of + // `objectName`. 
+ metadata := make(map[string]string) + header := make(http.Header) + dstencryption.Marshal(header) + for k, v := range header { + metadata[k] = v[0] + } + + metadata["x-amz-copy-source-if-match"] = uploadInfo.ETag + + // First of three parts + fstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 1, 0, -1, metadata) + if err != nil { + logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) + return + } + + // Second of three parts + sndPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 2, 0, -1, metadata) + if err != nil { + logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) + return + } + + // Last of three parts + lstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 3, 0, 1, metadata) + if err != nil { + logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) + return + } + + // Complete the multipart upload + _, err = c.CompleteMultipartUpload(context.Background(), destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart}, minio.PutObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "CompleteMultipartUpload call failed", err) + return + } + + // Stat the object and check its length matches + objInfo, err := c.StatObject(context.Background(), destBucketName, destObjectName, minio.StatObjectOptions{ServerSideEncryption: dstencryption}) + if err != nil { + logError(testName, function, args, startTime, "", "StatObject call failed", err) + return + } + + if objInfo.Size != (5*1024*1024)*2+1 { + logError(testName, function, args, startTime, "", "Destination object has incorrect size!", err) + return + } + + // Now we read the data back + getOpts := minio.GetObjectOptions{ServerSideEncryption: dstencryption} + getOpts.SetRange(0, 5*1024*1024-1) + r, _, _, err := c.GetObject(context.Background(), destBucketName, destObjectName, getOpts) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject call failed", err) + return + } + getBuf := make([]byte, 5*1024*1024) + _, err = readFull(r, getBuf) + if err != nil { + logError(testName, function, args, startTime, "", "Read buffer failed", err) + return + } + if !bytes.Equal(getBuf, buf) { + logError(testName, function, args, startTime, "", "Got unexpected data in first 5MB", err) + return + } + + getOpts.SetRange(5*1024*1024, 0) + r, _, _, err = c.GetObject(context.Background(), destBucketName, destObjectName, getOpts) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject call failed", err) + return + } + getBuf = make([]byte, 5*1024*1024+1) + _, err = readFull(r, getBuf) + if err != nil { + logError(testName, function, args, startTime, "", "Read buffer failed", err) + return + } + if !bytes.Equal(getBuf[:5*1024*1024], buf) { + logError(testName, function, args, startTime, "", "Got unexpected data in second 5MB", err) + return + } + if getBuf[5*1024*1024] != buf[0] { + logError(testName, function, args, startTime, "", "Got unexpected data in last byte of copied object!", err) + return + } + + successLogger(testName, function, args, startTime).Info() + + // Do not need to remove destBucketName its same as bucketName. 
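The SSE-C keys in these tests are derived with encrypt.DefaultPBKDF from a fixed password and a bucket+object salt. The practical consequence exercised above is that the very same ServerSide value must be presented again on StatObject and GetObject, or the server rejects the request. A small sketch of that derivation (the function name is illustrative):

    package example

    import (
        "github.com/minio/minio-go/v7"
        "github.com/minio/minio-go/v7/pkg/encrypt"
    )

    // sseOptions derives a deterministic SSE-C key, scoped to one object, and
    // returns matching options for writing and reading that object.
    func sseOptions(password, bucket, object string) (minio.PutObjectOptions, minio.GetObjectOptions) {
        sse := encrypt.DefaultPBKDF([]byte(password), []byte(bucket+object))
        return minio.PutObjectOptions{ServerSideEncryption: sse},
            minio.GetObjectOptions{ServerSideEncryption: sse}
    }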
+} + +// Test Core CopyObjectPart implementation for unencrypted to unencrypted copy +func testUnencryptedToUnencryptedCopyPart() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "CopyObjectPart(destination, source)" + args := map[string]interface{}{} + + // Instantiate new minio client object + client, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err) + return + } + + // Instantiate new core client object. + c := minio.Core{client} + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") + + // Make a new bucket. + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + defer cleanupBucket(bucketName, client) + // Make a buffer with 5MB of data + buf := bytes.Repeat([]byte("abcde"), 1024*1024) + + // Save the data + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + putmetadata := map[string]string{ + "Content-Type": "binary/octet-stream", + } + opts := minio.PutObjectOptions{ + UserMetadata: putmetadata, + } + uploadInfo, err := c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), "", "", opts) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject call failed", err) + return + } + st, err := c.StatObject(context.Background(), bucketName, objectName, minio.StatObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "StatObject call failed", err) + return + } + + if st.Size != int64(len(buf)) { + logError(testName, function, args, startTime, "", fmt.Sprintf("Error: number of bytes does not match, want %v, got %v\n", len(buf), st.Size), err) + return + } + + destBucketName := bucketName + destObjectName := objectName + "-dest" + + uploadID, err := c.NewMultipartUpload(context.Background(), destBucketName, destObjectName, minio.PutObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "NewMultipartUpload call failed", err) + return + } + + // Content of the destination object will be two copies of + // `objectName` concatenated, followed by first byte of + // `objectName`. 
+ metadata := make(map[string]string) + header := make(http.Header) + for k, v := range header { + metadata[k] = v[0] + } + + metadata["x-amz-copy-source-if-match"] = uploadInfo.ETag + + // First of three parts + fstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 1, 0, -1, metadata) + if err != nil { + logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) + return + } + + // Second of three parts + sndPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 2, 0, -1, metadata) + if err != nil { + logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) + return + } + + // Last of three parts + lstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 3, 0, 1, metadata) + if err != nil { + logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) + return + } + + // Complete the multipart upload + _, err = c.CompleteMultipartUpload(context.Background(), destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart}, minio.PutObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "CompleteMultipartUpload call failed", err) + return + } + + // Stat the object and check its length matches + objInfo, err := c.StatObject(context.Background(), destBucketName, destObjectName, minio.StatObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "StatObject call failed", err) + return + } + + if objInfo.Size != (5*1024*1024)*2+1 { + logError(testName, function, args, startTime, "", "Destination object has incorrect size!", err) + return + } + + // Now we read the data back + getOpts := minio.GetObjectOptions{} + getOpts.SetRange(0, 5*1024*1024-1) + r, _, _, err := c.GetObject(context.Background(), destBucketName, destObjectName, getOpts) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject call failed", err) + return + } + getBuf := make([]byte, 5*1024*1024) + _, err = readFull(r, getBuf) + if err != nil { + logError(testName, function, args, startTime, "", "Read buffer failed", err) + return + } + if !bytes.Equal(getBuf, buf) { + logError(testName, function, args, startTime, "", "Got unexpected data in first 5MB", err) + return + } + + getOpts.SetRange(5*1024*1024, 0) + r, _, _, err = c.GetObject(context.Background(), destBucketName, destObjectName, getOpts) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject call failed", err) + return + } + getBuf = make([]byte, 5*1024*1024+1) + _, err = readFull(r, getBuf) + if err != nil { + logError(testName, function, args, startTime, "", "Read buffer failed", err) + return + } + if !bytes.Equal(getBuf[:5*1024*1024], buf) { + logError(testName, function, args, startTime, "", "Got unexpected data in second 5MB", err) + return + } + if getBuf[5*1024*1024] != buf[0] { + logError(testName, function, args, startTime, "", "Got unexpected data in last byte of copied object!", err) + return + } + + successLogger(testName, function, args, startTime).Info() + + // Do not need to remove destBucketName its same as bucketName. 
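Every part copy above also sets x-amz-copy-source-if-match to the ETag returned by PutObject, making each CopyObjectPart a conditional copy that fails if the source object has been overwritten in the meantime. A tiny helper capturing that pattern, as a sketch (hypothetical name):

    package example

    // withSourceETagGuard adds the x-amz-copy-source-if-match precondition the
    // tests set on every part copy: the server only performs the copy if the
    // source object's current ETag still matches.
    func withSourceETagGuard(metadata map[string]string, etag string) map[string]string {
        if metadata == nil {
            metadata = make(map[string]string)
        }
        metadata["x-amz-copy-source-if-match"] = etag
        return metadata
    }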
+} + +// Test Core CopyObjectPart implementation for unencrypted to SSE-S3 encrypted copy +func testUnencryptedToSSES3CopyObjectPart() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "CopyObjectPart(destination, source)" + args := map[string]interface{}{} + + // Instantiate new minio client object + client, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err) + return + } + + // Instantiate new core client object. + c := minio.Core{client} + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") + + // Make a new bucket. + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + defer cleanupBucket(bucketName, client) + // Make a buffer with 5MB of data + buf := bytes.Repeat([]byte("abcde"), 1024*1024) + + // Save the data + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + opts := minio.PutObjectOptions{ + UserMetadata: map[string]string{ + "Content-Type": "binary/octet-stream", + }, + } + uploadInfo, err := c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), "", "", opts) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject call failed", err) + return + } + st, err := c.StatObject(context.Background(), bucketName, objectName, minio.StatObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "StatObject call failed", err) + return + } + + if st.Size != int64(len(buf)) { + logError(testName, function, args, startTime, "", fmt.Sprintf("Error: number of bytes does not match, want %v, got %v\n", len(buf), st.Size), err) + return + } + + destBucketName := bucketName + destObjectName := objectName + "-dest" + dstencryption := encrypt.NewSSE() + + uploadID, err := c.NewMultipartUpload(context.Background(), destBucketName, destObjectName, minio.PutObjectOptions{ServerSideEncryption: dstencryption}) + if err != nil { + logError(testName, function, args, startTime, "", "NewMultipartUpload call failed", err) + return + } + + // Content of the destination object will be two copies of + // `objectName` concatenated, followed by first byte of + // `objectName`. 
+ metadata := make(map[string]string) + header := make(http.Header) + dstencryption.Marshal(header) + + for k, v := range header { + metadata[k] = v[0] + } + + metadata["x-amz-copy-source-if-match"] = uploadInfo.ETag + + // First of three parts + fstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 1, 0, -1, metadata) + if err != nil { + logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) + return + } + + // Second of three parts + sndPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 2, 0, -1, metadata) + if err != nil { + logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) + return + } + + // Last of three parts + lstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 3, 0, 1, metadata) + if err != nil { + logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) + return + } + + // Complete the multipart upload + _, err = c.CompleteMultipartUpload(context.Background(), destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart}, minio.PutObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "CompleteMultipartUpload call failed", err) + return + } + + // Stat the object and check its length matches + objInfo, err := c.StatObject(context.Background(), destBucketName, destObjectName, minio.StatObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "StatObject call failed", err) + return + } + + if objInfo.Size != (5*1024*1024)*2+1 { + logError(testName, function, args, startTime, "", "Destination object has incorrect size!", err) + return + } + + // Now we read the data back + getOpts := minio.GetObjectOptions{} + getOpts.SetRange(0, 5*1024*1024-1) + r, _, _, err := c.GetObject(context.Background(), destBucketName, destObjectName, getOpts) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject call failed", err) + return + } + getBuf := make([]byte, 5*1024*1024) + _, err = readFull(r, getBuf) + if err != nil { + logError(testName, function, args, startTime, "", "Read buffer failed", err) + return + } + if !bytes.Equal(getBuf, buf) { + logError(testName, function, args, startTime, "", "Got unexpected data in first 5MB", err) + return + } + + getOpts.SetRange(5*1024*1024, 0) + r, _, _, err = c.GetObject(context.Background(), destBucketName, destObjectName, getOpts) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject call failed", err) + return + } + getBuf = make([]byte, 5*1024*1024+1) + _, err = readFull(r, getBuf) + if err != nil { + logError(testName, function, args, startTime, "", "Read buffer failed", err) + return + } + if !bytes.Equal(getBuf[:5*1024*1024], buf) { + logError(testName, function, args, startTime, "", "Got unexpected data in second 5MB", err) + return + } + if getBuf[5*1024*1024] != buf[0] { + logError(testName, function, args, startTime, "", "Got unexpected data in last byte of copied object!", err) + return + } + + successLogger(testName, function, args, startTime).Info() + + // Do not need to remove destBucketName its same as bucketName. 
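Unlike SSE-C, the SSE-S3 destinations in this test need only the single marker returned by encrypt.NewSSE(); the server manages the key, so the later StatObject and GetObject calls use empty options, exactly as above. A minimal sketch assuming minio-go v7 (the function name is illustrative):

    package example

    import (
        "bytes"
        "context"

        "github.com/minio/minio-go/v7"
        "github.com/minio/minio-go/v7/pkg/encrypt"
    )

    // putSSES3 uploads with SSE-S3 (server-managed keys). No encryption
    // headers are needed to read the object back afterwards.
    func putSSES3(ctx context.Context, c *minio.Client, bucket, object string, data []byte) error {
        _, err := c.PutObject(ctx, bucket, object, bytes.NewReader(data), int64(len(data)),
            minio.PutObjectOptions{ServerSideEncryption: encrypt.NewSSE()})
        return err
    }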
+} + +// Test Core CopyObjectPart implementation for SSE-S3 to SSEC encryption copy part +func testSSES3EncryptedToSSECCopyObjectPart() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "CopyObjectPart(destination, source)" + args := map[string]interface{}{} + + // Instantiate new minio client object + client, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err) + return + } + + // Instantiate new core client object. + c := minio.Core{client} + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") + + // Make a new bucket. + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + defer cleanupBucket(bucketName, client) + // Make a buffer with 5MB of data + buf := bytes.Repeat([]byte("abcde"), 1024*1024) + + // Save the data + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + password := "correct horse battery staple" + srcEncryption := encrypt.NewSSE() + opts := minio.PutObjectOptions{ + UserMetadata: map[string]string{ + "Content-Type": "binary/octet-stream", + }, + ServerSideEncryption: srcEncryption, + } + uploadInfo, err := c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), "", "", opts) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject call failed", err) + return + } + + st, err := c.StatObject(context.Background(), bucketName, objectName, minio.StatObjectOptions{ServerSideEncryption: srcEncryption}) + if err != nil { + logError(testName, function, args, startTime, "", "StatObject call failed", err) + return + } + + if st.Size != int64(len(buf)) { + logError(testName, function, args, startTime, "", fmt.Sprintf("Error: number of bytes does not match, want %v, got %v\n", len(buf), st.Size), err) + return + } + + destBucketName := bucketName + destObjectName := objectName + "-dest" + dstencryption := encrypt.DefaultPBKDF([]byte(password), []byte(destBucketName+destObjectName)) + + uploadID, err := c.NewMultipartUpload(context.Background(), destBucketName, destObjectName, minio.PutObjectOptions{ServerSideEncryption: dstencryption}) + if err != nil { + logError(testName, function, args, startTime, "", "NewMultipartUpload call failed", err) + return + } + + // Content of the destination object will be two copies of + // `objectName` concatenated, followed by first byte of + // `objectName`. 
+ metadata := make(map[string]string) + header := make(http.Header) + dstencryption.Marshal(header) + for k, v := range header { + metadata[k] = v[0] + } + + metadata["x-amz-copy-source-if-match"] = uploadInfo.ETag + + // First of three parts + fstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 1, 0, -1, metadata) + if err != nil { + logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) + return + } + + // Second of three parts + sndPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 2, 0, -1, metadata) + if err != nil { + logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) + return + } + + // Last of three parts + lstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 3, 0, 1, metadata) + if err != nil { + logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) + return + } + + // Complete the multipart upload + _, err = c.CompleteMultipartUpload(context.Background(), destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart}, minio.PutObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "CompleteMultipartUpload call failed", err) + return + } + + // Stat the object and check its length matches + objInfo, err := c.StatObject(context.Background(), destBucketName, destObjectName, minio.StatObjectOptions{ServerSideEncryption: dstencryption}) + if err != nil { + logError(testName, function, args, startTime, "", "StatObject call failed", err) + return + } + + if objInfo.Size != (5*1024*1024)*2+1 { + logError(testName, function, args, startTime, "", "Destination object has incorrect size!", err) + return + } + + // Now we read the data back + getOpts := minio.GetObjectOptions{ServerSideEncryption: dstencryption} + getOpts.SetRange(0, 5*1024*1024-1) + r, _, _, err := c.GetObject(context.Background(), destBucketName, destObjectName, getOpts) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject call failed", err) + return + } + getBuf := make([]byte, 5*1024*1024) + _, err = readFull(r, getBuf) + if err != nil { + logError(testName, function, args, startTime, "", "Read buffer failed", err) + return + } + if !bytes.Equal(getBuf, buf) { + logError(testName, function, args, startTime, "", "Got unexpected data in first 5MB", err) + return + } + + getOpts.SetRange(5*1024*1024, 0) + r, _, _, err = c.GetObject(context.Background(), destBucketName, destObjectName, getOpts) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject call failed", err) + return + } + getBuf = make([]byte, 5*1024*1024+1) + _, err = readFull(r, getBuf) + if err != nil { + logError(testName, function, args, startTime, "", "Read buffer failed", err) + return + } + if !bytes.Equal(getBuf[:5*1024*1024], buf) { + logError(testName, function, args, startTime, "", "Got unexpected data in second 5MB", err) + return + } + if getBuf[5*1024*1024] != buf[0] { + logError(testName, function, args, startTime, "", "Got unexpected data in last byte of copied object!", err) + return + } + + successLogger(testName, function, args, startTime).Info() + + // Do not need to remove destBucketName its same as bucketName. 
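The read-back verification in each test relies on GetObjectOptions.SetRange: the first call requests bytes 0 through 5MiB-1, and the second passes (5MiB, 0), which, as these tests use it, selects everything from that offset to the end of the object. A sketch of the pattern as a reusable helper (illustrative name, assuming Core.GetObject's four return values as used above):

    package example

    import (
        "context"
        "io"

        "github.com/minio/minio-go/v7"
    )

    // readRange fetches a byte range of an object via Core.GetObject. Passing
    // end == 0 with start > 0 reads from start to the end of the object.
    func readRange(ctx context.Context, c minio.Core, bucket, object string, start, end int64) ([]byte, error) {
        opts := minio.GetObjectOptions{}
        if err := opts.SetRange(start, end); err != nil {
            return nil, err
        }
        r, _, _, err := c.GetObject(ctx, bucket, object, opts)
        if err != nil {
            return nil, err
        }
        defer r.Close()
        return io.ReadAll(r)
    }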
+}
+
+// Test Core CopyObjectPart implementation for SSE-S3 encrypted to unencrypted copy part
+func testSSES3EncryptedToUnencryptedCopyPart() {
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "CopyObjectPart(destination, source)"
+ args := map[string]interface{}{}
+
+ // Instantiate new minio client object
+ client, err := minio.New(os.Getenv(serverEndpoint),
+ &minio.Options{
+ Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+ Secure: mustParseBool(os.Getenv(enableHTTPS)),
+ })
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err)
+ return
+ }
+
+ // Instantiate new core client object.
+ c := minio.Core{client}
+
+ // Enable tracing, write to stderr.
+ // c.TraceOn(os.Stderr)
+
+ // Set user agent.
+ c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
+
+ // Make a new bucket.
+ err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+ return
+ }
+ defer cleanupBucket(bucketName, client)
+ // Make a buffer with 5MB of data
+ buf := bytes.Repeat([]byte("abcde"), 1024*1024)
+
+ // Save the data
+ objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+ srcEncryption := encrypt.NewSSE()
+ opts := minio.PutObjectOptions{
+ UserMetadata: map[string]string{
+ "Content-Type": "binary/octet-stream",
+ },
+ ServerSideEncryption: srcEncryption,
+ }
+ uploadInfo, err := c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), "", "", opts)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PutObject call failed", err)
+ return
+ }
+ st, err := c.StatObject(context.Background(), bucketName, objectName, minio.StatObjectOptions{ServerSideEncryption: srcEncryption})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "StatObject call failed", err)
+ return
+ }
+
+ if st.Size != int64(len(buf)) {
+ logError(testName, function, args, startTime, "", fmt.Sprintf("Error: number of bytes does not match, want %v, got %v\n", len(buf), st.Size), err)
+ return
+ }
+
+ destBucketName := bucketName
+ destObjectName := objectName + "-dest"
+
+ uploadID, err := c.NewMultipartUpload(context.Background(), destBucketName, destObjectName, minio.PutObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "NewMultipartUpload call failed", err)
+ return
+ }
+
+ // Content of the destination object will be two copies of the
+ // source object's data concatenated, followed by the first byte
+ // of the source object.
+ metadata := make(map[string]string) + header := make(http.Header) + for k, v := range header { + metadata[k] = v[0] + } + + metadata["x-amz-copy-source-if-match"] = uploadInfo.ETag + + // First of three parts + fstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 1, 0, -1, metadata) + if err != nil { + logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) + return + } + + // Second of three parts + sndPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 2, 0, -1, metadata) + if err != nil { + logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) + return + } + + // Last of three parts + lstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 3, 0, 1, metadata) + if err != nil { + logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) + return + } + + // Complete the multipart upload + _, err = c.CompleteMultipartUpload(context.Background(), destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart}, minio.PutObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "CompleteMultipartUpload call failed", err) + return + } + + // Stat the object and check its length matches + objInfo, err := c.StatObject(context.Background(), destBucketName, destObjectName, minio.StatObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "StatObject call failed", err) + return + } + + if objInfo.Size != (5*1024*1024)*2+1 { + logError(testName, function, args, startTime, "", "Destination object has incorrect size!", err) + return + } + + // Now we read the data back + getOpts := minio.GetObjectOptions{} + getOpts.SetRange(0, 5*1024*1024-1) + r, _, _, err := c.GetObject(context.Background(), destBucketName, destObjectName, getOpts) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject call failed", err) + return + } + getBuf := make([]byte, 5*1024*1024) + _, err = readFull(r, getBuf) + if err != nil { + logError(testName, function, args, startTime, "", "Read buffer failed", err) + return + } + if !bytes.Equal(getBuf, buf) { + logError(testName, function, args, startTime, "", "Got unexpected data in first 5MB", err) + return + } + + getOpts.SetRange(5*1024*1024, 0) + r, _, _, err = c.GetObject(context.Background(), destBucketName, destObjectName, getOpts) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject call failed", err) + return + } + getBuf = make([]byte, 5*1024*1024+1) + _, err = readFull(r, getBuf) + if err != nil { + logError(testName, function, args, startTime, "", "Read buffer failed", err) + return + } + if !bytes.Equal(getBuf[:5*1024*1024], buf) { + logError(testName, function, args, startTime, "", "Got unexpected data in second 5MB", err) + return + } + if getBuf[5*1024*1024] != buf[0] { + logError(testName, function, args, startTime, "", "Got unexpected data in last byte of copied object!", err) + return + } + + successLogger(testName, function, args, startTime).Info() + + // Do not need to remove destBucketName its same as bucketName. 
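The three CompleteParts returned by CopyObjectPart are handed to CompleteMultipartUpload in part-number order; until that call succeeds the destination object does not exist, which is why every test stats it only afterwards. A minimal wrapper, as a sketch:

    package example

    import (
        "context"

        "github.com/minio/minio-go/v7"
    )

    // finish closes out a multipart copy by submitting the collected parts.
    func finish(ctx context.Context, c minio.Core, bucket, object, uploadID string,
        parts []minio.CompletePart) (minio.UploadInfo, error) {
        return c.CompleteMultipartUpload(ctx, bucket, object, uploadID, parts,
            minio.PutObjectOptions{})
    }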
+}
+
+// Test Core CopyObjectPart implementation for SSE-S3 encrypted to SSE-S3 encrypted copy
+func testSSES3EncryptedToSSES3CopyObjectPart() {
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "CopyObjectPart(destination, source)"
+ args := map[string]interface{}{}
+
+ // Instantiate new minio client object
+ client, err := minio.New(os.Getenv(serverEndpoint),
+ &minio.Options{
+ Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+ Secure: mustParseBool(os.Getenv(enableHTTPS)),
+ })
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err)
+ return
+ }
+
+ // Instantiate new core client object.
+ c := minio.Core{client}
+
+ // Enable tracing, write to stderr.
+ // c.TraceOn(os.Stderr)
+
+ // Set user agent.
+ c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
+
+ // Make a new bucket.
+ err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+ return
+ }
+ defer cleanupBucket(bucketName, client)
+ // Make a buffer with 5MB of data
+ buf := bytes.Repeat([]byte("abcde"), 1024*1024)
+
+ // Save the data
+ objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+ srcEncryption := encrypt.NewSSE()
+ opts := minio.PutObjectOptions{
+ UserMetadata: map[string]string{
+ "Content-Type": "binary/octet-stream",
+ },
+ ServerSideEncryption: srcEncryption,
+ }
+
+ uploadInfo, err := c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), "", "", opts)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PutObject call failed", err)
+ return
+ }
+ st, err := c.StatObject(context.Background(), bucketName, objectName, minio.StatObjectOptions{ServerSideEncryption: srcEncryption})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "StatObject call failed", err)
+ return
+ }
+ if st.Size != int64(len(buf)) {
+ logError(testName, function, args, startTime, "", fmt.Sprintf("Error: number of bytes does not match, want %v, got %v\n", len(buf), st.Size), err)
+ return
+ }
+
+ destBucketName := bucketName
+ destObjectName := objectName + "-dest"
+ dstencryption := encrypt.NewSSE()
+
+ uploadID, err := c.NewMultipartUpload(context.Background(), destBucketName, destObjectName, minio.PutObjectOptions{ServerSideEncryption: dstencryption})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "NewMultipartUpload call failed", err)
+ return
+ }
+
+ // Content of the destination object will be two copies of the
+ // source object's data concatenated, followed by the first byte
+ // of the source object.
+ metadata := make(map[string]string) + header := make(http.Header) + dstencryption.Marshal(header) + + for k, v := range header { + metadata[k] = v[0] + } + + metadata["x-amz-copy-source-if-match"] = uploadInfo.ETag + + // First of three parts + fstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 1, 0, -1, metadata) + if err != nil { + logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) + return + } + + // Second of three parts + sndPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 2, 0, -1, metadata) + if err != nil { + logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) + return + } + + // Last of three parts + lstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 3, 0, 1, metadata) + if err != nil { + logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) + return + } + + // Complete the multipart upload + _, err = c.CompleteMultipartUpload(context.Background(), destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart}, minio.PutObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "CompleteMultipartUpload call failed", err) + return + } + + // Stat the object and check its length matches + objInfo, err := c.StatObject(context.Background(), destBucketName, destObjectName, minio.StatObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "StatObject call failed", err) + return + } + + if objInfo.Size != (5*1024*1024)*2+1 { + logError(testName, function, args, startTime, "", "Destination object has incorrect size!", err) + return + } + + // Now we read the data back + getOpts := minio.GetObjectOptions{} + getOpts.SetRange(0, 5*1024*1024-1) + r, _, _, err := c.GetObject(context.Background(), destBucketName, destObjectName, getOpts) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject call failed", err) + return + } + getBuf := make([]byte, 5*1024*1024) + _, err = readFull(r, getBuf) + if err != nil { + logError(testName, function, args, startTime, "", "Read buffer failed", err) + return + } + if !bytes.Equal(getBuf, buf) { + logError(testName, function, args, startTime, "", "Got unexpected data in first 5MB", err) + return + } + + getOpts.SetRange(5*1024*1024, 0) + r, _, _, err = c.GetObject(context.Background(), destBucketName, destObjectName, getOpts) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject call failed", err) + return + } + getBuf = make([]byte, 5*1024*1024+1) + _, err = readFull(r, getBuf) + if err != nil { + logError(testName, function, args, startTime, "", "Read buffer failed", err) + return + } + if !bytes.Equal(getBuf[:5*1024*1024], buf) { + logError(testName, function, args, startTime, "", "Got unexpected data in second 5MB", err) + return + } + if getBuf[5*1024*1024] != buf[0] { + logError(testName, function, args, startTime, "", "Got unexpected data in last byte of copied object!", err) + return + } + + successLogger(testName, function, args, startTime).Info() + + // Do not need to remove destBucketName its same as bucketName. 
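Taken together, the preceding tests enumerate the source/destination encryption matrix for CopyObjectPart: SSE-C, unencrypted, and SSE-S3 on either side. A table-driven sketch of how the per-test key construction differs; the type and constructor below are hypothetical, not part of the patch:

    package example

    import "github.com/minio/minio-go/v7/pkg/encrypt"

    type sseKind int

    const (
        plain sseKind = iota // no server-side encryption
        ssec                 // SSE-C: caller-supplied key
        sses3                // SSE-S3: server-managed key
    )

    // newSSE builds the ServerSide value each test variant would use; nil
    // stands for an unencrypted side. SSE-C keys are derived per object.
    func newSSE(kind sseKind, password, bucket, object string) encrypt.ServerSide {
        switch kind {
        case ssec:
            return encrypt.DefaultPBKDF([]byte(password), []byte(bucket+object))
        case sses3:
            return encrypt.NewSSE()
        default:
            return nil
        }
    }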
+}
+
+func testUserMetadataCopying() {
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "CopyObject(destination, source)"
+ args := map[string]interface{}{}
+
+ // Instantiate new minio client object
+ c, err := minio.New(os.Getenv(serverEndpoint),
+ &minio.Options{
+ Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+ Secure: mustParseBool(os.Getenv(enableHTTPS)),
+ })
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
+ return
+ }
+
+ // c.TraceOn(os.Stderr)
+ testUserMetadataCopyingWrapper(c)
+}
+
+func testUserMetadataCopyingWrapper(c *minio.Client) {
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "CopyObject(destination, source)"
+ args := map[string]interface{}{}
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+ // Make a new bucket in 'us-east-1' (source bucket).
+ err := c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+ return
+ }
+
+ defer cleanupBucket(bucketName, c)
+
+ fetchMeta := func(object string) (h http.Header) {
+ objInfo, err := c.StatObject(context.Background(), bucketName, object, minio.StatObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Stat failed", err)
+ return
+ }
+ h = make(http.Header)
+ for k, vs := range objInfo.Metadata {
+ if strings.HasPrefix(strings.ToLower(k), "x-amz-meta-") {
+ h.Add(k, vs[0])
+ }
+ }
+ return h
+ }
+
+ // 1. Create an object with user metadata to copy, by uploading it
+ const srcSize = 1024 * 1024
+ buf := bytes.Repeat([]byte("abcde"), srcSize) // gives a buffer of 5MiB
+ metadata := make(http.Header)
+ metadata.Set("x-amz-meta-myheader", "myvalue")
+ m := make(map[string]string)
+ m["x-amz-meta-myheader"] = "myvalue"
+ _, err = c.PutObject(context.Background(), bucketName, "srcObject",
+ bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{UserMetadata: m})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PutObjectWithMetadata failed", err)
+ return
+ }
+ if !reflect.DeepEqual(metadata, fetchMeta("srcObject")) {
+ logError(testName, function, args, startTime, "", "Metadata match failed", err)
+ return
+ }
+
+ // 2. create source
+ src := minio.CopySrcOptions{
+ Bucket: bucketName,
+ Object: "srcObject",
+ }
+
+ // 2.1 create destination with metadata set
+ dst1 := minio.CopyDestOptions{
+ Bucket: bucketName,
+ Object: "dstObject-1",
+ UserMetadata: map[string]string{"notmyheader": "notmyvalue"},
+ ReplaceMetadata: true,
+ }
+
+ // 3. Check that copying to an object with metadata set resets
+ // the headers on the copy.
+ args["source"] = src
+ args["destination"] = dst1
+ _, err = c.CopyObject(context.Background(), dst1, src)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "CopyObject failed", err)
+ return
+ }
+
+ expectedHeaders := make(http.Header)
+ expectedHeaders.Set("x-amz-meta-notmyheader", "notmyvalue")
+ if !reflect.DeepEqual(expectedHeaders, fetchMeta("dstObject-1")) {
+ logError(testName, function, args, startTime, "", "Metadata match failed", err)
+ return
+ }
+
+ // 4. create destination with no metadata set and same source
+ dst2 := minio.CopyDestOptions{
+ Bucket: bucketName,
+ Object: "dstObject-2",
+ }
+
+ // 5. Check that copying to an object with no metadata set copies
+ // the source metadata.
+ args["source"] = src
+ args["destination"] = dst2
+ _, err = c.CopyObject(context.Background(), dst2, src)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "CopyObject failed", err)
+ return
+ }
+
+ expectedHeaders = metadata
+ if !reflect.DeepEqual(expectedHeaders, fetchMeta("dstObject-2")) {
+ logError(testName, function, args, startTime, "", "Metadata match failed", err)
+ return
+ }
+
+ // 6. Compose a pair of sources.
+ dst3 := minio.CopyDestOptions{
+ Bucket: bucketName,
+ Object: "dstObject-3",
+ ReplaceMetadata: true,
+ }
+
+ function = "ComposeObject(destination, sources)"
+ args["source"] = []minio.CopySrcOptions{src, src}
+ args["destination"] = dst3
+ _, err = c.ComposeObject(context.Background(), dst3, src, src)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "ComposeObject failed", err)
+ return
+ }
+
+ // Check that no headers are copied in this case
+ if !reflect.DeepEqual(make(http.Header), fetchMeta("dstObject-3")) {
+ logError(testName, function, args, startTime, "", "Metadata match failed", err)
+ return
+ }
+
+ // 7. Compose a pair of sources with dest user metadata set.
+ dst4 := minio.CopyDestOptions{
+ Bucket: bucketName,
+ Object: "dstObject-4",
+ UserMetadata: map[string]string{"notmyheader": "notmyvalue"},
+ ReplaceMetadata: true,
+ }
+
+ function = "ComposeObject(destination, sources)"
+ args["source"] = []minio.CopySrcOptions{src, src}
+ args["destination"] = dst4
+ _, err = c.ComposeObject(context.Background(), dst4, src, src)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "ComposeObject failed", err)
+ return
+ }
+
+ // Check that only the explicitly set destination metadata is present
+ expectedHeaders = make(http.Header)
+ expectedHeaders.Set("x-amz-meta-notmyheader", "notmyvalue")
+ if !reflect.DeepEqual(expectedHeaders, fetchMeta("dstObject-4")) {
+ logError(testName, function, args, startTime, "", "Metadata match failed", err)
+ return
+ }
+
+ successLogger(testName, function, args, startTime).Info()
+}
+
+func testUserMetadataCopyingV2() {
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "CopyObject(destination, source)"
+ args := map[string]interface{}{}
+
+ // Instantiate new minio client object
+ c, err := minio.New(os.Getenv(serverEndpoint),
+ &minio.Options{
+ Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+ Secure: mustParseBool(os.Getenv(enableHTTPS)),
+ })
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MinIO client v2 object creation failed", err)
+ return
+ }
+
+ // c.TraceOn(os.Stderr)
+ testUserMetadataCopyingWrapper(c)
+}
+
+func testStorageClassMetadataPutObject() {
+ // initialize logging params
+ startTime := time.Now()
+ function := "testStorageClassMetadataPutObject()"
+ args := map[string]interface{}{}
+ testName := getFuncName()
+
+ // Instantiate new minio client object
+ c, err := minio.New(os.Getenv(serverEndpoint),
+ &minio.Options{
+ Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+ Secure: mustParseBool(os.Getenv(enableHTTPS)),
+ })
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err)
+ return
+ }
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
+ // Make a new bucket in 'us-east-1' (source bucket).
+ err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+ return
+ }
+
+ defer cleanupBucket(bucketName, c)
+
+ fetchMeta := func(object string) (h http.Header) {
+ objInfo, err := c.StatObject(context.Background(), bucketName, object, minio.StatObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Stat failed", err)
+ return
+ }
+ h = make(http.Header)
+ for k, vs := range objInfo.Metadata {
+ if strings.HasPrefix(strings.ToLower(k), "x-amz-storage-class") {
+ for _, v := range vs {
+ h.Add(k, v)
+ }
+ }
+ }
+ return h
+ }
+
+ metadata := make(http.Header)
+ metadata.Set("x-amz-storage-class", "REDUCED_REDUNDANCY")
+
+ emptyMetadata := make(http.Header)
+
+ const srcSize = 1024 * 1024
+ buf := bytes.Repeat([]byte("abcde"), srcSize) // gives a buffer of 5MiB
+
+ _, err = c.PutObject(context.Background(), bucketName, "srcObjectRRSClass",
+ bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{StorageClass: "REDUCED_REDUNDANCY"})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PutObject failed", err)
+ return
+ }
+
+ // Get the returned metadata
+ returnedMeta := fetchMeta("srcObjectRRSClass")
+
+ // The response metadata should either be equal to metadata (with REDUCED_REDUNDANCY) or emptyMetadata (in case of gateways)
+ if !reflect.DeepEqual(metadata, returnedMeta) && !reflect.DeepEqual(emptyMetadata, returnedMeta) {
+ logError(testName, function, args, startTime, "", "Metadata match failed", err)
+ return
+ }
+
+ metadata = make(http.Header)
+ metadata.Set("x-amz-storage-class", "STANDARD")
+
+ _, err = c.PutObject(context.Background(), bucketName, "srcObjectSSClass",
+ bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{StorageClass: "STANDARD"})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PutObject failed", err)
+ return
+ }
+ if reflect.DeepEqual(metadata, fetchMeta("srcObjectSSClass")) {
+ logError(testName, function, args, startTime, "", "Metadata verification failed, STANDARD storage class should not be a part of response metadata", err)
+ return
+ }
+
+ successLogger(testName, function, args, startTime).Info()
+}
+
+func testStorageClassInvalidMetadataPutObject() {
+ // initialize logging params
+ startTime := time.Now()
+ function := "testStorageClassInvalidMetadataPutObject()"
+ args := map[string]interface{}{}
+ testName := getFuncName()
+
+ // Instantiate new minio client object
+ c, err := minio.New(os.Getenv(serverEndpoint),
+ &minio.Options{
+ Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+ Secure: mustParseBool(os.Getenv(enableHTTPS)),
+ })
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err)
+ return
+ }
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
+ // Make a new bucket in 'us-east-1' (source bucket).
+ err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+ return
+ }
+
+ defer cleanupBucket(bucketName, c)
+
+ const srcSize = 1024 * 1024
+ buf := bytes.Repeat([]byte("abcde"), srcSize) // gives a buffer of 5MiB
+
+ _, err = c.PutObject(context.Background(), bucketName, "srcObjectRRSClass",
+ bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{StorageClass: "INVALID_STORAGE_CLASS"})
+ if err == nil {
+ logError(testName, function, args, startTime, "", "PutObject with invalid storage class passed, was expected to fail", err)
+ return
+ }
+
+ successLogger(testName, function, args, startTime).Info()
+}
+
+func testStorageClassMetadataCopyObject() {
+ // initialize logging params
+ startTime := time.Now()
+ function := "testStorageClassMetadataCopyObject()"
+ args := map[string]interface{}{}
+ testName := getFuncName()
+
+ // Instantiate new minio client object
+ c, err := minio.New(os.Getenv(serverEndpoint),
+ &minio.Options{
+ Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+ Secure: mustParseBool(os.Getenv(enableHTTPS)),
+ })
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err)
+ return
+ }
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
+ // Make a new bucket in 'us-east-1' (source bucket).
+ err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+ return
+ }
+
+ defer cleanupBucket(bucketName, c)
+
+ fetchMeta := func(object string) (h http.Header) {
+ objInfo, err := c.StatObject(context.Background(), bucketName, object, minio.StatObjectOptions{})
+ args["bucket"] = bucketName
+ args["object"] = object
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Stat failed", err)
+ return
+ }
+ h = make(http.Header)
+ for k, vs := range objInfo.Metadata {
+ if strings.HasPrefix(strings.ToLower(k), "x-amz-storage-class") {
+ for _, v := range vs {
+ h.Add(k, v)
+ }
+ }
+ }
+ return h
+ }
+
+ metadata := make(http.Header)
+ metadata.Set("x-amz-storage-class", "REDUCED_REDUNDANCY")
+
+ emptyMetadata := make(http.Header)
+
+ const srcSize = 1024 * 1024
+ buf := bytes.Repeat([]byte("abcde"), srcSize)
+
+ // Put an object with RRS Storage class
+ _, err = c.PutObject(context.Background(), bucketName, "srcObjectRRSClass",
+ bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{StorageClass: "REDUCED_REDUNDANCY"})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PutObject failed", err)
+ return
+ }
+
+ // Make server side copy of object uploaded in previous step
+ src := minio.CopySrcOptions{
+ Bucket: bucketName,
+ Object: "srcObjectRRSClass",
+ }
+ dst := minio.CopyDestOptions{
+ Bucket: bucketName,
+ Object: "srcObjectRRSClassCopy",
+ }
+ if _, err = c.CopyObject(context.Background(), dst, src); err != nil {
+ logError(testName, function, args, startTime, "", "CopyObject failed on RRS", err)
+ return
+ }
+
+ // Get the returned metadata
+ returnedMeta := fetchMeta("srcObjectRRSClassCopy")
+
+ // The response metadata should either be equal to metadata (with REDUCED_REDUNDANCY) or emptyMetadata (in case of gateways)
+ if !reflect.DeepEqual(metadata, returnedMeta) &&
!reflect.DeepEqual(emptyMetadata, returnedMeta) { + logError(testName, function, args, startTime, "", "Metadata match failed", err) + return + } + + metadata = make(http.Header) + metadata.Set("x-amz-storage-class", "STANDARD") + + // Put an object with Standard Storage class + _, err = c.PutObject(context.Background(), bucketName, "srcObjectSSClass", + bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{StorageClass: "STANDARD"}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + + // Make server side copy of object uploaded in previous step + src = minio.CopySrcOptions{ + Bucket: bucketName, + Object: "srcObjectSSClass", + } + dst = minio.CopyDestOptions{ + Bucket: bucketName, + Object: "srcObjectSSClassCopy", + } + if _, err = c.CopyObject(context.Background(), dst, src); err != nil { + logError(testName, function, args, startTime, "", "CopyObject failed on SS", err) + return + } + // Fetch the meta data of copied object + if reflect.DeepEqual(metadata, fetchMeta("srcObjectSSClassCopy")) { + logError(testName, function, args, startTime, "", "Metadata verification failed, STANDARD storage class should not be a part of response metadata", err) + return + } + + successLogger(testName, function, args, startTime).Info() +} + +// Test put object with size -1 byte object. +func testPutObjectNoLengthV2() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "PutObject(bucketName, objectName, reader, size, opts)" + args := map[string]interface{}{ + "bucketName": "", + "objectName": "", + "size": -1, + "opts": "", + } + + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object. + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client v2 object creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + defer cleanupBucket(bucketName, c) + + objectName := bucketName + "unique" + args["objectName"] = objectName + + bufSize := dataFileMap["datafile-129-MB"] + reader := getDataReader("datafile-129-MB") + defer reader.Close() + args["size"] = bufSize + + // Upload an object. 
+	_, err = c.PutObject(context.Background(), bucketName, objectName, reader, -1, minio.PutObjectOptions{})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "PutObjectWithSize failed", err)
+		return
+	}
+
+	st, err := c.StatObject(context.Background(), bucketName, objectName, minio.StatObjectOptions{})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "StatObject failed", err)
+		return
+	}
+
+	if st.Size != int64(bufSize) {
+		logError(testName, function, args, startTime, "", fmt.Sprintf("Expected upload object size %d got %d", bufSize, st.Size), err)
+		return
+	}
+
+	successLogger(testName, function, args, startTime).Info()
+}
+
+// Test put objects of unknown size.
+func testPutObjectsUnknownV2() {
+	// initialize logging params
+	startTime := time.Now()
+	testName := getFuncName()
+	function := "PutObject(bucketName, objectName, reader, size, opts)"
+	args := map[string]interface{}{
+		"bucketName": "",
+		"objectName": "",
+		"size":       "",
+		"opts":       "",
+	}
+
+	// Seed random based on current time.
+	rand.Seed(time.Now().Unix())
+
+	// Instantiate new minio client object.
+	c, err := minio.New(os.Getenv(serverEndpoint),
+		&minio.Options{
+			Creds:  credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Secure: mustParseBool(os.Getenv(enableHTTPS)),
+		})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "MinIO client v2 object creation failed", err)
+		return
+	}
+
+	// Enable tracing, write to stderr.
+	// c.TraceOn(os.Stderr)
+
+	// Set user agent.
+	c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
+
+	// Generate a new random bucket name.
+	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+	args["bucketName"] = bucketName
+
+	// Make a new bucket.
+	err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+		return
+	}
+
+	defer cleanupBucket(bucketName, c)
+
+	// Issues are revealed by trying to upload multiple files of unknown size
+	// sequentially (on 4GB machines)
+	for i := 1; i <= 4; i++ {
+		// Simulate that we could be receiving byte slices of data that we want
+		// to upload as a file
+		rpipe, wpipe := io.Pipe()
+		defer rpipe.Close()
+		go func() {
+			b := []byte("test")
+			wpipe.Write(b)
+			wpipe.Close()
+		}()
+
+		// Upload the object.
+		objectName := fmt.Sprintf("%sunique%d", bucketName, i)
+		args["objectName"] = objectName
+
+		ui, err := c.PutObject(context.Background(), bucketName, objectName, rpipe, -1, minio.PutObjectOptions{})
+		if err != nil {
+			logError(testName, function, args, startTime, "", "PutObjectStreaming failed", err)
+			return
+		}
+
+		if ui.Size != 4 {
+			logError(testName, function, args, startTime, "", fmt.Sprintf("Expected upload object size %d got %d", 4, ui.Size), nil)
+			return
+		}
+
+		st, err := c.StatObject(context.Background(), bucketName, objectName, minio.StatObjectOptions{})
+		if err != nil {
+			logError(testName, function, args, startTime, "", "StatObjectStreaming failed", err)
+			return
+		}
+
+		if st.Size != int64(4) {
+			logError(testName, function, args, startTime, "", fmt.Sprintf("Expected upload object size %d got %d", 4, st.Size), err)
+			return
+		}
+
+	}
+
+	successLogger(testName, function, args, startTime).Info()
+}
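As an aside, the unknown-length pattern exercised above condenses to this sketch (a hypothetical helper reusing this file's imports): passing size -1 makes minio-go v7 fall back to a streaming multipart upload of internally buffered parts.

// uploadUnknownSize streams src until EOF; the final size is only known afterwards.
func uploadUnknownSize(c *minio.Client, bucket, object string, src io.Reader) (minio.UploadInfo, error) {
	return c.PutObject(context.Background(), bucket, object, src, -1, minio.PutObjectOptions{})
}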
+// Test put object with 0 byte object.
+func testPutObject0ByteV2() {
+	// initialize logging params
+	startTime := time.Now()
+	testName := getFuncName()
+	function := "PutObject(bucketName, objectName, reader, size, opts)"
+	args := map[string]interface{}{
+		"bucketName": "",
+		"objectName": "",
+		"size":       0,
+		"opts":       "",
+	}
+
+	// Seed random based on current time.
+	rand.Seed(time.Now().Unix())
+
+	// Instantiate new minio client object.
+	c, err := minio.New(os.Getenv(serverEndpoint),
+		&minio.Options{
+			Creds:  credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Secure: mustParseBool(os.Getenv(enableHTTPS)),
+		})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "MinIO client v2 object creation failed", err)
+		return
+	}
+
+	// Enable tracing, write to stderr.
+	// c.TraceOn(os.Stderr)
+
+	// Set user agent.
+	c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
+
+	// Generate a new random bucket name.
+	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+	args["bucketName"] = bucketName
+
+	// Make a new bucket.
+	err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+		return
+	}
+
+	defer cleanupBucket(bucketName, c)
+
+	objectName := bucketName + "unique"
+	args["objectName"] = objectName
+	args["opts"] = minio.PutObjectOptions{}
+
+	// Upload an object.
+	_, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader([]byte("")), 0, minio.PutObjectOptions{})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "PutObjectWithSize failed", err)
+		return
+	}
+	st, err := c.StatObject(context.Background(), bucketName, objectName, minio.StatObjectOptions{})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "StatObjectWithSize failed", err)
+		return
+	}
+	if st.Size != 0 {
+		logError(testName, function, args, startTime, "", fmt.Sprintf("Expected upload object size 0 but got %d", st.Size), err)
+		return
+	}
+
+	successLogger(testName, function, args, startTime).Info()
+}
+
+// Test expected error cases
+func testComposeObjectErrorCases() {
+	// initialize logging params
+	startTime := time.Now()
+	testName := getFuncName()
+	function := "ComposeObject(destination, sourceList)"
+	args := map[string]interface{}{}
+
+	// Instantiate new minio client object
+	c, err := minio.New(os.Getenv(serverEndpoint),
+		&minio.Options{
+			Creds:  credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Secure: mustParseBool(os.Getenv(enableHTTPS)),
+		})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
+		return
+	}
+
+	testComposeObjectErrorCasesWrapper(c)
+}
+
+// Test concatenating multiple 10K objects V4
+func testCompose10KSources() {
+	// initialize logging params
+	startTime := time.Now()
+	testName := getFuncName()
+	function := "ComposeObject(destination, sourceList)"
+	args := map[string]interface{}{}
+
+	// Instantiate new minio client object
+	c, err := minio.New(os.Getenv(serverEndpoint),
+		&minio.Options{
+			Creds:  credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Secure: mustParseBool(os.Getenv(enableHTTPS)),
+		})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
+		return
+	}
+
+	testComposeMultipleSources(c)
+}
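The compose tests above drive server-side concatenation; stripped of logging, the core call looks like this sketch (a hypothetical helper reusing this file's imports):

// concatObjects composes dst from the listed source objects, in order.
func concatObjects(c *minio.Client, bucket, dst string, srcs []string) error {
	sources := make([]minio.CopySrcOptions, 0, len(srcs))
	for _, name := range srcs {
		sources = append(sources, minio.CopySrcOptions{Bucket: bucket, Object: name})
	}
	_, err := c.ComposeObject(context.Background(),
		minio.CopyDestOptions{Bucket: bucket, Object: dst}, sources...)
	return err
}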
+// Tests comprehensive list of all methods.
+func testFunctionalV2() {
+	// initialize logging params
+	startTime := time.Now()
+	testName := getFuncName()
+	function := "testFunctionalV2()"
+	functionAll := ""
+	args := map[string]interface{}{}
+
+	// Seed random based on current time.
+	rand.Seed(time.Now().Unix())
+
+	c, err := minio.New(os.Getenv(serverEndpoint),
+		&minio.Options{
+			Creds:  credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Secure: mustParseBool(os.Getenv(enableHTTPS)),
+		})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "MinIO client v2 object creation failed", err)
+		return
+	}
+
+	// Enable to debug
+	// c.TraceOn(os.Stderr)
+
+	// Set user agent.
+	c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
+
+	// Generate a new random bucket name.
+	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+	location := "us-east-1"
+	// Make a new bucket.
+	function = "MakeBucket(bucketName, location)"
+	functionAll = "MakeBucket(bucketName, location)"
+	args = map[string]interface{}{
+		"bucketName": bucketName,
+		"location":   location,
+	}
+	err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: location})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+		return
+	}
+
+	defer cleanupBucket(bucketName, c)
+
+	// Generate a random file name.
+	fileName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+	file, err := os.Create(fileName)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "file create failed", err)
+		return
+	}
+	for i := 0; i < 3; i++ {
+		buf := make([]byte, rand.Intn(1<<19))
+		_, err = file.Write(buf)
+		if err != nil {
+			logError(testName, function, args, startTime, "", "file write failed", err)
+			return
+		}
+	}
+	file.Close()
+
+	// Verify if bucket exists and you have access.
+	var exists bool
+	function = "BucketExists(bucketName)"
+	functionAll += ", " + function
+	args = map[string]interface{}{
+		"bucketName": bucketName,
+	}
+	exists, err = c.BucketExists(context.Background(), bucketName)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "BucketExists failed", err)
+		return
+	}
+	if !exists {
+		logError(testName, function, args, startTime, "", "Could not find existing bucket "+bucketName, err)
+		return
+	}
+
+	// Set a bucket policy granting public list access.
+	function = "SetBucketPolicy(bucketName, bucketPolicy)"
+	functionAll += ", " + function
+
+	readWritePolicy := `{"Version": "2012-10-17","Statement": [{"Action": ["s3:ListBucketMultipartUploads", "s3:ListBucket"],"Effect": "Allow","Principal": {"AWS": ["*"]},"Resource": ["arn:aws:s3:::` + bucketName + `"],"Sid": ""}]}`
+
+	args = map[string]interface{}{
+		"bucketName":   bucketName,
+		"bucketPolicy": readWritePolicy,
+	}
+	err = c.SetBucketPolicy(context.Background(), bucketName, readWritePolicy)
+
+	if err != nil {
+		logError(testName, function, args, startTime, "", "SetBucketPolicy failed", err)
+		return
+	}
+
+	// List all buckets.
+	function = "ListBuckets()"
+	functionAll += ", " + function
+	args = nil
+	buckets, err := c.ListBuckets(context.Background())
+	if err != nil {
+		logError(testName, function, args, startTime, "", "ListBuckets failed", err)
+		return
+	}
+	if len(buckets) == 0 {
+		logError(testName, function, args, startTime, "", "List buckets cannot be empty", err)
+		return
+	}
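For readability, the one-line policy passed to SetBucketPolicy above, indented (the JSON content is unchanged; bucketName is assumed in scope as in the test):

readWritePolicy := `{
	"Version": "2012-10-17",
	"Statement": [{
		"Action": ["s3:ListBucketMultipartUploads", "s3:ListBucket"],
		"Effect": "Allow",
		"Principal": {"AWS": ["*"]},
		"Resource": ["arn:aws:s3:::` + bucketName + `"],
		"Sid": ""
	}]
}`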
+	// Verify if previously created bucket is listed in list buckets.
+	bucketFound := false
+	for _, bucket := range buckets {
+		if bucket.Name == bucketName {
+			bucketFound = true
+		}
+	}
+
+	// If bucket not found error out.
+	if !bucketFound {
+		logError(testName, function, args, startTime, "", "Bucket "+bucketName+" not found", err)
+		return
+	}
+
+	objectName := bucketName + "unique"
+
+	// Generate data
+	buf := bytes.Repeat([]byte("n"), rand.Intn(1<<19))
+
+	args = map[string]interface{}{
+		"bucketName":  bucketName,
+		"objectName":  objectName,
+		"contentType": "",
+	}
+	_, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "PutObject failed", err)
+		return
+	}
+
+	st, err := c.StatObject(context.Background(), bucketName, objectName, minio.StatObjectOptions{})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "StatObject failed", err)
+		return
+	}
+	if st.Size != int64(len(buf)) {
+		logError(testName, function, args, startTime, "", fmt.Sprintf("Expected uploaded object length %d got %d", len(buf), st.Size), err)
+		return
+	}
+
+	objectNameNoLength := objectName + "-nolength"
+	args["objectName"] = objectNameNoLength
+	_, err = c.PutObject(context.Background(), bucketName, objectNameNoLength, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "PutObject failed", err)
+		return
+	}
+	st, err = c.StatObject(context.Background(), bucketName, objectNameNoLength, minio.StatObjectOptions{})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "StatObject failed", err)
+		return
+	}
+	if st.Size != int64(len(buf)) {
+		logError(testName, function, args, startTime, "", fmt.Sprintf("Expected uploaded object length %d got %d", len(buf), st.Size), err)
+		return
+	}
+
+	// Instantiate a done channel to close all listing.
+	doneCh := make(chan struct{})
+	defer close(doneCh)
+
+	objFound := false
+	isRecursive := true // Recursive is true.
+ function = "ListObjects(bucketName, objectName, isRecursive, doneCh)" + functionAll += ", " + function + args = map[string]interface{}{ + "bucketName": bucketName, + "objectName": objectName, + "isRecursive": isRecursive, + } + for obj := range c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{UseV1: true, Prefix: objectName, Recursive: isRecursive}) { + if obj.Key == objectName { + objFound = true + break + } + } + if !objFound { + logError(testName, function, args, startTime, "", "Could not find existing object "+objectName, err) + return + } + + incompObjNotFound := true + function = "ListIncompleteUploads(bucketName, objectName, isRecursive, doneCh)" + functionAll += ", " + function + args = map[string]interface{}{ + "bucketName": bucketName, + "objectName": objectName, + "isRecursive": isRecursive, + } + for objIncompl := range c.ListIncompleteUploads(context.Background(), bucketName, objectName, isRecursive) { + if objIncompl.Key != "" { + incompObjNotFound = false + break + } + } + if !incompObjNotFound { + logError(testName, function, args, startTime, "", "Unexpected dangling incomplete upload found", err) + return + } + + function = "GetObject(bucketName, objectName)" + functionAll += ", " + function + args = map[string]interface{}{ + "bucketName": bucketName, + "objectName": objectName, + } + newReader, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject failed", err) + return + } + + newReadBytes, err := ioutil.ReadAll(newReader) + if err != nil { + logError(testName, function, args, startTime, "", "ReadAll failed", err) + return + } + newReader.Close() + + if !bytes.Equal(newReadBytes, buf) { + logError(testName, function, args, startTime, "", "Bytes mismatch", err) + return + } + + function = "FGetObject(bucketName, objectName, fileName)" + functionAll += ", " + function + args = map[string]interface{}{ + "bucketName": bucketName, + "objectName": objectName, + "fileName": fileName + "-f", + } + err = c.FGetObject(context.Background(), bucketName, objectName, fileName+"-f", minio.GetObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "FgetObject failed", err) + return + } + + // Generate presigned HEAD object url. + function = "PresignedHeadObject(bucketName, objectName, expires, reqParams)" + functionAll += ", " + function + args = map[string]interface{}{ + "bucketName": bucketName, + "objectName": objectName, + "expires": 3600 * time.Second, + } + presignedHeadURL, err := c.PresignedHeadObject(context.Background(), bucketName, objectName, 3600*time.Second, nil) + if err != nil { + logError(testName, function, args, startTime, "", "PresignedHeadObject failed", err) + return + } + + transport, err := minio.DefaultTransport(mustParseBool(os.Getenv(enableHTTPS))) + if err != nil { + logError(testName, function, args, startTime, "", "DefaultTransport failed", err) + return + } + + httpClient := &http.Client{ + // Setting a sensible time out of 30secs to wait for response + // headers. Request is pro-actively canceled after 30secs + // with no response. + Timeout: 30 * time.Second, + Transport: transport, + } + + req, err := http.NewRequest(http.MethodHead, presignedHeadURL.String(), nil) + if err != nil { + logError(testName, function, args, startTime, "", "PresignedHeadObject URL head request failed", err) + return + } + + // Verify if presigned url works. 
+	resp, err := httpClient.Do(req)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "PresignedHeadObject URL head request failed", err)
+		return
+	}
+	if resp.StatusCode != http.StatusOK {
+		logError(testName, function, args, startTime, "", "PresignedHeadObject URL returns status "+strconv.Itoa(resp.StatusCode), err)
+		return
+	}
+	if resp.Header.Get("ETag") == "" {
+		logError(testName, function, args, startTime, "", "Got empty ETag", err)
+		return
+	}
+	resp.Body.Close()
+
+	// Generate presigned GET object url.
+	function = "PresignedGetObject(bucketName, objectName, expires, reqParams)"
+	functionAll += ", " + function
+	args = map[string]interface{}{
+		"bucketName": bucketName,
+		"objectName": objectName,
+		"expires":    3600 * time.Second,
+	}
+	presignedGetURL, err := c.PresignedGetObject(context.Background(), bucketName, objectName, 3600*time.Second, nil)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "PresignedGetObject failed", err)
+		return
+	}
+
+	// Verify if presigned url works.
+	req, err = http.NewRequest(http.MethodGet, presignedGetURL.String(), nil)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "PresignedGetObject request incorrect", err)
+		return
+	}
+
+	resp, err = httpClient.Do(req)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "PresignedGetObject response incorrect", err)
+		return
+	}
+
+	if resp.StatusCode != http.StatusOK {
+		logError(testName, function, args, startTime, "", "PresignedGetObject URL returns status "+strconv.Itoa(resp.StatusCode), err)
+		return
+	}
+	newPresignedBytes, err := ioutil.ReadAll(resp.Body)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "ReadAll failed", err)
+		return
+	}
+	resp.Body.Close()
+	if !bytes.Equal(newPresignedBytes, buf) {
+		logError(testName, function, args, startTime, "", "Bytes mismatch", err)
+		return
+	}
+
+	// Set request parameters.
+	reqParams := make(url.Values)
+	reqParams.Set("response-content-disposition", "attachment; filename=\"test.txt\"")
+	// Generate presigned GET object url.
+	args["reqParams"] = reqParams
+	presignedGetURL, err = c.PresignedGetObject(context.Background(), bucketName, objectName, 3600*time.Second, reqParams)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "PresignedGetObject failed", err)
+		return
+	}
+
+	// Verify if presigned url works.
+	req, err = http.NewRequest(http.MethodGet, presignedGetURL.String(), nil)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "PresignedGetObject request incorrect", err)
+		return
+	}
+
+	resp, err = httpClient.Do(req)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "PresignedGetObject response incorrect", err)
+		return
+	}
+
+	if resp.StatusCode != http.StatusOK {
+		logError(testName, function, args, startTime, "", "PresignedGetObject URL returns status "+strconv.Itoa(resp.StatusCode), err)
+		return
+	}
+	newPresignedBytes, err = ioutil.ReadAll(resp.Body)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "ReadAll failed", err)
+		return
+	}
+	if !bytes.Equal(newPresignedBytes, buf) {
+		logError(testName, function, args, startTime, "", "Bytes mismatch", err)
+		return
+	}
+	// Verify content disposition.
+ if resp.Header.Get("Content-Disposition") != "attachment; filename=\"test.txt\"" { + logError(testName, function, args, startTime, "", "wrong Content-Disposition received ", err) + return + } + + function = "PresignedPutObject(bucketName, objectName, expires)" + functionAll += ", " + function + args = map[string]interface{}{ + "bucketName": bucketName, + "objectName": objectName + "-presigned", + "expires": 3600 * time.Second, + } + presignedPutURL, err := c.PresignedPutObject(context.Background(), bucketName, objectName+"-presigned", 3600*time.Second) + if err != nil { + logError(testName, function, args, startTime, "", "PresignedPutObject failed", err) + return + } + + // Generate data more than 32K + buf = bytes.Repeat([]byte("1"), rand.Intn(1<<10)+32*1024) + + req, err = http.NewRequest(http.MethodPut, presignedPutURL.String(), bytes.NewReader(buf)) + if err != nil { + logError(testName, function, args, startTime, "", "HTTP request to PresignedPutObject URL failed", err) + return + } + + resp, err = httpClient.Do(req) + if err != nil { + logError(testName, function, args, startTime, "", "HTTP request to PresignedPutObject URL failed", err) + return + } + + // Download the uploaded object to verify + args = map[string]interface{}{ + "bucketName": bucketName, + "objectName": objectName + "-presigned", + } + newReader, err = c.GetObject(context.Background(), bucketName, objectName+"-presigned", minio.GetObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject of uploaded presigned object failed", err) + return + } + + newReadBytes, err = ioutil.ReadAll(newReader) + if err != nil { + logError(testName, function, args, startTime, "", "ReadAll failed during get on presigned put object", err) + return + } + newReader.Close() + + if !bytes.Equal(newReadBytes, buf) { + logError(testName, function, args, startTime, "", "Bytes mismatch on presigned object upload verification", err) + return + } + + function = "PresignHeader(method, bucketName, objectName, expires, reqParams, extraHeaders)" + functionAll += ", " + function + presignExtraHeaders := map[string][]string{ + "mysecret": {"abcxxx"}, + } + args = map[string]interface{}{ + "method": "PUT", + "bucketName": bucketName, + "objectName": objectName + "-presign-custom", + "expires": 3600 * time.Second, + "extraHeaders": presignExtraHeaders, + } + _, err = c.PresignHeader(context.Background(), "PUT", bucketName, objectName+"-presign-custom", 3600*time.Second, nil, presignExtraHeaders) + if err == nil { + logError(testName, function, args, startTime, "", "Presigned with extra headers succeeded", err) + return + } + + os.Remove(fileName) + os.Remove(fileName + "-f") + successLogger(testName, functionAll, args, startTime).Info() +} + +// Test get object with GetObject with context +func testGetObjectContext() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "GetObject(ctx, bucketName, objectName)" + args := map[string]interface{}{ + "ctx": "", + "bucketName": "", + "objectName": "", + } + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object. 
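The context tests that follow all assert the same contract, sketched here (a hypothetical helper reusing this file's imports): GetObject is lazy, so a dead context surfaces on the first Stat or Read rather than on the call itself.

// expectCancelledRead returns nil only if reading under a cancelled context fails.
func expectCancelledRead(c *minio.Client, bucket, object string) error {
	ctx, cancel := context.WithTimeout(context.Background(), time.Nanosecond)
	cancel()
	r, err := c.GetObject(ctx, bucket, object, minio.GetObjectOptions{})
	if err != nil {
		return err // transport-level failure, not the lazy path under test
	}
	defer r.Close()
	if _, err := r.Stat(); err == nil {
		return errors.New("expected Stat to fail under a cancelled context")
	}
	return nil
}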
+	c, err := minio.New(os.Getenv(serverEndpoint),
+		&minio.Options{
+			Creds:  credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Secure: mustParseBool(os.Getenv(enableHTTPS)),
+		})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "MinIO client v4 object creation failed", err)
+		return
+	}
+
+	// Enable tracing, write to stderr.
+	// c.TraceOn(os.Stderr)
+
+	// Set user agent.
+	c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
+
+	// Generate a new random bucket name.
+	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+	args["bucketName"] = bucketName
+
+	// Make a new bucket.
+	err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+		return
+	}
+
+	defer cleanupBucket(bucketName, c)
+
+	bufSize := dataFileMap["datafile-33-kB"]
+	reader := getDataReader("datafile-33-kB")
+	defer reader.Close()
+	// Save the data
+	objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+	args["objectName"] = objectName
+
+	_, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "PutObject failed", err)
+		return
+	}
+
+	ctx, cancel := context.WithTimeout(context.Background(), 1*time.Nanosecond)
+	args["ctx"] = ctx
+	cancel()
+
+	r, err := c.GetObject(ctx, bucketName, objectName, minio.GetObjectOptions{})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "GetObject failed unexpectedly", err)
+		return
+	}
+
+	if _, err = r.Stat(); err == nil {
+		logError(testName, function, args, startTime, "", "GetObject should fail on short timeout", err)
+		return
+	}
+	r.Close()
+
+	ctx, cancel = context.WithTimeout(context.Background(), 1*time.Hour)
+	args["ctx"] = ctx
+	defer cancel()
+
+	// Read the data back
+	r, err = c.GetObject(ctx, bucketName, objectName, minio.GetObjectOptions{})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "GetObject failed", err)
+		return
+	}
+
+	st, err := r.Stat()
+	if err != nil {
+		logError(testName, function, args, startTime, "", "object Stat call failed", err)
+		return
+	}
+	if st.Size != int64(bufSize) {
+		logError(testName, function, args, startTime, "", fmt.Sprintf("Number of bytes in stat does not match: want %d, got %d", bufSize, st.Size), err)
+		return
+	}
+	if err := r.Close(); err != nil {
+		logError(testName, function, args, startTime, "", "object Close() call failed", err)
+		return
+	}
+
+	successLogger(testName, function, args, startTime).Info()
+}
+
+// Test get object with FGetObject with a user provided context
+func testFGetObjectContext() {
+	// initialize logging params
+	startTime := time.Now()
+	testName := getFuncName()
+	function := "FGetObject(ctx, bucketName, objectName, fileName)"
+	args := map[string]interface{}{
+		"ctx":        "",
+		"bucketName": "",
+		"objectName": "",
+		"fileName":   "",
+	}
+	// Seed random based on current time.
+	rand.Seed(time.Now().Unix())
+
+	// Instantiate new minio client object.
+ c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client v4 object creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + defer cleanupBucket(bucketName, c) + + bufSize := dataFileMap["datafile-1-MB"] + reader := getDataReader("datafile-1-MB") + defer reader.Close() + // Save the data + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + args["objectName"] = objectName + + _, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + + ctx, cancel := context.WithTimeout(context.Background(), 1*time.Nanosecond) + args["ctx"] = ctx + defer cancel() + + fileName := "tempfile-context" + args["fileName"] = fileName + // Read the data back + err = c.FGetObject(ctx, bucketName, objectName, fileName+"-f", minio.GetObjectOptions{}) + if err == nil { + logError(testName, function, args, startTime, "", "FGetObject should fail on short timeout", err) + return + } + ctx, cancel = context.WithTimeout(context.Background(), 1*time.Hour) + defer cancel() + + // Read the data back + err = c.FGetObject(ctx, bucketName, objectName, fileName+"-fcontext", minio.GetObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "FGetObject with long timeout failed", err) + return + } + if err = os.Remove(fileName + "-fcontext"); err != nil { + logError(testName, function, args, startTime, "", "Remove file failed", err) + return + } + + successLogger(testName, function, args, startTime).Info() +} + +// Test get object with GetObject with a user provided context +func testGetObjectRanges() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "GetObject(ctx, bucketName, objectName, fileName)" + args := map[string]interface{}{ + "ctx": "", + "bucketName": "", + "objectName": "", + "fileName": "", + } + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute) + defer cancel() + + rng := rand.NewSource(time.Now().UnixNano()) + // Instantiate new minio client object. + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client v4 object creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. 
+ bucketName := randString(60, rng, "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + defer cleanupBucket(bucketName, c) + + bufSize := dataFileMap["datafile-129-MB"] + reader := getDataReader("datafile-129-MB") + defer reader.Close() + // Save the data + objectName := randString(60, rng, "") + args["objectName"] = objectName + + _, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + + // Read the data back + tests := []struct { + start int64 + end int64 + }{ + { + start: 1024, + end: 1024 + 1<<20, + }, + { + start: 20e6, + end: 20e6 + 10000, + }, + { + start: 40e6, + end: 40e6 + 10000, + }, + { + start: 60e6, + end: 60e6 + 10000, + }, + { + start: 80e6, + end: 80e6 + 10000, + }, + { + start: 120e6, + end: int64(bufSize), + }, + } + for _, test := range tests { + wantRC := getDataReader("datafile-129-MB") + io.CopyN(ioutil.Discard, wantRC, test.start) + want := mustCrcReader(io.LimitReader(wantRC, test.end-test.start+1)) + opts := minio.GetObjectOptions{} + opts.SetRange(test.start, test.end) + args["opts"] = fmt.Sprintf("%+v", test) + obj, err := c.GetObject(ctx, bucketName, objectName, opts) + if err != nil { + logError(testName, function, args, startTime, "", "FGetObject with long timeout failed", err) + return + } + err = crcMatches(obj, want) + if err != nil { + logError(testName, function, args, startTime, "", fmt.Sprintf("GetObject offset %d -> %d", test.start, test.end), err) + return + } + } + + successLogger(testName, function, args, startTime).Info() +} + +// Test get object ACLs with GetObjectACL with custom provided context +func testGetObjectACLContext() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "GetObjectACL(ctx, bucketName, objectName)" + args := map[string]interface{}{ + "ctx": "", + "bucketName": "", + "objectName": "", + } + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object. + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client v4 object creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. 
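Stripped of verification plumbing, the range reads in testGetObjectRanges reduce to this sketch (a hypothetical helper reusing this file's imports): SetRange(start, end) maps to an inclusive HTTP Range header, so end-start+1 bytes come back.

// readRange fetches bytes [start, end] of an object.
func readRange(c *minio.Client, bucket, object string, start, end int64) ([]byte, error) {
	opts := minio.GetObjectOptions{}
	if err := opts.SetRange(start, end); err != nil {
		return nil, err
	}
	obj, err := c.GetObject(context.Background(), bucket, object, opts)
	if err != nil {
		return nil, err
	}
	defer obj.Close()
	return ioutil.ReadAll(obj)
}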
+	err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+		return
+	}
+
+	defer cleanupBucket(bucketName, c)
+
+	bufSize := dataFileMap["datafile-1-MB"]
+	reader := getDataReader("datafile-1-MB")
+	defer reader.Close()
+	// Save the data
+	objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+	args["objectName"] = objectName
+
+	// Add metadata to set a canned ACL
+	metaData := map[string]string{
+		"X-Amz-Acl": "public-read-write",
+	}
+
+	_, err = c.PutObject(context.Background(), bucketName,
+		objectName, reader, int64(bufSize),
+		minio.PutObjectOptions{
+			ContentType:  "binary/octet-stream",
+			UserMetadata: metaData,
+		})
+
+	if err != nil {
+		logError(testName, function, args, startTime, "", "PutObject failed", err)
+		return
+	}
+
+	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+	args["ctx"] = ctx
+	defer cancel()
+
+	// Read the data back
+	objectInfo, getObjectACLErr := c.GetObjectACL(ctx, bucketName, objectName)
+	if getObjectACLErr != nil {
+		logError(testName, function, args, startTime, "", "GetObjectACL failed", getObjectACLErr)
+		return
+	}
+
+	s, ok := objectInfo.Metadata["X-Amz-Acl"]
+	if !ok {
+		logError(testName, function, args, startTime, "", "GetObjectACL fail unable to find \"X-Amz-Acl\"", nil)
+		return
+	}
+
+	if len(s) != 1 {
+		logError(testName, function, args, startTime, "", "GetObjectACL fail \"X-Amz-Acl\" canned acl expected \"1\" got "+fmt.Sprintf(`"%d"`, len(s)), nil)
+		return
+	}
+
+	// Do a very limited testing if this is not AWS S3
+	if os.Getenv(serverEndpoint) != "s3.amazonaws.com" {
+		if s[0] != "private" {
+			logError(testName, function, args, startTime, "", "GetObjectACL fail \"X-Amz-Acl\" expected \"private\" but got "+fmt.Sprintf("%q", s[0]), nil)
+			return
+		}
+
+		successLogger(testName, function, args, startTime).Info()
+		return
+	}
+
+	if s[0] != "public-read-write" {
+		logError(testName, function, args, startTime, "", "GetObjectACL fail \"X-Amz-Acl\" expected \"public-read-write\" but got "+fmt.Sprintf("%q", s[0]), nil)
+		return
+	}
+
+	bufSize = dataFileMap["datafile-1-MB"]
+	reader2 := getDataReader("datafile-1-MB")
+	defer reader2.Close()
+	// Save the data
+	objectName = randString(60, rand.NewSource(time.Now().UnixNano()), "")
+	args["objectName"] = objectName
+
+	// Add metadata to set ACL grants
+	metaData = map[string]string{
+		"X-Amz-Grant-Read":  "id=fooread@minio.go",
+		"X-Amz-Grant-Write": "id=foowrite@minio.go",
+	}
+
+	_, err = c.PutObject(context.Background(), bucketName, objectName, reader2, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream", UserMetadata: metaData})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "PutObject failed", err)
+		return
+	}
+
+	ctx, cancel = context.WithTimeout(context.Background(), 10*time.Second)
+	args["ctx"] = ctx
+	defer cancel()
+
+	// Read the data back
+	objectInfo, getObjectACLErr = c.GetObjectACL(ctx, bucketName, objectName)
+	if getObjectACLErr != nil {
+		logError(testName, function, args, startTime, "", "GetObjectACL failed", getObjectACLErr)
+		return
+	}
+
+	if len(objectInfo.Metadata) != 3 {
+		logError(testName, function, args, startTime, "", "GetObjectACL fail expected \"3\" ACLs but got "+fmt.Sprintf(`"%d"`, len(objectInfo.Metadata)), nil)
+		return
+	}
+
+	s, ok = objectInfo.Metadata["X-Amz-Grant-Read"]
+	if !ok {
+		logError(testName, function, args, startTime, "",
"GetObjectACL fail unable to find \"X-Amz-Grant-Read\"", nil) + return + } + + if len(s) != 1 { + logError(testName, function, args, startTime, "", "GetObjectACL fail \"X-Amz-Grant-Read\" acl expected \"1\" got "+fmt.Sprintf(`"%d"`, len(s)), nil) + return + } + + if s[0] != "fooread@minio.go" { + logError(testName, function, args, startTime, "", "GetObjectACL fail \"X-Amz-Grant-Read\" acl expected \"fooread@minio.go\" got "+fmt.Sprintf("%q", s), nil) + return + } + + s, ok = objectInfo.Metadata["X-Amz-Grant-Write"] + if !ok { + logError(testName, function, args, startTime, "", "GetObjectACL fail unable to find \"X-Amz-Grant-Write\"", nil) + return + } + + if len(s) != 1 { + logError(testName, function, args, startTime, "", "GetObjectACL fail \"X-Amz-Grant-Write\" acl expected \"1\" got "+fmt.Sprintf(`"%d"`, len(s)), nil) + return + } + + if s[0] != "foowrite@minio.go" { + logError(testName, function, args, startTime, "", "GetObjectACL fail \"X-Amz-Grant-Write\" acl expected \"foowrite@minio.go\" got "+fmt.Sprintf("%q", s), nil) + return + } + + successLogger(testName, function, args, startTime).Info() +} + +// Test validates putObject with context to see if request cancellation is honored for V2. +func testPutObjectContextV2() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "PutObject(ctx, bucketName, objectName, reader, size, opts)" + args := map[string]interface{}{ + "ctx": "", + "bucketName": "", + "objectName": "", + "size": "", + "opts": "", + } + // Instantiate new minio client object. + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client v2 object creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + + // Make a new bucket. 
+	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+	args["bucketName"] = bucketName
+
+	err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+		return
+	}
+	defer cleanupBucket(bucketName, c)
+	bufSize := dataFileMap["datafile-33-kB"]
+	reader := getDataReader("datafile-33-kB")
+	defer reader.Close()
+
+	objectName := fmt.Sprintf("test-file-%v", rand.Uint32())
+	args["objectName"] = objectName
+
+	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+	args["ctx"] = ctx
+	args["size"] = bufSize
+	defer cancel()
+
+	_, err = c.PutObject(ctx, bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "PutObject with short timeout failed", err)
+		return
+	}
+
+	ctx, cancel = context.WithTimeout(context.Background(), 1*time.Hour)
+	args["ctx"] = ctx
+
+	defer cancel()
+	reader = getDataReader("datafile-33-kB")
+	defer reader.Close()
+	_, err = c.PutObject(ctx, bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "PutObject with long timeout failed", err)
+		return
+	}
+
+	successLogger(testName, function, args, startTime).Info()
+}
+
+// Test get object with GetObject with custom context
+func testGetObjectContextV2() {
+	// initialize logging params
+	startTime := time.Now()
+	testName := getFuncName()
+	function := "GetObject(ctx, bucketName, objectName)"
+	args := map[string]interface{}{
+		"ctx":        "",
+		"bucketName": "",
+		"objectName": "",
+	}
+	// Seed random based on current time.
+	rand.Seed(time.Now().Unix())
+
+	// Instantiate new minio client object.
+	c, err := minio.New(os.Getenv(serverEndpoint),
+		&minio.Options{
+			Creds:  credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Secure: mustParseBool(os.Getenv(enableHTTPS)),
+		})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "MinIO client v2 object creation failed", err)
+		return
+	}
+
+	// Enable tracing, write to stderr.
+	// c.TraceOn(os.Stderr)
+
+	// Set user agent.
+	c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
+
+	// Generate a new random bucket name.
+	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+	args["bucketName"] = bucketName
+
+	// Make a new bucket.
+	err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+		return
+	}
+
+	defer cleanupBucket(bucketName, c)
+
+	bufSize := dataFileMap["datafile-33-kB"]
+	reader := getDataReader("datafile-33-kB")
+	defer reader.Close()
+	// Save the data
+	objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+	args["objectName"] = objectName
+
+	_, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "PutObject call failed", err)
+		return
+	}
+
+	ctx, cancel := context.WithTimeout(context.Background(), 1*time.Nanosecond)
+	args["ctx"] = ctx
+	cancel()
+
+	r, err := c.GetObject(ctx, bucketName, objectName, minio.GetObjectOptions{})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "GetObject failed unexpectedly", err)
+		return
+	}
+	if _, err = r.Stat(); err == nil {
+		logError(testName, function, args, startTime, "", "GetObject should fail on short timeout", err)
+		return
+	}
+	r.Close()
+
+	ctx, cancel = context.WithTimeout(context.Background(), 1*time.Hour)
+	defer cancel()
+
+	// Read the data back
+	r, err = c.GetObject(ctx, bucketName, objectName, minio.GetObjectOptions{})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "GetObject shouldn't fail on longer timeout", err)
+		return
+	}
+
+	st, err := r.Stat()
+	if err != nil {
+		logError(testName, function, args, startTime, "", "object Stat call failed", err)
+		return
+	}
+	if st.Size != int64(bufSize) {
+		logError(testName, function, args, startTime, "", fmt.Sprintf("Number of bytes in stat does not match, expected %d got %d", bufSize, st.Size), err)
+		return
+	}
+	if err := r.Close(); err != nil {
+		logError(testName, function, args, startTime, "", "object Close() call failed", err)
+		return
+	}
+
+	successLogger(testName, function, args, startTime).Info()
+}
+
+// Test get object with FGetObject with custom context
+func testFGetObjectContextV2() {
+	// initialize logging params
+	startTime := time.Now()
+	testName := getFuncName()
+	function := "FGetObject(ctx, bucketName, objectName, fileName)"
+	args := map[string]interface{}{
+		"ctx":        "",
+		"bucketName": "",
+		"objectName": "",
+		"fileName":   "",
+	}
+	// Seed random based on current time.
+	rand.Seed(time.Now().Unix())
+
+	// Instantiate new minio client object.
+	c, err := minio.New(os.Getenv(serverEndpoint),
+		&minio.Options{
+			Creds:  credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Secure: mustParseBool(os.Getenv(enableHTTPS)),
+		})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "MinIO client v2 object creation failed", err)
+		return
+	}
+
+	// Enable tracing, write to stderr.
+	// c.TraceOn(os.Stderr)
+
+	// Set user agent.
+	c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
+
+	// Generate a new random bucket name.
+	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+	args["bucketName"] = bucketName
+
+	// Make a new bucket.
+	err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "MakeBucket call failed", err)
+		return
+	}
+
+	defer cleanupBucket(bucketName, c)
+
+	bufSize := dataFileMap["datafile-1-MB"]
+	reader := getDataReader("datafile-1-MB")
+	defer reader.Close()
+	// Save the data
+	objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+	args["objectName"] = objectName
+
+	_, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "PutObject call failed", err)
+		return
+	}
+
+	ctx, cancel := context.WithTimeout(context.Background(), 1*time.Nanosecond)
+	args["ctx"] = ctx
+	defer cancel()
+
+	fileName := "tempfile-context"
+	args["fileName"] = fileName
+
+	// Read the data back
+	err = c.FGetObject(ctx, bucketName, objectName, fileName+"-f", minio.GetObjectOptions{})
+	if err == nil {
+		logError(testName, function, args, startTime, "", "FGetObject should fail on short timeout", err)
+		return
+	}
+	ctx, cancel = context.WithTimeout(context.Background(), 1*time.Hour)
+	defer cancel()
+
+	// Read the data back
+	err = c.FGetObject(ctx, bucketName, objectName, fileName+"-fcontext", minio.GetObjectOptions{})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "FGetObject call shouldn't fail on long timeout", err)
+		return
+	}
+
+	if err = os.Remove(fileName + "-fcontext"); err != nil {
+		logError(testName, function, args, startTime, "", "Remove file failed", err)
+		return
+	}
+
+	successLogger(testName, function, args, startTime).Info()
+}
+
+// Test list object v1 and V2
+func testListObjects() {
+	// initialize logging params
+	startTime := time.Now()
+	testName := getFuncName()
+	function := "ListObjects(bucketName, objectPrefix, recursive, doneCh)"
+	args := map[string]interface{}{
+		"bucketName":   "",
+		"objectPrefix": "",
+		"recursive":    "true",
+	}
+	// Seed random based on current time.
+	rand.Seed(time.Now().Unix())
+
+	// Instantiate new minio client object.
+	c, err := minio.New(os.Getenv(serverEndpoint),
+		&minio.Options{
+			Creds:  credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Secure: mustParseBool(os.Getenv(enableHTTPS)),
+		})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "MinIO client v4 object creation failed", err)
+		return
+	}
+
+	// Enable tracing, write to stderr.
+	// c.TraceOn(os.Stderr)
+
+	// Set user agent.
+	c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
+
+	// Generate a new random bucket name.
+	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+	args["bucketName"] = bucketName
+
+	// Make a new bucket.
+	err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+		return
+	}
+
+	defer cleanupBucket(bucketName, c)
+
+	testObjects := []struct {
+		name         string
+		storageClass string
+	}{
+		// Special characters
+		{"foo bar", "STANDARD"},
+		{"foo-%", "STANDARD"},
+		{"random-object-1", "STANDARD"},
+		{"random-object-2", "REDUCED_REDUNDANCY"},
+	}
+
+	for i, object := range testObjects {
+		bufSize := dataFileMap["datafile-33-kB"]
+		reader := getDataReader("datafile-33-kB")
+		defer reader.Close()
+		_, err = c.PutObject(context.Background(), bucketName, object.name, reader, int64(bufSize),
+			minio.PutObjectOptions{ContentType: "binary/octet-stream", StorageClass: object.storageClass})
+		if err != nil {
+			logError(testName, function, args, startTime, "", fmt.Sprintf("PutObject %d call failed", i+1), err)
+			return
+		}
+	}
+
+	testList := func(listFn func(context.Context, string, minio.ListObjectsOptions) <-chan minio.ObjectInfo, bucket string, opts minio.ListObjectsOptions) {
+		var objCursor int
+
+		// check for object name and storage-class from listing object result
+		for objInfo := range listFn(context.Background(), bucket, opts) {
+			if objInfo.Err != nil {
+				logError(testName, function, args, startTime, "", "ListObjects failed unexpectedly", objInfo.Err)
+				return
+			}
+			if objInfo.Key != testObjects[objCursor].name {
+				logError(testName, function, args, startTime, "", "ListObjects does not return expected object name", nil)
+				return
+			}
+			if objInfo.StorageClass != testObjects[objCursor].storageClass {
+				// Ignored as Gateways (Azure/GCS etc) won't return storage class
+				ignoredLog(testName, function, args, startTime, "ListObjects doesn't return expected storage class").Info()
+			}
+			objCursor++
+		}
+
+		if objCursor != len(testObjects) {
+			logError(testName, function, args, startTime, "", "ListObjects returned unexpected number of items", errors.New("unexpected object count"))
+			return
+		}
+	}
+
+	testList(c.ListObjects, bucketName, minio.ListObjectsOptions{Recursive: true, UseV1: true})
+	testList(c.ListObjects, bucketName, minio.ListObjectsOptions{Recursive: true})
+	testList(c.ListObjects, bucketName, minio.ListObjectsOptions{Recursive: true, WithMetadata: true})
+
+	successLogger(testName, function, args, startTime).Info()
+}
+
+// Test deleting multiple objects with object retention set in Governance mode
+func testRemoveObjects() {
+	// initialize logging params
+	startTime := time.Now()
+	testName := getFuncName()
+	function := "RemoveObjects(bucketName, objectsCh, opts)"
+	args := map[string]interface{}{
+		"bucketName":   "",
+		"objectPrefix": "",
+		"recursive":    "true",
+	}
+	// Seed random based on current time.
+	rand.Seed(time.Now().Unix())
+
+	// Instantiate new minio client object.
+	c, err := minio.New(os.Getenv(serverEndpoint),
+		&minio.Options{
+			Creds:  credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Secure: mustParseBool(os.Getenv(enableHTTPS)),
+		})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "MinIO client v4 object creation failed", err)
+		return
+	}
+
+	// Enable tracing, write to stderr.
+	// c.TraceOn(os.Stderr)
+
+	// Set user agent.
+	c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
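The three listing modes compared by testList above differ only in option flags; a condensed sketch (hypothetical helper reusing this file's imports):

// listAllModes walks a bucket with V1, V2, and V2-with-metadata listings.
func listAllModes(c *minio.Client, bucket string) {
	for _, opts := range []minio.ListObjectsOptions{
		{Recursive: true, UseV1: true},        // legacy V1 listing
		{Recursive: true},                     // V2 listing (the default)
		{Recursive: true, WithMetadata: true}, // V2 plus user metadata
	} {
		for obj := range c.ListObjects(context.Background(), bucket, opts) {
			if obj.Err != nil {
				return
			}
			fmt.Println(obj.Key, obj.StorageClass)
		}
	}
}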
+	// Generate a new random bucket name.
+	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+	args["bucketName"] = bucketName
+	objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+	args["objectName"] = objectName
+
+	// Make a new bucket.
+	err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1", ObjectLocking: true})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+		return
+	}
+
+	bufSize := dataFileMap["datafile-129-MB"]
+	reader := getDataReader("datafile-129-MB")
+	defer reader.Close()
+
+	_, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "Error uploading object", err)
+		return
+	}
+
+	// Replace with smaller...
+	bufSize = dataFileMap["datafile-10-kB"]
+	reader = getDataReader("datafile-10-kB")
+	defer reader.Close()
+
+	_, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "Error uploading object", err)
+		return
+	}
+
+	t := time.Date(2030, time.April, 25, 14, 0, 0, 0, time.UTC)
+	m := minio.RetentionMode(minio.Governance)
+	opts := minio.PutObjectRetentionOptions{
+		GovernanceBypass: false,
+		RetainUntilDate:  &t,
+		Mode:             &m,
+	}
+	err = c.PutObjectRetention(context.Background(), bucketName, objectName, opts)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "Error setting retention", err)
+		return
+	}
+
+	objectsCh := make(chan minio.ObjectInfo)
+	// Send object names that are needed to be removed to objectsCh
+	go func() {
+		defer close(objectsCh)
+		// List all objects from a bucket-name with a matching prefix.
+		for object := range c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{UseV1: true, Recursive: true}) {
+			if object.Err != nil {
+				logError(testName, function, args, startTime, "", "Error listing objects", object.Err)
+				return
+			}
+			objectsCh <- object
+		}
+	}()
+
+	for rErr := range c.RemoveObjects(context.Background(), bucketName, objectsCh, minio.RemoveObjectsOptions{}) {
+		// Error is expected here because Retention is set on the object
+		// and RemoveObjects is called without Bypass Governance
+		if rErr.Err == nil {
+			logError(testName, function, args, startTime, "", "Expected error during deletion", nil)
+			return
+		}
+	}
+
+	objectsCh1 := make(chan minio.ObjectInfo)
+
+	// Send object names that are needed to be removed to objectsCh
+	go func() {
+		defer close(objectsCh1)
+		// List all objects from a bucket-name with a matching prefix.
+ for object := range c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{UseV1: true, Recursive: true}) { + if object.Err != nil { + logError(testName, function, args, startTime, "", "Error listing objects", object.Err) + return + } + objectsCh1 <- object + } + }() + + opts1 := minio.RemoveObjectsOptions{ + GovernanceBypass: true, + } + + for rErr := range c.RemoveObjects(context.Background(), bucketName, objectsCh1, opts1) { + // Error is not expected here because Retention is set on the object + // and RemoveObjects is called with Bypass Governance + logError(testName, function, args, startTime, "", "Error detected during deletion", rErr.Err) + return + } + + // Delete all objects and buckets + if err = cleanupVersionedBucket(bucketName, c); err != nil { + logError(testName, function, args, startTime, "", "CleanupBucket failed", err) + return + } + + successLogger(testName, function, args, startTime).Info() +} + +// Convert string to bool and always return false if any error +func mustParseBool(str string) bool { + b, err := strconv.ParseBool(str) + if err != nil { + return false + } + return b +} + +func main() { + // Output to stdout instead of the default stderr + log.SetOutput(os.Stdout) + // create custom formatter + mintFormatter := mintJSONFormatter{} + // set custom formatter + log.SetFormatter(&mintFormatter) + // log Info or above -- success cases are Info level, failures are Fatal level + log.SetLevel(log.InfoLevel) + + tls := mustParseBool(os.Getenv(enableHTTPS)) + kms := mustParseBool(os.Getenv(enableKMS)) + if os.Getenv(enableKMS) == "" { + // Default to KMS tests. + kms = true + } + + // execute tests + if isFullMode() { + testMakeBucketErrorV2() + testGetObjectClosedTwiceV2() + testFPutObjectV2() + testMakeBucketRegionsV2() + testGetObjectReadSeekFunctionalV2() + testGetObjectReadAtFunctionalV2() + testGetObjectRanges() + testCopyObjectV2() + testFunctionalV2() + testComposeObjectErrorCasesV2() + testCompose10KSourcesV2() + testUserMetadataCopyingV2() + testPutObject0ByteV2() + testPutObjectNoLengthV2() + testPutObjectsUnknownV2() + testGetObjectContextV2() + testFPutObjectContextV2() + testFGetObjectContextV2() + testPutObjectContextV2() + testPutObjectWithVersioning() + testMakeBucketError() + testMakeBucketRegions() + testPutObjectWithMetadata() + testPutObjectReadAt() + testPutObjectStreaming() + testGetObjectSeekEnd() + testGetObjectClosedTwice() + testGetObjectS3Zip() + testRemoveMultipleObjects() + testRemoveMultipleObjectsWithResult() + testFPutObjectMultipart() + testFPutObject() + testGetObjectReadSeekFunctional() + testGetObjectReadAtFunctional() + testGetObjectReadAtWhenEOFWasReached() + testPresignedPostPolicy() + testCopyObject() + testComposeObjectErrorCases() + testCompose10KSources() + testUserMetadataCopying() + testBucketNotification() + testFunctional() + testGetObjectModified() + testPutObjectUploadSeekedObject() + testGetObjectContext() + testFPutObjectContext() + testFGetObjectContext() + testGetObjectACLContext() + testPutObjectContext() + testStorageClassMetadataPutObject() + testStorageClassInvalidMetadataPutObject() + testStorageClassMetadataCopyObject() + testPutObjectWithContentLanguage() + testListObjects() + testRemoveObjects() + testListObjectVersions() + testStatObjectWithVersioning() + testGetObjectWithVersioning() + testCopyObjectWithVersioning() + testConcurrentCopyObjectWithVersioning() + testComposeObjectWithVersioning() + testRemoveObjectWithVersioning() + testRemoveObjectsWithVersioning() + 
+		testObjectTaggingWithVersioning()
+
+		// SSE-C tests will only work over TLS connection.
+		if tls {
+			testSSECEncryptionPutGet()
+			testSSECEncryptionFPut()
+			testSSECEncryptedGetObjectReadAtFunctional()
+			testSSECEncryptedGetObjectReadSeekFunctional()
+			testEncryptedCopyObjectV2()
+			testEncryptedSSECToSSECCopyObject()
+			testEncryptedSSECToUnencryptedCopyObject()
+			testUnencryptedToSSECCopyObject()
+			testUnencryptedToUnencryptedCopyObject()
+			testEncryptedEmptyObject()
+			testDecryptedCopyObject()
+			testSSECEncryptedToSSECCopyObjectPart()
+			testSSECMultipartEncryptedToSSECCopyObjectPart()
+			testSSECEncryptedToUnencryptedCopyPart()
+			testUnencryptedToSSECCopyObjectPart()
+			testUnencryptedToUnencryptedCopyPart()
+			testEncryptedSSECToSSES3CopyObject()
+			testEncryptedSSES3ToSSECCopyObject()
+			testSSECEncryptedToSSES3CopyObjectPart()
+			testSSES3EncryptedToSSECCopyObjectPart()
+		}
+
+		// KMS tests
+		if kms {
+			testSSES3EncryptionPutGet()
+			testSSES3EncryptionFPut()
+			testSSES3EncryptedGetObjectReadAtFunctional()
+			testSSES3EncryptedGetObjectReadSeekFunctional()
+			testEncryptedSSES3ToSSES3CopyObject()
+			testEncryptedSSES3ToUnencryptedCopyObject()
+			testUnencryptedToSSES3CopyObject()
+			testUnencryptedToSSES3CopyObjectPart()
+			testSSES3EncryptedToUnencryptedCopyPart()
+			testSSES3EncryptedToSSES3CopyObjectPart()
+		}
+	} else {
+		testFunctional()
+		testFunctionalV2()
+	}
+}
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/assume_role.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/assume_role.go
index 3b1b547b9468d..107a11b14cd95 100644
--- a/vendor/github.com/minio/minio-go/v7/pkg/credentials/assume_role.go
+++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/assume_role.go
@@ -18,6 +18,7 @@ package credentials
 
 import (
+	"bytes"
 	"encoding/hex"
 	"encoding/xml"
 	"errors"
@@ -184,11 +185,26 @@ func getAssumeRoleCredentials(clnt *http.Client, endpoint string, opts STSAssume
 	}
 	defer closeResponse(resp)
 	if resp.StatusCode != http.StatusOK {
-		return AssumeRoleResponse{}, errors.New(resp.Status)
+		var errResp ErrorResponse
+		buf, err := ioutil.ReadAll(resp.Body)
+		if err != nil {
+			return AssumeRoleResponse{}, err
+		}
+		_, err = xmlDecodeAndBody(bytes.NewReader(buf), &errResp)
+		if err != nil {
+			var s3Err Error
+			if _, err = xmlDecodeAndBody(bytes.NewReader(buf), &s3Err); err != nil {
+				return AssumeRoleResponse{}, err
+			}
+			errResp.RequestID = s3Err.RequestID
+			errResp.STSError.Code = s3Err.Code
+			errResp.STSError.Message = s3Err.Message
+		}
+		return AssumeRoleResponse{}, errResp
 	}
 
 	a := AssumeRoleResponse{}
-	if err = xml.NewDecoder(resp.Body).Decode(&a); err != nil {
+	if _, err = xmlDecodeAndBody(resp.Body, &a); err != nil {
 		return AssumeRoleResponse{}, err
 	}
 	return a, nil
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/credentials.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/credentials.go
index 62d1701eee37d..6b93a27fbd476 100644
--- a/vendor/github.com/minio/minio-go/v7/pkg/credentials/credentials.go
+++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/credentials.go
@@ -22,8 +22,13 @@ import (
 	"time"
 )
 
-// STSVersion sts version string
-const STSVersion = "2011-06-15"
+const (
+	// STSVersion sts version string
+	STSVersion = "2011-06-15"
+
+	// Fraction of the validity period to keep before a refresh is triggered
+	defaultExpiryWindow = 0.8
+)
 
 // A Value is the AWS credentials value for individual credential fields.
 type Value struct {
@@ -82,10 +87,15 @@ type Expiry struct {
 // the expiration time given to ensure no requests are made with expired
 // tokens.
 func (e *Expiry) SetExpiration(expiration time.Time, window time.Duration) {
-	e.expiration = expiration
-	if window > 0 {
-		e.expiration = e.expiration.Add(-window)
+	if e.CurrentTime == nil {
+		e.CurrentTime = time.Now
+	}
+	cut := window
+	if cut < 0 {
+		expireIn := expiration.Sub(e.CurrentTime())
+		cut = time.Duration(float64(expireIn) * (1 - defaultExpiryWindow))
 	}
+	e.expiration = expiration.Add(-cut)
 }
 
 // IsExpired returns if the credentials are expired.
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/error_response.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/error_response.go
new file mode 100644
index 0000000000000..f4b027a41451c
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/error_response.go
@@ -0,0 +1,96 @@
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2021 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package credentials
+
+import (
+	"bytes"
+	"encoding/xml"
+	"fmt"
+	"io"
+	"io/ioutil"
+)
+
+// ErrorResponse - Is the typed error returned.
+// ErrorResponse struct should be comparable since it is compared inside
+// golang http API (https://github.com/golang/go/issues/29768)
+type ErrorResponse struct {
+	XMLName  xml.Name `xml:"https://sts.amazonaws.com/doc/2011-06-15/ ErrorResponse" json:"-"`
+	STSError struct {
+		Type    string `xml:"Type"`
+		Code    string `xml:"Code"`
+		Message string `xml:"Message"`
+	} `xml:"Error"`
+	RequestID string `xml:"RequestId"`
+}
+
+// Error - Is the typed error returned by all API operations.
+type Error struct {
+	XMLName    xml.Name `xml:"Error" json:"-"`
+	Code       string
+	Message    string
+	BucketName string
+	Key        string
+	Resource   string
+	RequestID  string `xml:"RequestId"`
+	HostID     string `xml:"HostId"`
+
+	// Region where the bucket is located. This header is returned
+	// only in HEAD bucket and ListObjects response.
+	Region string
+
+	// Captures the server string returned in response header.
+	Server string
+
+	// Underlying HTTP status code for the returned error
+	StatusCode int `xml:"-" json:"-"`
+}
+
+// Error - Returns S3 error string.
+func (e Error) Error() string {
+	if e.Message == "" {
+		return fmt.Sprintf("Error response code %s.", e.Code)
+	}
+	return e.Message
+}
+
+// Error - Returns STS error string.
+func (e ErrorResponse) Error() string {
+	if e.STSError.Message == "" {
+		return fmt.Sprintf("Error response code %s.", e.STSError.Code)
+	}
+	return e.STSError.Message
+}
+
+// xmlDecoder provide decoded value in xml.
+func xmlDecoder(body io.Reader, v interface{}) error {
+	d := xml.NewDecoder(body)
+	return d.Decode(v)
+}
+
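+// A worked example of the refresh window defined above, with assumed values:
+// given a negative window, SetExpiration keeps defaultExpiryWindow (80%) of
+// the validity and trims the rest as a refresh buffer.
+//
+//	var e Expiry
+//	e.SetExpiration(time.Now().Add(time.Hour), -1) // i.e. DefaultExpiryWindow
+//	// cut = 1h * (1 - 0.8) = 12m, so IsExpired() reports true after ~48m.
+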
+// xmlDecodeAndBody reads the whole body up to 1MB and
+// tries to XML decode it into v.
+// The body that was read and any error from reading or decoding is returned.
+func xmlDecodeAndBody(bodyReader io.Reader, v interface{}) ([]byte, error) {
+	// read the whole body (up to 1MB)
+	const maxBodyLength = 1 << 20
+	body, err := ioutil.ReadAll(io.LimitReader(bodyReader, maxBodyLength))
+	if err != nil {
+		return nil, err
+	}
+	return bytes.TrimSpace(body), xmlDecoder(bytes.NewReader(body), v)
+}
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/file_minio_client.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/file_minio_client.go
index ca6db005b8f83..dc3f3cc0b410b 100644
--- a/vendor/github.com/minio/minio-go/v7/pkg/credentials/file_minio_client.go
+++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/file_minio_client.go
@@ -122,7 +122,7 @@ type config struct {
 // returned if it fails to read from the file.
 func loadAlias(filename, alias string) (hostConfig, error) {
 	cfg := &config{}
-	var json = jsoniter.ConfigCompatibleWithStandardLibrary
+	json := jsoniter.ConfigCompatibleWithStandardLibrary
 
 	configBytes, err := ioutil.ReadFile(filename)
 	if err != nil {
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/iam_aws.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/iam_aws.go
index 2ec0afc190468..f7a4af4a2aec8 100644
--- a/vendor/github.com/minio/minio-go/v7/pkg/credentials/iam_aws.go
+++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/iam_aws.go
@@ -19,6 +19,7 @@ package credentials
 
 import (
 	"bufio"
+	"context"
 	"errors"
 	"fmt"
 	"io/ioutil"
@@ -38,7 +39,10 @@ import (
 // prior to the credentials actually expiring. This is beneficial
 // so race conditions with expiring credentials do not cause
 // request to fail unexpectedly due to ExpiredTokenException exceptions.
-const DefaultExpiryWindow = time.Second * 10 // 10 secs
+// DefaultExpiryWindow can be used as a parameter to (*Expiry).SetExpiration.
+// When used, a token refresh is triggered once 80% of the time until the
+// actual expiration has elapsed.
+const DefaultExpiryWindow = -1
 
 // A IAM retrieves credentials from the EC2 service, and keeps track if
 // those credentials are expired.
@@ -59,6 +63,10 @@ const (
 	defaultECSRoleEndpoint      = "http://169.254.170.2"
 	defaultSTSRoleEndpoint      = "https://sts.amazonaws.com"
 	defaultIAMSecurityCredsPath = "/latest/meta-data/iam/security-credentials/"
+	tokenRequestTTLHeader       = "X-aws-ec2-metadata-token-ttl-seconds"
+	tokenPath                   = "/latest/api/token"
+	tokenTTL                    = "21600"
+	tokenRequestHeader          = "X-aws-ec2-metadata-token"
 )
 
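+// The constants above carry the IMDSv2 handshake; reduced to its essence
+// (standard metadata endpoint assumed, error handling trimmed for brevity):
+//
+//	token, _ := fetchIMDSToken(client, "http://169.254.169.254")
+//	req, _ := http.NewRequest(http.MethodGet,
+//		"http://169.254.169.254"+defaultIAMSecurityCredsPath, nil)
+//	if token != "" {
+//		req.Header.Add(tokenRequestHeader, token)
+//	}
+//	resp, _ := client.Do(req)
+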
 // NewIAM returns a pointer to a new Credentials object wrapping the IAM.
@@ -75,6 +83,7 @@ func NewIAM(endpoint string) *Credentials {
 // Error will be returned if the request fails, or unable to extract
 // the desired
 func (m *IAM) Retrieve() (Value, error) {
+	token := os.Getenv("AWS_CONTAINER_AUTHORIZATION_TOKEN")
 	var roleCreds ec2RoleCredRespBody
 	var err error
 
@@ -104,7 +113,7 @@ func (m *IAM) Retrieve() (Value, error) {
 				return &WebIdentityToken{Token: string(token)}, nil
 			},
-			roleARN:         os.Getenv("AWS_ROLE_ARN"),
+			RoleARN:         os.Getenv("AWS_ROLE_ARN"),
 			roleSessionName: os.Getenv("AWS_ROLE_SESSION_NAME"),
 		}
 
@@ -120,7 +129,7 @@ func (m *IAM) Retrieve() (Value, error) {
 				os.Getenv("AWS_CONTAINER_CREDENTIALS_RELATIVE_URI"))
 		}
 
-		roleCreds, err = getEcsTaskCredentials(m.Client, endpoint)
+		roleCreds, err = getEcsTaskCredentials(m.Client, endpoint, token)
 
 	case len(os.Getenv("AWS_CONTAINER_CREDENTIALS_FULL_URI")) > 0:
 		if len(endpoint) == 0 {
@@ -134,7 +143,7 @@ func (m *IAM) Retrieve() (Value, error) {
 			}
 		}
 
-		roleCreds, err = getEcsTaskCredentials(m.Client, endpoint)
+		roleCreds, err = getEcsTaskCredentials(m.Client, endpoint, token)
 
 	default:
 		roleCreds, err = getCredentials(m.Client, endpoint)
@@ -176,10 +185,6 @@ type ec2RoleCredRespBody struct {
 // be sent to fetch the rolling access credentials.
 // http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html
 func getIAMRoleURL(endpoint string) (*url.URL, error) {
-	if endpoint == "" {
-		endpoint = defaultIAMRoleEndpoint
-	}
-
 	u, err := url.Parse(endpoint)
 	if err != nil {
 		return nil, err
@@ -192,11 +197,14 @@ func getIAMRoleURL(endpoint string) (*url.URL, error) {
 // with the current EC2 service. If there are no credentials,
 // or there is an error making or receiving the request.
 // http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html
-func listRoleNames(client *http.Client, u *url.URL) ([]string, error) {
+func listRoleNames(client *http.Client, u *url.URL, token string) ([]string, error) {
 	req, err := http.NewRequest(http.MethodGet, u.String(), nil)
 	if err != nil {
 		return nil, err
 	}
+	if token != "" {
+		req.Header.Add(tokenRequestHeader, token)
+	}
 	resp, err := client.Do(req)
 	if err != nil {
 		return nil, err
@@ -219,12 +227,16 @@ func listRoleNames(client *http.Client, u *url.URL) ([]string, error) {
 	return credsList, nil
 }
 
-func getEcsTaskCredentials(client *http.Client, endpoint string) (ec2RoleCredRespBody, error) {
+func getEcsTaskCredentials(client *http.Client, endpoint string, token string) (ec2RoleCredRespBody, error) {
 	req, err := http.NewRequest(http.MethodGet, endpoint, nil)
 	if err != nil {
 		return ec2RoleCredRespBody{}, err
 	}
 
+	if token != "" {
+		req.Header.Set("Authorization", token)
+	}
+
 	resp, err := client.Do(req)
 	if err != nil {
 		return ec2RoleCredRespBody{}, err
@@ -242,12 +254,42 @@ func getEcsTaskCredentials(client *http.Client, endpoint string) (ec2RoleCredRes
 	return respCreds, nil
 }
 
+func fetchIMDSToken(client *http.Client, endpoint string) (string, error) {
+	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
+	defer cancel()
+
+	req, err := http.NewRequestWithContext(ctx, http.MethodPut, endpoint+tokenPath, nil)
+	if err != nil {
+		return "", err
+	}
+	req.Header.Add(tokenRequestTTLHeader, tokenTTL)
+	resp, err := client.Do(req)
+	if err != nil {
+		return "", err
+	}
+	defer resp.Body.Close()
+	data, err := ioutil.ReadAll(resp.Body)
+	if err != nil {
+		return "", err
+	}
+	if resp.StatusCode != http.StatusOK {
+		return "", errors.New(resp.Status)
+	}
+	return string(data), nil
+}
+
 // getCredentials - obtains the credentials from the IAM role name associated with
 // the current EC2 service.
 //
 // If the credentials cannot be found, or there is an error
 // reading the response an error will be returned.
 func getCredentials(client *http.Client, endpoint string) (ec2RoleCredRespBody, error) {
+	if endpoint == "" {
+		endpoint = defaultIAMRoleEndpoint
+	}
+
+	// https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/configuring-instance-metadata-service.html
+	token, _ := fetchIMDSToken(client, endpoint)
 
 	// http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html
 	u, err := getIAMRoleURL(endpoint)
@@ -256,7 +298,7 @@ func getCredentials(client *http.Client, endpoint string) (ec2RoleCredRespBody,
 	}
 
 	// http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html
-	roleNames, err := listRoleNames(client, u)
+	roleNames, err := listRoleNames(client, u, token)
 	if err != nil {
 		return ec2RoleCredRespBody{}, err
 	}
@@ -280,6 +322,9 @@ func getCredentials(client *http.Client, endpoint string) (ec2RoleCredRespBody,
 	if err != nil {
 		return ec2RoleCredRespBody{}, err
 	}
+	if token != "" {
+		req.Header.Add(tokenRequestHeader, token)
+	}
 
 	resp, err := client.Do(req)
 	if err != nil {
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/signature-type.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/signature_type.go
similarity index 100%
rename from vendor/github.com/minio/minio-go/v7/pkg/credentials/signature-type.go
rename to vendor/github.com/minio/minio-go/v7/pkg/credentials/signature_type.go
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_client_grants.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_client_grants.go
index b79f920f58543..1f106ef72c59b 100644
--- a/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_client_grants.go
+++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_client_grants.go
@@ -18,9 +18,11 @@ package credentials
 
 import (
+	"bytes"
 	"encoding/xml"
 	"errors"
 	"fmt"
+	"io/ioutil"
 	"net/http"
 	"net/url"
 	"time"
@@ -103,8 +105,8 @@ func NewSTSClientGrants(stsEndpoint string, getClientGrantsTokenExpiry func() (*
 }
 
 func getClientGrantsCredentials(clnt *http.Client, endpoint string,
-	getClientGrantsTokenExpiry func() (*ClientGrantsToken, error)) (AssumeRoleWithClientGrantsResponse, error) {
-
+	getClientGrantsTokenExpiry func() (*ClientGrantsToken, error),
+) (AssumeRoleWithClientGrantsResponse, error) {
 	accessToken, err := getClientGrantsTokenExpiry()
 	if err != nil {
 		return AssumeRoleWithClientGrantsResponse{}, err
@@ -132,7 +134,22 @@ func getClientGrantsCredentials(clnt *http.Client, endpoint string,
 	}
 	defer resp.Body.Close()
 	if resp.StatusCode != http.StatusOK {
-		return AssumeRoleWithClientGrantsResponse{}, errors.New(resp.Status)
+		var errResp ErrorResponse
+		buf, err := ioutil.ReadAll(resp.Body)
+		if err != nil {
+			return AssumeRoleWithClientGrantsResponse{}, err
+		}
+		_, err = xmlDecodeAndBody(bytes.NewReader(buf), &errResp)
+		if err != nil {
+			var s3Err Error
+			if _, err = xmlDecodeAndBody(bytes.NewReader(buf), &s3Err); err != nil {
+				return AssumeRoleWithClientGrantsResponse{}, err
+			}
+			errResp.RequestID = s3Err.RequestID
+			errResp.STSError.Code = s3Err.Code
+			errResp.STSError.Message = s3Err.Message
+		}
+		return AssumeRoleWithClientGrantsResponse{}, errResp
 	}
 
 	a := AssumeRoleWithClientGrantsResponse{}
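
// A sketch of the two-step error decode used above: try the STS
// ErrorResponse schema first, then fall back to the plain S3 Error schema
// (buf is assumed to hold the raw XML error body):
//
//	var errResp ErrorResponse
//	if _, err := xmlDecodeAndBody(bytes.NewReader(buf), &errResp); err != nil {
//		var s3Err Error
//		if _, err := xmlDecodeAndBody(bytes.NewReader(buf), &s3Err); err == nil {
//			errResp.RequestID = s3Err.RequestID
//			errResp.STSError.Code = s3Err.Code
//			errResp.STSError.Message = s3Err.Message
//		}
//	}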
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_ldap_identity.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_ldap_identity.go
index bcb3c36a1748d..586995e86f6bf 100644
--- a/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_ldap_identity.go
+++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_ldap_identity.go
@@ -1,6 +1,6 @@
 /*
  * MinIO Go Library for Amazon S3 Compatible Cloud Storage
- * Copyright 2019 MinIO, Inc.
+ * Copyright 2019-2021 MinIO, Inc.
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -18,8 +18,10 @@
 package credentials
 
 import (
+	"bytes"
 	"encoding/xml"
-	"errors"
+	"fmt"
+	"io/ioutil"
 	"net/http"
 	"net/url"
 	"time"
@@ -60,26 +62,86 @@ type LDAPIdentity struct {
 
 	// LDAP username/password used to fetch LDAP STS credentials.
 	LDAPUsername, LDAPPassword string
+
+	// Session policy to apply to the generated credentials. Leave empty to
+	// use the full access policy available to the user.
+	Policy string
+
+	// RequestedExpiry is the configured expiry duration for credentials
+	// requested from LDAP.
+	RequestedExpiry time.Duration
 }
 
 // NewLDAPIdentity returns new credentials object that uses LDAP
 // Identity.
-func NewLDAPIdentity(stsEndpoint, ldapUsername, ldapPassword string) (*Credentials, error) {
+func NewLDAPIdentity(stsEndpoint, ldapUsername, ldapPassword string, optFuncs ...LDAPIdentityOpt) (*Credentials, error) {
+	l := LDAPIdentity{
+		Client:       &http.Client{Transport: http.DefaultTransport},
+		STSEndpoint:  stsEndpoint,
+		LDAPUsername: ldapUsername,
+		LDAPPassword: ldapPassword,
+	}
+	for _, optFunc := range optFuncs {
+		optFunc(&l)
+	}
+	return New(&l), nil
+}
+
+// LDAPIdentityOpt is a function type used to configure the LDAPIdentity
+// instance.
+type LDAPIdentityOpt func(*LDAPIdentity)
+
+// LDAPIdentityPolicyOpt sets the session policy for requested credentials.
+func LDAPIdentityPolicyOpt(policy string) LDAPIdentityOpt {
+	return func(k *LDAPIdentity) {
+		k.Policy = policy
+	}
+}
+
+// LDAPIdentityExpiryOpt sets the expiry duration for requested credentials.
+func LDAPIdentityExpiryOpt(d time.Duration) LDAPIdentityOpt {
+	return func(k *LDAPIdentity) {
+		k.RequestedExpiry = d
+	}
+}
+
+func stripPassword(err error) error {
+	urlErr, ok := err.(*url.Error)
+	if ok {
+		u, _ := url.Parse(urlErr.URL)
+		if u == nil {
+			return urlErr
+		}
+		values := u.Query()
+		values.Set("LDAPPassword", "xxxxx")
+		u.RawQuery = values.Encode()
+		urlErr.URL = u.String()
+		return urlErr
+	}
+	return err
+}
+
+// NewLDAPIdentityWithSessionPolicy returns new credentials object that uses
+// LDAP Identity with a specified session policy. The `policy` parameter must be
+// a JSON string specifying the policy document.
+//
+// Deprecated: Use the `LDAPIdentityPolicyOpt` with `NewLDAPIdentity` instead.
+func NewLDAPIdentityWithSessionPolicy(stsEndpoint, ldapUsername, ldapPassword, policy string) (*Credentials, error) {
 	return New(&LDAPIdentity{
 		Client:       &http.Client{Transport: http.DefaultTransport},
 		STSEndpoint:  stsEndpoint,
 		LDAPUsername: ldapUsername,
 		LDAPPassword: ldapPassword,
+		Policy:       policy,
 	}), nil
 }
 
 // Retrieve gets the credential by calling the MinIO STS API for
 // LDAP on the configured stsEndpoint.
 func (k *LDAPIdentity) Retrieve() (value Value, err error) {
-	u, kerr := url.Parse(k.STSEndpoint)
-	if kerr != nil {
-		err = kerr
-		return
+	u, err := url.Parse(k.STSEndpoint)
+	if err != nil {
+		return value, err
 	}
 
 	v := url.Values{}
@@ -87,25 +149,43 @@ func (k *LDAPIdentity) Retrieve() (value Value, err error) {
 	v.Set("Version", STSVersion)
 	v.Set("LDAPUsername", k.LDAPUsername)
 	v.Set("LDAPPassword", k.LDAPPassword)
+	if k.Policy != "" {
+		v.Set("Policy", k.Policy)
+	}
+	if k.RequestedExpiry != 0 {
+		v.Set("DurationSeconds", fmt.Sprintf("%d", int(k.RequestedExpiry.Seconds())))
+	}
 
 	u.RawQuery = v.Encode()
 
-	req, kerr := http.NewRequest(http.MethodPost, u.String(), nil)
-	if kerr != nil {
-		err = kerr
-		return
+	req, err := http.NewRequest(http.MethodPost, u.String(), nil)
+	if err != nil {
+		return value, stripPassword(err)
 	}
 
-	resp, kerr := k.Client.Do(req)
-	if kerr != nil {
-		err = kerr
-		return
+	resp, err := k.Client.Do(req)
+	if err != nil {
+		return value, stripPassword(err)
 	}
 
 	defer resp.Body.Close()
 	if resp.StatusCode != http.StatusOK {
-		err = errors.New(resp.Status)
-		return
+		var errResp ErrorResponse
+		buf, err := ioutil.ReadAll(resp.Body)
+		if err != nil {
+			return value, err
+		}
+		_, err = xmlDecodeAndBody(bytes.NewReader(buf), &errResp)
+		if err != nil {
+			var s3Err Error
+			if _, err = xmlDecodeAndBody(bytes.NewReader(buf), &s3Err); err != nil {
+				return value, err
+			}
+			errResp.RequestID = s3Err.RequestID
+			errResp.STSError.Code = s3Err.Code
+			errResp.STSError.Message = s3Err.Message
+		}
+		return value, errResp
 	}
 
 	r := AssumeRoleWithLDAPResponse{}
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_tls_identity.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_tls_identity.go
new file mode 100644
index 0000000000000..c7ac4db3b75fb
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_tls_identity.go
@@ -0,0 +1,209 @@
+// MinIO Go Library for Amazon S3 Compatible Cloud Storage
+// Copyright 2021 MinIO, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package credentials
+
+import (
+	"bytes"
+	"crypto/tls"
+	"encoding/xml"
+	"errors"
+	"io"
+	"io/ioutil"
+	"net"
+	"net/http"
+	"net/url"
+	"strconv"
+	"time"
+)
+
+// CertificateIdentityOption is an optional AssumeRoleWithCertificate
+// parameter - e.g. a custom HTTP transport configuration or S3 credential
+// lifetime.
+type CertificateIdentityOption func(*STSCertificateIdentity)
+
+// CertificateIdentityWithTransport returns a CertificateIdentityOption that
+// customizes the STSCertificateIdentity with the given http.RoundTripper.
+func CertificateIdentityWithTransport(t http.RoundTripper) CertificateIdentityOption {
+	return CertificateIdentityOption(func(i *STSCertificateIdentity) { i.Client.Transport = t })
+}
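+
+// How the LDAP option functions added above compose; the endpoint, user
+// credentials and session policy JSON are assumed placeholders:
+//
+//	creds, err := NewLDAPIdentity("https://minio.example.com:9000",
+//		"ldap-user", "ldap-secret",
+//		LDAPIdentityPolicyOpt(`{"Version":"2012-10-17","Statement":[]}`),
+//		LDAPIdentityExpiryOpt(30*time.Minute),
+//	)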
+
+// CertificateIdentityWithExpiry returns a CertificateIdentityOption that
+// customizes the STSCertificateIdentity with the given lifetime.
+//
+// Fetched S3 credentials will have the given lifetime if the STS server
+// allows such credentials.
+func CertificateIdentityWithExpiry(livetime time.Duration) CertificateIdentityOption {
+	return CertificateIdentityOption(func(i *STSCertificateIdentity) { i.S3CredentialLivetime = livetime })
+}
+
+// A STSCertificateIdentity retrieves S3 credentials from the MinIO STS API and
+// rotates those credentials once they expire.
+type STSCertificateIdentity struct {
+	Expiry
+
+	// STSEndpoint is the base URL endpoint of the STS API.
+	// For example, https://minio.local:9000
+	STSEndpoint string
+
+	// S3CredentialLivetime is the duration temp. S3 access
+	// credentials should be valid.
+	//
+	// It represents the access credential lifetime requested
+	// by the client. The STS server may choose to issue
+	// temp. S3 credentials that have a different - usually
+	// shorter - lifetime.
+	//
+	// The default lifetime is one hour.
+	S3CredentialLivetime time.Duration
+
+	// Client is the HTTP client used to authenticate and fetch
+	// S3 credentials.
+	//
+	// A custom TLS client configuration can be specified by
+	// using a custom http.Transport:
+	//   Client: http.Client{
+	//       Transport: &http.Transport{
+	//           TLSClientConfig: &tls.Config{},
+	//       },
+	//   }
+	Client http.Client
+}
+
+var _ Provider = (*STSCertificateIdentity)(nil) // compiler check
+
+// NewSTSCertificateIdentity returns a STSCertificateIdentity that authenticates
+// to the given STS endpoint with the given TLS certificate and retrieves and
+// rotates S3 credentials.
+func NewSTSCertificateIdentity(endpoint string, certificate tls.Certificate, options ...CertificateIdentityOption) (*Credentials, error) {
+	if endpoint == "" {
+		return nil, errors.New("STS endpoint cannot be empty")
+	}
+	if _, err := url.Parse(endpoint); err != nil {
+		return nil, err
+	}
+	identity := &STSCertificateIdentity{
+		STSEndpoint: endpoint,
+		Client: http.Client{
+			Transport: &http.Transport{
+				Proxy: http.ProxyFromEnvironment,
+				DialContext: (&net.Dialer{
+					Timeout:   30 * time.Second,
+					KeepAlive: 30 * time.Second,
+				}).DialContext,
+				ForceAttemptHTTP2:     true,
+				MaxIdleConns:          100,
+				IdleConnTimeout:       90 * time.Second,
+				TLSHandshakeTimeout:   10 * time.Second,
+				ExpectContinueTimeout: 5 * time.Second,
+				TLSClientConfig: &tls.Config{
+					Certificates: []tls.Certificate{certificate},
+				},
+			},
+		},
+	}
+	for _, option := range options {
+		option(identity)
+	}
+	return New(identity), nil
+}
+
+// Retrieve fetches a new set of S3 credentials from the configured
+// STS API endpoint.
+func (i *STSCertificateIdentity) Retrieve() (Value, error) {
+	endpointURL, err := url.Parse(i.STSEndpoint)
+	if err != nil {
+		return Value{}, err
+	}
+	livetime := i.S3CredentialLivetime
+	if livetime == 0 {
+		livetime = 1 * time.Hour
+	}
+
+	queryValues := url.Values{}
+	queryValues.Set("Action", "AssumeRoleWithCertificate")
+	queryValues.Set("Version", STSVersion)
+	queryValues.Set("DurationSeconds", strconv.FormatUint(uint64(livetime.Seconds()), 10))
+	endpointURL.RawQuery = queryValues.Encode()
+
+	req, err := http.NewRequest(http.MethodPost, endpointURL.String(), nil)
+	if err != nil {
+		return Value{}, err
+	}
+
+	resp, err := i.Client.Do(req)
+	if err != nil {
+		return Value{}, err
+	}
+	if resp.Body != nil {
+		defer resp.Body.Close()
+	}
+	if resp.StatusCode != http.StatusOK {
+		var errResp ErrorResponse
+		buf, err := ioutil.ReadAll(resp.Body)
+		if err != nil {
+			return Value{}, err
+		}
+		_, err = xmlDecodeAndBody(bytes.NewReader(buf), &errResp)
+		if err != nil {
+			var s3Err Error
+			if _, err = xmlDecodeAndBody(bytes.NewReader(buf), &s3Err); err != nil {
+				return Value{}, err
+			}
+			errResp.RequestID = s3Err.RequestID
+			errResp.STSError.Code = s3Err.Code
+			errResp.STSError.Message = s3Err.Message
+		}
+		return Value{}, errResp
+	}
+
+	const MaxSize = 10 * 1 << 20
+	var body io.Reader = resp.Body
+	if resp.ContentLength > 0 && resp.ContentLength < MaxSize {
+		body = io.LimitReader(body, resp.ContentLength)
+	} else {
+		body = io.LimitReader(body, MaxSize)
+	}
+
+	var response assumeRoleWithCertificateResponse
+	if err = xml.NewDecoder(body).Decode(&response); err != nil {
+		return Value{}, err
+	}
+	i.SetExpiration(response.Result.Credentials.Expiration, DefaultExpiryWindow)
+	return Value{
+		AccessKeyID:     response.Result.Credentials.AccessKey,
+		SecretAccessKey: response.Result.Credentials.SecretKey,
+		SessionToken:    response.Result.Credentials.SessionToken,
+		SignerType:      SignatureDefault,
+	}, nil
+}
+
+// Expiration returns the expiration time of the current S3 credentials.
+func (i *STSCertificateIdentity) Expiration() time.Time { return i.expiration }
+
+type assumeRoleWithCertificateResponse struct {
+	XMLName xml.Name `xml:"https://sts.amazonaws.com/doc/2011-06-15/ AssumeRoleWithCertificateResponse" json:"-"`
+	Result  struct {
+		Credentials struct {
+			AccessKey    string    `xml:"AccessKeyId" json:"accessKey,omitempty"`
+			SecretKey    string    `xml:"SecretAccessKey" json:"secretKey,omitempty"`
+			Expiration   time.Time `xml:"Expiration" json:"expiration,omitempty"`
+			SessionToken string    `xml:"SessionToken" json:"sessionToken,omitempty"`
+		} `xml:"Credentials" json:"credentials,omitempty"`
+	} `xml:"AssumeRoleWithCertificateResult"`
+	ResponseMetadata struct {
+		RequestID string `xml:"RequestId,omitempty"`
+	} `xml:"ResponseMetadata,omitempty"`
+}
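+
+// Typical construction of the certificate identity provider above; the key
+// pair paths and endpoint are assumed placeholders:
+//
+//	cert, err := tls.LoadX509KeyPair("client.crt", "client.key")
+//	if err != nil {
+//		return err
+//	}
+//	creds, err := NewSTSCertificateIdentity("https://minio.example.com:9000",
+//		cert, CertificateIdentityWithExpiry(30*time.Minute))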
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_web_identity.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_web_identity.go
index 161ffd36cd05e..19bc3ddfc00e0 100644
--- a/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_web_identity.go
+++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_web_identity.go
@@ -18,9 +18,11 @@ package credentials
 
 import (
+	"bytes"
 	"encoding/xml"
 	"errors"
 	"fmt"
+	"io/ioutil"
 	"net/http"
 	"net/url"
 	"strconv"
@@ -54,8 +56,9 @@ type WebIdentityResult struct {
 
 // WebIdentityToken - web identity token with expiry.
 type WebIdentityToken struct {
-	Token  string
-	Expiry int
+	Token       string
+	AccessToken string
+	Expiry      int
 }
 
 // A STSWebIdentity retrieves credentials from MinIO service, and keeps track if
@@ -77,9 +80,9 @@ type STSWebIdentity struct {
 	// This is a customer provided function and is mandatory.
 	GetWebIDTokenExpiry func() (*WebIdentityToken, error)
 
-	// roleARN is the Amazon Resource Name (ARN) of the role that the caller is
+	// RoleARN is the Amazon Resource Name (ARN) of the role that the caller is
 	// assuming.
-	roleARN string
+	RoleARN string
 
 	// roleSessionName is the identifier for the assumed role session.
 	roleSessionName string
@@ -104,7 +107,8 @@ func NewSTSWebIdentity(stsEndpoint string, getWebIDTokenExpiry func() (*WebIdent
 }
 
 func getWebIdentityCredentials(clnt *http.Client, endpoint, roleARN, roleSessionName string,
-	getWebIDTokenExpiry func() (*WebIdentityToken, error)) (AssumeRoleWithWebIdentityResponse, error) {
+	getWebIDTokenExpiry func() (*WebIdentityToken, error),
+) (AssumeRoleWithWebIdentityResponse, error) {
 	idToken, err := getWebIDTokenExpiry()
 	if err != nil {
 		return AssumeRoleWithWebIdentityResponse{}, err
@@ -121,6 +125,10 @@ func getWebIdentityCredentials(clnt *http.Client, endpoint, roleARN, roleSession
 		v.Set("RoleSessionName", roleSessionName)
 	}
 	v.Set("WebIdentityToken", idToken.Token)
+	if idToken.AccessToken != "" {
+		// Usually set when server is using extended userInfo endpoint.
+		v.Set("WebIdentityAccessToken", idToken.AccessToken)
+	}
 	if idToken.Expiry > 0 {
 		v.Set("DurationSeconds", fmt.Sprintf("%d", idToken.Expiry))
 	}
@@ -145,7 +153,22 @@ func getWebIdentityCredentials(clnt *http.Client, endpoint, roleARN, roleSession
 	defer resp.Body.Close()
 	if resp.StatusCode != http.StatusOK {
-		return AssumeRoleWithWebIdentityResponse{}, errors.New(resp.Status)
+		var errResp ErrorResponse
+		buf, err := ioutil.ReadAll(resp.Body)
+		if err != nil {
+			return AssumeRoleWithWebIdentityResponse{}, err
+		}
+		_, err = xmlDecodeAndBody(bytes.NewReader(buf), &errResp)
+		if err != nil {
+			var s3Err Error
+			if _, err = xmlDecodeAndBody(bytes.NewReader(buf), &s3Err); err != nil {
+				return AssumeRoleWithWebIdentityResponse{}, err
+			}
+			errResp.RequestID = s3Err.RequestID
+			errResp.STSError.Code = s3Err.Code
+			errResp.STSError.Message = s3Err.Message
+		}
+		return AssumeRoleWithWebIdentityResponse{}, errResp
 	}
 
 	a := AssumeRoleWithWebIdentityResponse{}
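
// The exported RoleARN and the new AccessToken field in use; fetchFromIDP is
// a hypothetical helper returning OIDC tokens:
//
//	creds, err := NewSTSWebIdentity("https://minio.example.com:9000",
//		func() (*WebIdentityToken, error) {
//			idToken, accessToken, err := fetchFromIDP()
//			if err != nil {
//				return nil, err
//			}
//			return &WebIdentityToken{Token: idToken, AccessToken: accessToken}, nil
//		})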
@@ -159,7 +182,7 @@ func getWebIdentityCredentials(clnt *http.Client, endpoint, roleARN, roleSession
 
 // Retrieve retrieves credentials from the MinIO service.
 // Error will be returned if the request fails.
 func (m *STSWebIdentity) Retrieve() (Value, error) {
-	a, err := getWebIdentityCredentials(m.Client, m.STSEndpoint, m.roleARN, m.roleSessionName, m.GetWebIDTokenExpiry)
+	a, err := getWebIdentityCredentials(m.Client, m.STSEndpoint, m.RoleARN, m.roleSessionName, m.GetWebIDTokenExpiry)
 	if err != nil {
 		return Value{}, err
 	}
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/encrypt/server-side.go b/vendor/github.com/minio/minio-go/v7/pkg/encrypt/server-side.go
index ce7d2153106f7..06e68e7367e2d 100644
--- a/vendor/github.com/minio/minio-go/v7/pkg/encrypt/server-side.go
+++ b/vendor/github.com/minio/minio-go/v7/pkg/encrypt/server-side.go
@@ -101,7 +101,7 @@ func NewSSEKMS(keyID string, context interface{}) (ServerSide, error) {
 	if context == nil {
 		return kms{key: keyID, hasContext: false}, nil
 	}
-	var json = jsoniter.ConfigCompatibleWithStandardLibrary
+	json := jsoniter.ConfigCompatibleWithStandardLibrary
 	serializedContext, err := json.Marshal(context)
 	if err != nil {
 		return nil, err
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/lifecycle/lifecycle.go b/vendor/github.com/minio/minio-go/v7/pkg/lifecycle/lifecycle.go
index 48e91b38ed9c0..743d8eca96b74 100644
--- a/vendor/github.com/minio/minio-go/v7/pkg/lifecycle/lifecycle.go
+++ b/vendor/github.com/minio/minio-go/v7/pkg/lifecycle/lifecycle.go
@@ -19,10 +19,14 @@
 package lifecycle
 
 import (
+	"encoding/json"
 	"encoding/xml"
+	"errors"
 	"time"
 )
 
+var errMissingStorageClass = errors.New("storage-class cannot be empty")
+
 // AbortIncompleteMultipartUpload structure, not supported yet on MinIO
 type AbortIncompleteMultipartUpload struct {
 	XMLName xml.Name `xml:"AbortIncompleteMultipartUpload,omitempty" json:"-"`
@@ -49,13 +53,14 @@ func (n AbortIncompleteMultipartUpload) MarshalXML(e *xml.Encoder, start xml.Sta
 // (or suspended) to request server delete noncurrent object versions at a
 // specific period in the object's lifetime.
 type NoncurrentVersionExpiration struct {
-	XMLName        xml.Name       `xml:"NoncurrentVersionExpiration" json:"-"`
-	NoncurrentDays ExpirationDays `xml:"NoncurrentDays,omitempty"`
+	XMLName                 xml.Name       `xml:"NoncurrentVersionExpiration" json:"-"`
+	NoncurrentDays          ExpirationDays `xml:"NoncurrentDays,omitempty"`
+	NewerNoncurrentVersions int            `xml:"NewerNoncurrentVersions,omitempty"`
 }
 
-// MarshalXML if non-current days not set to non zero value
+// MarshalXML if n is non-empty, i.e. has a non-zero NoncurrentDays or NewerNoncurrentVersions.
 func (n NoncurrentVersionExpiration) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
-	if n.IsDaysNull() {
+	if n.isNull() {
 		return nil
 	}
 	type noncurrentVersionExpirationWrapper NoncurrentVersionExpiration
@@ -67,13 +72,18 @@ func (n NoncurrentVersionExpiration) IsDaysNull() bool {
 	return n.NoncurrentDays == ExpirationDays(0)
 }
 
+func (n NoncurrentVersionExpiration) isNull() bool {
+	return n.IsDaysNull() && n.NewerNoncurrentVersions == 0
+}
+
 // NoncurrentVersionTransition structure, set this action to request server to
 // transition noncurrent object versions to different set storage classes
 // at a specific period in the object's lifetime.
 type NoncurrentVersionTransition struct {
-	XMLName        xml.Name       `xml:"NoncurrentVersionTransition,omitempty" json:"-"`
-	StorageClass   string         `xml:"StorageClass,omitempty" json:"StorageClass,omitempty"`
-	NoncurrentDays ExpirationDays `xml:"NoncurrentDays,omitempty" json:"NoncurrentDays,omitempty"`
+	XMLName                 xml.Name       `xml:"NoncurrentVersionTransition,omitempty" json:"-"`
+	StorageClass            string         `xml:"StorageClass,omitempty" json:"StorageClass,omitempty"`
+	NoncurrentDays          ExpirationDays `xml:"NoncurrentDays" json:"NoncurrentDays"`
+	NewerNoncurrentVersions int            `xml:"NewerNoncurrentVersions,omitempty" json:"NewerNoncurrentVersions,omitempty"`
 }
 
 // IsDaysNull returns true if days field is null
@@ -81,10 +91,35 @@ func (n NoncurrentVersionTransition) IsDaysNull() bool {
 	return n.NoncurrentDays == ExpirationDays(0)
 }
 
+// IsStorageClassEmpty returns true if storage class field is empty
+func (n NoncurrentVersionTransition) IsStorageClassEmpty() bool {
+	return n.StorageClass == ""
+}
+
+func (n NoncurrentVersionTransition) isNull() bool {
+	return n.StorageClass == ""
+}
+
+// UnmarshalJSON implements NoncurrentVersionTransition JSONify
+func (n *NoncurrentVersionTransition) UnmarshalJSON(b []byte) error {
+	type noncurrentVersionTransition NoncurrentVersionTransition
+	var nt noncurrentVersionTransition
+	err := json.Unmarshal(b, &nt)
+	if err != nil {
+		return err
+	}
+
+	if nt.StorageClass == "" {
+		return errMissingStorageClass
+	}
+	*n = NoncurrentVersionTransition(nt)
+	return nil
+}
+
 // MarshalXML is extended to leave out
 // <NoncurrentVersionTransition></NoncurrentVersionTransition> tags
 func (n NoncurrentVersionTransition) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
-	if n.IsDaysNull() {
+	if n.isNull() {
 		return nil
 	}
 	type noncurrentVersionTransitionWrapper NoncurrentVersionTransition
@@ -108,7 +143,46 @@ type Transition struct {
 	XMLName      xml.Name       `xml:"Transition" json:"-"`
 	Date         ExpirationDate `xml:"Date,omitempty" json:"Date,omitempty"`
 	StorageClass string         `xml:"StorageClass,omitempty" json:"StorageClass,omitempty"`
-	Days         ExpirationDays `xml:"Days,omitempty" json:"Days,omitempty"`
+	Days         ExpirationDays `xml:"Days" json:"Days"`
+}
+
+// UnmarshalJSON returns an error if storage-class is empty.
+func (t *Transition) UnmarshalJSON(b []byte) error {
+	type transition Transition
+	var tr transition
+	err := json.Unmarshal(b, &tr)
+	if err != nil {
+		return err
+	}
+
+	if tr.StorageClass == "" {
+		return errMissingStorageClass
+	}
+	*t = Transition(tr)
+	return nil
+}
+
+// MarshalJSON customizes json encoding by omitting empty values
+func (t Transition) MarshalJSON() ([]byte, error) {
+	if t.IsNull() {
+		return nil, nil
+	}
+	type transition struct {
+		Date         *ExpirationDate `json:"Date,omitempty"`
+		StorageClass string          `json:"StorageClass,omitempty"`
+		Days         *ExpirationDays `json:"Days"`
+	}
+
+	newt := transition{
+		StorageClass: t.StorageClass,
+	}
+
+	if !t.IsDateNull() {
+		newt.Date = &t.Date
+	} else {
+		newt.Days = &t.Days
+	}
+	return json.Marshal(newt)
 }
 
 // IsDaysNull returns true if days field is null
@@ -121,9 +195,9 @@ func (t Transition) IsDateNull() bool {
 	return t.Date.Time.IsZero()
 }
 
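+// The UnmarshalJSON guard above rejects transitions without a storage class;
+// a quick illustration with assumed JSON inputs:
+//
+//	var tr Transition
+//	err := json.Unmarshal([]byte(`{"Days":30}`), &tr)
+//	// err == errMissingStorageClass
+//	err = json.Unmarshal([]byte(`{"Days":30,"StorageClass":"GLACIER"}`), &tr)
+//	// err == nil, tr.StorageClass == "GLACIER"
+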
-// IsNull returns true if both date and days fields are null
+// IsNull returns true if no storage-class is set.
 func (t Transition) IsNull() bool {
-	return t.IsDaysNull() && t.IsDateNull()
+	return t.StorageClass == ""
 }
 
 // MarshalXML is transition is non null
@@ -137,9 +211,9 @@ func (t Transition) MarshalXML(en *xml.Encoder, startElement xml.StartElement) e
 
 // And And Rule for LifecycleTag, to be used in LifecycleRuleFilter
 type And struct {
-	XMLName xml.Name `xml:"And,omitempty" json:"-"`
-	Prefix  string   `xml:"Prefix,omitempty" json:"Prefix,omitempty"`
-	Tags    []Tag    `xml:"Tag,omitempty" json:"Tags,omitempty"`
+	XMLName xml.Name `xml:"And" json:"-"`
+	Prefix  string   `xml:"Prefix" json:"Prefix,omitempty"`
+	Tags    []Tag    `xml:"Tag" json:"Tags,omitempty"`
 }
 
 // IsEmpty returns true if Tags field is null
@@ -155,6 +229,31 @@ type Filter struct {
 	Tag    Tag    `xml:"Tag,omitempty" json:"Tag,omitempty"`
 }
 
+// IsNull returns true if all Filter fields are empty.
+func (f Filter) IsNull() bool {
+	return f.Tag.IsEmpty() && f.And.IsEmpty() && f.Prefix == ""
+}
+
+// MarshalJSON customizes json encoding by removing empty values.
+func (f Filter) MarshalJSON() ([]byte, error) {
+	type filter struct {
+		And    *And   `json:"And,omitempty"`
+		Prefix string `json:"Prefix,omitempty"`
+		Tag    *Tag   `json:"Tag,omitempty"`
+	}
+
+	newf := filter{
+		Prefix: f.Prefix,
+	}
+	if !f.Tag.IsEmpty() {
+		newf.Tag = &f.Tag
+	}
+	if !f.And.IsEmpty() {
+		newf.And = &f.And
+	}
+	return json.Marshal(newf)
+}
+
 // MarshalXML - produces the xml representation of the Filter struct
 // only one of Prefix, And and Tag should be present in the output.
 func (f Filter) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
@@ -233,6 +332,26 @@ type Expiration struct {
 	DeleteMarker ExpireDeleteMarker `xml:"ExpiredObjectDeleteMarker,omitempty"`
 }
 
+// MarshalJSON customizes json encoding by removing empty day/date specification.
+func (e Expiration) MarshalJSON() ([]byte, error) {
+	type expiration struct {
+		Date         *ExpirationDate `json:"Date,omitempty"`
+		Days         *ExpirationDays `json:"Days,omitempty"`
+		DeleteMarker ExpireDeleteMarker
+	}
+
+	newexp := expiration{
+		DeleteMarker: e.DeleteMarker,
+	}
+	if !e.IsDaysNull() {
+		newexp.Days = &e.Days
+	}
+	if !e.IsDateNull() {
+		newexp.Date = &e.Date
+	}
+	return json.Marshal(newexp)
+}
+
 // IsDaysNull returns true if days field is null
 func (e Expiration) IsDaysNull() bool {
 	return e.Days == ExpirationDays(0)
@@ -262,6 +381,47 @@ func (e Expiration) MarshalXML(en *xml.Encoder, startElement xml.StartElement) e
 	return en.EncodeElement(expirationWrapper(e), startElement)
 }
 
+// MarshalJSON customizes json encoding by omitting empty values
+func (r Rule) MarshalJSON() ([]byte, error) {
+	type rule struct {
+		AbortIncompleteMultipartUpload *AbortIncompleteMultipartUpload `json:"AbortIncompleteMultipartUpload,omitempty"`
+		Expiration                     *Expiration                     `json:"Expiration,omitempty"`
+		ID                             string                          `json:"ID"`
+		RuleFilter                     *Filter                         `json:"Filter,omitempty"`
+		NoncurrentVersionExpiration    *NoncurrentVersionExpiration    `json:"NoncurrentVersionExpiration,omitempty"`
+		NoncurrentVersionTransition    *NoncurrentVersionTransition    `json:"NoncurrentVersionTransition,omitempty"`
+		Prefix                         string                          `json:"Prefix,omitempty"`
+		Status                         string                          `json:"Status"`
+		Transition                     *Transition                     `json:"Transition,omitempty"`
+	}
+	newr := rule{
+		Prefix: r.Prefix,
+		Status: r.Status,
+		ID:     r.ID,
+	}
+
+	if !r.RuleFilter.IsNull() {
+		newr.RuleFilter = &r.RuleFilter
+	}
+	if !r.AbortIncompleteMultipartUpload.IsDaysNull() {
+		newr.AbortIncompleteMultipartUpload = &r.AbortIncompleteMultipartUpload
+	}
+	if !r.Expiration.IsNull() {
+		newr.Expiration = &r.Expiration
+	}
+	if !r.Transition.IsNull() {
+		newr.Transition = &r.Transition
+	}
+	if !r.NoncurrentVersionExpiration.isNull() {
+		newr.NoncurrentVersionExpiration = &r.NoncurrentVersionExpiration
+	}
+	if !r.NoncurrentVersionTransition.isNull() {
+		newr.NoncurrentVersionTransition = &r.NoncurrentVersionTransition
+	}
+
+	return json.Marshal(newr)
+}
+
 // Rule represents a single rule in lifecycle configuration
 type Rule struct {
 	XMLName xml.Name `xml:"Rule,omitempty" json:"-"`
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/notification/notification.go b/vendor/github.com/minio/minio-go/v7/pkg/notification/notification.go
index b17e6c54fbbb0..75a1f609621ec 100644
--- a/vendor/github.com/minio/minio-go/v7/pkg/notification/notification.go
+++ b/vendor/github.com/minio/minio-go/v7/pkg/notification/notification.go
@@ -78,11 +78,13 @@ type Arn struct {
 
 // NewArn creates new ARN based on the given partition, service, region, account id and resource
 func NewArn(partition, service, region, accountID, resource string) Arn {
-	return Arn{Partition: partition,
+	return Arn{
+		Partition: partition,
 		Service:   service,
 		Region:    region,
 		AccountID: accountID,
-		Resource:  resource}
+		Resource:  resource,
+	}
 }
 
 // String returns the string format of the ARN
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/replication/replication.go b/vendor/github.com/minio/minio-go/v7/pkg/replication/replication.go
index 6df89821443ce..97abf8df8ffe4 100644
--- a/vendor/github.com/minio/minio-go/v7/pkg/replication/replication.go
+++ b/vendor/github.com/minio/minio-go/v7/pkg/replication/replication.go
@@ -22,12 +22,13 @@ import (
 	"fmt"
 	"strconv"
 	"strings"
+	"time"
 	"unicode/utf8"
 
 	"github.com/rs/xid"
 )
 
-var errInvalidFilter = fmt.Errorf("Invalid filter")
+var errInvalidFilter = fmt.Errorf("invalid filter")
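
// Usage of the NewArn constructor reformatted above, with placeholder
// values, and the string form it produces:
//
//	arn := notification.NewArn("minio", "sqs", "us-east-1", "1", "webhook")
//	// arn.String() == "arn:minio:sqs:us-east-1:1:webhook"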
 
 // OptionType specifies operation to be performed on config
 type OptionType string
 
@@ -46,19 +47,21 @@ const (
 
 // Options represents options to set a replication configuration rule
 type Options struct {
-	Op                     OptionType
-	ID                     string
-	Prefix                 string
-	RuleStatus             string
-	Priority               string
-	TagString              string
-	StorageClass           string
-	RoleArn                string
-	DestBucket             string
-	IsTagSet               bool
-	IsSCSet                bool
-	ReplicateDeletes       string // replicate versioned deletes
-	ReplicateDeleteMarkers string // replicate soft deletes
+	Op                      OptionType
+	RoleArn                 string
+	ID                      string
+	Prefix                  string
+	RuleStatus              string
+	Priority                string
+	TagString               string
+	StorageClass            string
+	DestBucket              string
+	IsTagSet                bool
+	IsSCSet                 bool
+	ReplicateDeletes        string // replicate versioned deletes
+	ReplicateDeleteMarkers  string // replicate soft deletes
+	ReplicaSync             string // replicate replica metadata modifications
+	ExistingObjectReplicate string
 }
 
 // Tags returns a slice of tags for a rule
@@ -71,7 +74,7 @@ func (opts Options) Tags() ([]Tag, error) {
 		}
 		kv := strings.SplitN(tok, "=", 2)
 		if len(kv) != 2 {
-			return []Tag{}, fmt.Errorf("Tags should be entered as comma separated k=v pairs")
+			return []Tag{}, fmt.Errorf("tags should be entered as comma separated k=v pairs")
 		}
 		tagList = append(tagList, Tag{
 			Key:   kv[0],
@@ -101,9 +104,23 @@ func (c *Config) AddRule(opts Options) error {
 	if err != nil {
 		return err
 	}
-	if opts.RoleArn != c.Role && c.Role != "" {
-		return fmt.Errorf("Role ARN does not match existing configuration")
+	var compatSw bool // true if RoleArn is used with new mc client and older minio version prior to multisite
+	if opts.RoleArn != "" {
+		tokens := strings.Split(opts.RoleArn, ":")
+		if len(tokens) != 6 {
+			return fmt.Errorf("invalid format for replication Role Arn: %v", opts.RoleArn)
+		}
+		switch {
+		case strings.HasPrefix(opts.RoleArn, "arn:minio:replication") && len(c.Rules) == 0:
+			c.Role = opts.RoleArn
+			compatSw = true
+		case strings.HasPrefix(opts.RoleArn, "arn:aws:iam"):
+			c.Role = opts.RoleArn
+		default:
+			return fmt.Errorf("RoleArn invalid for AWS replication configuration: %v", opts.RoleArn)
+		}
 	}
+
 	var status Status
 	// toggle rule status for edit option
 	switch opts.RuleStatus {
@@ -112,7 +129,7 @@ func (c *Config) AddRule(opts Options) error {
 	case "disable":
 		status = Disabled
 	default:
-		return fmt.Errorf("Rule state should be either [enable|disable]")
+		return fmt.Errorf("rule state should be either [enable|disable]")
 	}
 
 	tags, err := opts.Tags()
@@ -137,24 +154,11 @@ func (c *Config) AddRule(opts Options) error {
 	if opts.ID == "" {
 		opts.ID = xid.New().String()
 	}
-	arnStr := opts.RoleArn
-	if opts.RoleArn == "" {
-		arnStr = c.Role
-	}
-	if arnStr == "" {
-		return fmt.Errorf("Role ARN required")
-	}
-	tokens := strings.Split(arnStr, ":")
-	if len(tokens) != 6 {
-		return fmt.Errorf("invalid format for replication Arn")
-	}
-	if c.Role == "" {
-		c.Role = arnStr
-	}
+
 	destBucket := opts.DestBucket
 	// ref https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-arn-format.html
 	if btokens := strings.Split(destBucket, ":"); len(btokens) != 6 {
-		if len(btokens) == 1 {
+		if len(btokens) == 1 && compatSw {
 			destBucket = fmt.Sprintf("arn:aws:s3:::%s", destBucket)
 		} else {
 			return fmt.Errorf("destination bucket needs to be in Arn format")
@@ -183,7 +187,28 @@ func (c *Config) AddRule(opts Options) error {
 			return fmt.Errorf("ReplicateDeletes should be either enable|disable")
 		}
 	}
+	var replicaSync Status
+	// replica sync is by default Enabled, unless specified.
+	switch opts.ReplicaSync {
+	case "enable", "":
+		replicaSync = Enabled
+	case "disable":
+		replicaSync = Disabled
+	default:
+		return fmt.Errorf("replica metadata sync should be either [enable|disable]")
+	}
+
+	var existingStatus Status
+	if opts.ExistingObjectReplicate != "" {
+		switch opts.ExistingObjectReplicate {
+		case "enable":
+			existingStatus = Enabled
+		case "disable", "":
+			existingStatus = Disabled
+		default:
+			return fmt.Errorf("existingObjectReplicate should be either enable|disable")
+		}
+	}
 	newRule := Rule{
 		ID:       opts.ID,
 		Priority: priority,
@@ -200,24 +225,33 @@ func (c *Config) AddRule(opts Options) error {
 		// However AWS leaves this configurable https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-for-metadata-changes.html
 		SourceSelectionCriteria: SourceSelectionCriteria{
 			ReplicaModifications: ReplicaModifications{
-				Status: Enabled,
+				Status: replicaSync,
 			},
 		},
+		// By default disable existing object replication unless selected
+		ExistingObjectReplication: ExistingObjectReplication{
+			Status: existingStatus,
+		},
 	}
 
 	// validate rule after overlaying priority for pre-existing rule being disabled.
 	if err := newRule.Validate(); err != nil {
 		return err
 	}
+	// if replication config uses RoleArn, migrate this to the destination element as target ARN for remote bucket for MinIO configuration
+	if c.Role != "" && !strings.HasPrefix(c.Role, "arn:aws:iam") && !compatSw {
+		for i := range c.Rules {
+			c.Rules[i].Destination.Bucket = c.Role
+		}
+		c.Role = ""
+	}
+
 	for _, rule := range c.Rules {
 		if rule.Priority == newRule.Priority {
-			return fmt.Errorf("Priority must be unique. Replication configuration already has a rule with this priority")
-		}
-		if rule.Destination.Bucket != newRule.Destination.Bucket {
-			return fmt.Errorf("The destination bucket must be same for all rules")
+			return fmt.Errorf("priority must be unique. Replication configuration already has a rule with this priority")
 		}
 		if rule.ID == newRule.ID {
-			return fmt.Errorf("A rule exists with this ID")
+			return fmt.Errorf("a rule exists with this ID")
 		}
 	}
 
@@ -228,8 +262,16 @@ func (c *Config) AddRule(opts Options) error {
 // EditRule modifies an existing rule in replication config
 func (c *Config) EditRule(opts Options) error {
 	if opts.ID == "" {
-		return fmt.Errorf("Rule ID missing")
+		return fmt.Errorf("rule ID missing")
+	}
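+	// An AddRule call exercising the new options above; the destination ARN
+	// is an assumed placeholder in the 6-token Arn format the validation
+	// expects:
+	//
+	//	var cfg Config
+	//	err := cfg.AddRule(Options{
+	//		Op:                      AddOption,
+	//		Priority:                "1",
+	//		Prefix:                  "docs/",
+	//		RuleStatus:              "enable",
+	//		DestBucket:              "arn:minio:replication:us-east-1:target-id:dest-bucket",
+	//		ReplicateDeletes:        "enable",
+	//		ReplicaSync:             "enable",
+	//		ExistingObjectReplicate: "enable",
+	//	})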
+	// if replication config uses RoleArn, migrate this to the destination element as target ARN for remote bucket for non AWS.
+	if c.Role != "" && !strings.HasPrefix(c.Role, "arn:aws:iam") && len(c.Rules) > 1 {
+		for i := range c.Rules {
+			c.Rules[i].Destination.Bucket = c.Role
+		}
+		c.Role = ""
 	}
+
 	rIdx := -1
 	var newRule Rule
 	for i, rule := range c.Rules {
@@ -240,7 +282,7 @@ func (c *Config) EditRule(opts Options) error {
 		}
 	}
 	if rIdx < 0 {
-		return fmt.Errorf("Rule with ID %s not found in replication configuration", opts.ID)
+		return fmt.Errorf("rule with ID %s not found in replication configuration", opts.ID)
 	}
 	prefixChg := opts.Prefix != newRule.Prefix()
 	if opts.IsTagSet || prefixChg {
@@ -286,7 +328,7 @@ func (c *Config) EditRule(opts Options) error {
 		case "disable":
 			newRule.Status = Disabled
 		default:
-			return fmt.Errorf("Rule state should be either [enable|disable]")
+			return fmt.Errorf("rule state should be either [enable|disable]")
 		}
 	}
 	// set DeleteMarkerReplication rule status for edit option
@@ -314,6 +356,27 @@ func (c *Config) EditRule(opts Options) error {
 		}
 	}
 
+	if opts.ReplicaSync != "" {
+		switch opts.ReplicaSync {
+		case "enable", "":
+			newRule.SourceSelectionCriteria.ReplicaModifications.Status = Enabled
+		case "disable":
+			newRule.SourceSelectionCriteria.ReplicaModifications.Status = Disabled
+		default:
+			return fmt.Errorf("replica metadata sync should be either [enable|disable]")
+		}
+	}
+
+	if opts.ExistingObjectReplicate != "" {
+		switch opts.ExistingObjectReplicate {
+		case "enable":
+			newRule.ExistingObjectReplication.Status = Enabled
+		case "disable":
+			newRule.ExistingObjectReplication.Status = Disabled
+		default:
+			return fmt.Errorf("existingObjectsReplication state should be either [enable|disable]")
+		}
+	}
 	if opts.IsSCSet {
 		newRule.Destination.StorageClass = opts.StorageClass
 	}
@@ -328,11 +391,7 @@ func (c *Config) EditRule(opts Options) error {
 		destBucket := opts.DestBucket
 		// ref https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-arn-format.html
 		if btokens := strings.Split(opts.DestBucket, ":"); len(btokens) != 6 {
-			if len(btokens) == 1 {
-				destBucket = fmt.Sprintf("arn:aws:s3:::%s", destBucket)
-			} else {
-				return fmt.Errorf("destination bucket needs to be in Arn format")
-			}
+			return fmt.Errorf("destination bucket needs to be in Arn format")
 		}
 		newRule.Destination.Bucket = destBucket
 	}
@@ -343,10 +402,10 @@ func (c *Config) EditRule(opts Options) error {
 	// ensure priority and destination bucket restrictions are not violated
 	for idx, rule := range c.Rules {
 		if rule.Priority == newRule.Priority && rIdx != idx {
-			return fmt.Errorf("Priority must be unique. Replication configuration already has a rule with this priority")
+			return fmt.Errorf("priority must be unique. Replication configuration already has a rule with this priority")
 		}
-		if rule.Destination.Bucket != newRule.Destination.Bucket {
-			return fmt.Errorf("The destination bucket must be same for all rules")
+		if rule.Destination.Bucket != newRule.Destination.Bucket && rule.ID == newRule.ID {
+			return fmt.Errorf("invalid destination bucket for this rule")
 		}
 	}
 
@@ -369,24 +428,24 @@ func (c *Config) RemoveRule(opts Options) error {
 		return fmt.Errorf("Rule with ID %s not found", opts.ID)
 	}
 	if len(newRules) == 0 {
-		return fmt.Errorf("Replication configuration should have at least one rule")
+		return fmt.Errorf("replication configuration should have at least one rule")
 	}
 	c.Rules = newRules
 	return nil
-
 }
 
 // Rule - a rule for replication configuration.
 type Rule struct {
-	XMLName                 xml.Name                `xml:"Rule" json:"-"`
-	ID                      string                  `xml:"ID,omitempty"`
-	Status                  Status                  `xml:"Status"`
-	Priority                int                     `xml:"Priority"`
-	DeleteMarkerReplication DeleteMarkerReplication `xml:"DeleteMarkerReplication"`
-	DeleteReplication       DeleteReplication       `xml:"DeleteReplication"`
-	Destination             Destination             `xml:"Destination"`
-	Filter                  Filter                  `xml:"Filter" json:"Filter"`
-	SourceSelectionCriteria SourceSelectionCriteria `xml:"SourceSelectionCriteria" json:"SourceSelectionCriteria"`
+	XMLName                   xml.Name                  `xml:"Rule" json:"-"`
+	ID                        string                    `xml:"ID,omitempty"`
+	Status                    Status                    `xml:"Status"`
+	Priority                  int                       `xml:"Priority"`
+	DeleteMarkerReplication   DeleteMarkerReplication   `xml:"DeleteMarkerReplication"`
+	DeleteReplication         DeleteReplication         `xml:"DeleteReplication"`
+	Destination               Destination               `xml:"Destination"`
+	Filter                    Filter                    `xml:"Filter" json:"Filter"`
+	SourceSelectionCriteria   SourceSelectionCriteria   `xml:"SourceSelectionCriteria" json:"SourceSelectionCriteria"`
+	ExistingObjectReplication ExistingObjectReplication `xml:"ExistingObjectReplication,omitempty" json:"ExistingObjectReplication,omitempty"`
 }
 
 // Validate validates the rule for correctness
@@ -402,14 +461,13 @@ func (r Rule) Validate() error {
 	}
 
 	if r.Priority < 0 && r.Status == Enabled {
-		return fmt.Errorf("Priority must be set for the rule")
+		return fmt.Errorf("priority must be set for the rule")
 	}
 
 	if err := r.validateStatus(); err != nil {
 		return err
 	}
-
-	return nil
+	return r.ExistingObjectReplication.Validate()
 }
 
 // validateID - checks if ID is valid or not.
@@ -436,10 +494,7 @@ func (r Rule) validateStatus() error {
 }
 
 func (r Rule) validateFilter() error {
-	if err := r.Filter.Validate(); err != nil {
-		return err
-	}
-	return nil
+	return r.Filter.Validate()
 }
 
 // Prefix - a rule can either have prefix under <filter></filter> or under
@@ -525,11 +580,11 @@ func (tag Tag) IsEmpty() bool {
 
 // Validate checks this tag.
 func (tag Tag) Validate() error {
 	if len(tag.Key) == 0 || utf8.RuneCountInString(tag.Key) > 128 {
-		return fmt.Errorf("Invalid Tag Key")
+		return fmt.Errorf("invalid Tag Key")
 	}
 
 	if utf8.RuneCountInString(tag.Value) > 256 {
-		return fmt.Errorf("Invalid Tag Value")
+		return fmt.Errorf("invalid Tag Value")
 	}
 	return nil
 }
@@ -585,7 +640,7 @@ func (d DeleteReplication) IsEmpty() bool {
 
 // ReplicaModifications specifies if replica modification sync is enabled
 type ReplicaModifications struct {
-	Status Status `xml:"Status" json:"Status"`
+	Status Status `xml:"Status" json:"Status"` // should be set to "Enabled" by default
 }
 
 // SourceSelectionCriteria - specifies additional source selection criteria in ReplicationConfiguration.
@@ -604,7 +659,88 @@ func (s SourceSelectionCriteria) Validate() error {
 		return nil
 	}
 	if !s.IsValid() {
-		return fmt.Errorf("Invalid ReplicaModification status")
+		return fmt.Errorf("invalid ReplicaModification status")
 	}
 	return nil
 }
+
+// ExistingObjectReplication - whether existing object replication is enabled
+type ExistingObjectReplication struct {
+	Status Status `xml:"Status"` // should be set to "Disabled" by default
+}
+
+// IsEmpty returns true if ExistingObjectReplication is not set
+func (e ExistingObjectReplication) IsEmpty() bool {
+	return len(e.Status) == 0
+}
+
+// Validate checks that the status, if set, is either Enabled or Disabled.
+func (e ExistingObjectReplication) Validate() error {
+	if e.IsEmpty() {
+		return nil
+	}
+	if e.Status != Disabled && e.Status != Enabled {
+		return fmt.Errorf("invalid ExistingObjectReplication status")
+	}
+	return nil
+}
+
+// TargetMetrics represents inline replication metrics
+// such as pending, failed and completed bytes in total for a bucket remote target
+type TargetMetrics struct {
+	// Pending size in bytes
+	PendingSize uint64 `json:"pendingReplicationSize"`
+	// Completed size in bytes
+	ReplicatedSize uint64 `json:"completedReplicationSize"`
+	// Total Replica size in bytes
+	ReplicaSize uint64 `json:"replicaSize"`
+	// Failed size in bytes
+	FailedSize uint64 `json:"failedReplicationSize"`
+	// Total number of pending operations including metadata updates
+	PendingCount uint64 `json:"pendingReplicationCount"`
+	// Total number of failed operations including metadata updates
+	FailedCount uint64 `json:"failedReplicationCount"`
+}
+
+// Metrics represents inline replication metrics for a bucket.
+type Metrics struct {
+	Stats map[string]TargetMetrics
+	// Total Pending size in bytes across targets
+	PendingSize uint64 `json:"pendingReplicationSize"`
+	// Completed size in bytes across targets
+	ReplicatedSize uint64 `json:"completedReplicationSize"`
+	// Total Replica size in bytes across targets
+	ReplicaSize uint64 `json:"replicaSize"`
+	// Failed size in bytes across targets
+	FailedSize uint64 `json:"failedReplicationSize"`
+	// Total number of pending operations including metadata updates across targets
+	PendingCount uint64 `json:"pendingReplicationCount"`
+	// Total number of failed operations including metadata updates across targets
+	FailedCount uint64 `json:"failedReplicationCount"`
+}
+
+// ResyncTargetsInfo provides replication target information to resync replicated data.
+type ResyncTargetsInfo struct {
+	Targets []ResyncTarget `json:"target,omitempty"`
+}
+
+// ResyncTarget provides the replica resources and resetID to initiate resync replication.
+type ResyncTarget struct {
+	Arn       string    `json:"arn"`
+	ResetID   string    `json:"resetid"`
+	StartTime time.Time `json:"startTime,omitempty"`
+	EndTime   time.Time `json:"endTime,omitempty"`
+	// Status of resync operation
+	ResyncStatus string `json:"resyncStatus,omitempty"`
+	// Completed size in bytes
+	ReplicatedSize int64 `json:"completedReplicationSize,omitempty"`
+	// Failed size in bytes
+	FailedSize int64 `json:"failedReplicationSize,omitempty"`
+	// Total number of failed operations
+	FailedCount int64 `json:"failedReplicationCount,omitempty"`
+	// Total number of replicated operations
+	ReplicatedCount int64 `json:"replicationCount,omitempty"`
+	// Last bucket/object replicated.
+	Bucket string `json:"bucket,omitempty"`
+	Object string `json:"object,omitempty"`
+}
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/s3utils/utils.go b/vendor/github.com/minio/minio-go/v7/pkg/s3utils/utils.go
index fea25d6ef6489..2f1a5a653c158 100644
--- a/vendor/github.com/minio/minio-go/v7/pkg/s3utils/utils.go
+++ b/vendor/github.com/minio/minio-go/v7/pkg/s3utils/utils.go
@@ -104,6 +104,9 @@ var elbAmazonRegex = regexp.MustCompile(`elb(.*?).amazonaws.com$`)
 
 // Regular expression used to determine if the arg is elb host in china.
var elbAmazonCnRegex = regexp.MustCompile(`elb(.*?).amazonaws.com.cn$`) +// amazonS3HostPrivateLink - regular expression used to determine if an arg is s3 host in AWS PrivateLink interface endpoints style +var amazonS3HostPrivateLink = regexp.MustCompile(`^(?:bucket|accesspoint).vpce-.*?.s3.(.*?).vpce.amazonaws.com$`) + // GetRegionFromURL - returns a region from url host. func GetRegionFromURL(endpointURL url.URL) string { if endpointURL == sentinelURL { @@ -139,6 +142,10 @@ func GetRegionFromURL(endpointURL url.URL) string { if len(parts) > 1 { return parts[1] } + parts = amazonS3HostPrivateLink.FindStringSubmatch(endpointURL.Host) + if len(parts) > 1 { + return parts[1] + } return "" } @@ -171,6 +178,7 @@ func IsAmazonFIPSGovCloudEndpoint(endpointURL url.URL) bool { return false } return endpointURL.Host == "s3-fips-us-gov-west-1.amazonaws.com" || + endpointURL.Host == "s3-fips.us-gov-west-1.amazonaws.com" || endpointURL.Host == "s3-fips.dualstack.us-gov-west-1.amazonaws.com" } @@ -201,6 +209,15 @@ func IsAmazonFIPSEndpoint(endpointURL url.URL) bool { return IsAmazonFIPSUSEastWestEndpoint(endpointURL) || IsAmazonFIPSGovCloudEndpoint(endpointURL) } +// IsAmazonPrivateLinkEndpoint - Match if it is exactly Amazon S3 PrivateLink interface endpoint +// See https://docs.aws.amazon.com/AmazonS3/latest/userguide/privatelink-interface-endpoints.html. +func IsAmazonPrivateLinkEndpoint(endpointURL url.URL) bool { + if endpointURL == sentinelURL { + return false + } + return amazonS3HostPrivateLink.MatchString(endpointURL.Host) +} + // IsGoogleEndpoint - Match if it is exactly Google cloud storage endpoint. func IsGoogleEndpoint(endpointURL url.URL) bool { if endpointURL == sentinelURL { @@ -211,7 +228,7 @@ func IsGoogleEndpoint(endpointURL url.URL) bool { // Expects ascii encoded strings - from output of urlEncodePath func percentEncodeSlash(s string) string { - return strings.Replace(s, "/", "%2F", -1) + return strings.ReplaceAll(s, "/", "%2F") } // QueryEncode - encodes query values in their URL encoded form. In diff --git a/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-streaming.go b/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-streaming.go index 7b2ca91d128f0..b1296d2b17628 100644 --- a/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-streaming.go +++ b/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-streaming.go @@ -114,8 +114,8 @@ func buildChunkHeader(chunkLen int64, signature string) []byte { // buildChunkSignature - returns chunk signature for a given chunk and previous signature. func buildChunkSignature(chunkData []byte, reqTime time.Time, region, - previousSignature, secretAccessKey string) string { - + previousSignature, secretAccessKey string, +) string { chunkStringToSign := buildChunkStringToSign(reqTime, region, previousSignature, chunkData) signingKey := getSigningKey(secretAccessKey, region, reqTime, ServiceTypeS3) @@ -200,8 +200,8 @@ func (s *StreamingReader) setStreamingAuthHeader(req *http.Request) { // StreamingSignV4 - provides chunked upload signatureV4 support by // implementing io.Reader. func StreamingSignV4(req *http.Request, accessKeyID, secretAccessKey, sessionToken, - region string, dataLen int64, reqTime time.Time) *http.Request { - + region string, dataLen int64, reqTime time.Time, +) *http.Request { // Set headers needed for streaming signature. 
prepareStreamingRequest(req, sessionToken, dataLen, reqTime) diff --git a/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-v2.go b/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-v2.go index 71821a26a7f9d..cf7921d1f6437 100644 --- a/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-v2.go +++ b/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-v2.go @@ -233,16 +233,7 @@ func writeCanonicalizedHeaders(buf *bytes.Buffer, req http.Request) { if idx > 0 { buf.WriteByte(',') } - if strings.Contains(v, "\n") { - // TODO: "Unfold" long headers that - // span multiple lines (as allowed by - // RFC 2616, section 4.2) by replacing - // the folding white-space (including - // new-line) by a single space. - buf.WriteString(v) - } else { - buf.WriteString(v) - } + buf.WriteString(v) } buf.WriteByte('\n') } @@ -252,10 +243,14 @@ func writeCanonicalizedHeaders(buf *bytes.Buffer, req http.Request) { // http://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html#RESTAuthenticationStringToSign // Whitelist resource list that will be used in query string for signature-V2 calculation. -// The list should be alphabetically sorted +// +// This list should be kept alphabetically sorted, do not hastily edit. var resourceList = []string{ "acl", + "cors", "delete", + "encryption", + "legal-hold", "lifecycle", "location", "logging", @@ -270,6 +265,10 @@ var resourceList = []string{ "response-content-language", "response-content-type", "response-expires", + "retention", + "select", + "select-type", + "tagging", "torrent", "uploadId", "uploads", diff --git a/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-v4.go b/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-v4.go index 67572b20d2522..ce64c37d1172d 100644 --- a/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-v4.go +++ b/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-v4.go @@ -42,22 +42,22 @@ const ( ServiceTypeSTS = "sts" ) -/// -/// Excerpts from @lsegal - -/// https://github.com/aws/aws-sdk-js/issues/659#issuecomment-120477258. -/// -/// User-Agent: -/// -/// This is ignored from signing because signing this causes -/// problems with generating pre-signed URLs (that are executed -/// by other agents) or when customers pass requests through -/// proxies, which may modify the user-agent. -/// -/// -/// Authorization: -/// -/// Is skipped for obvious reasons -/// +// +// Excerpts from @lsegal - +// https://github.com/aws/aws-sdk-js/issues/659#issuecomment-120477258. +// +// User-Agent: +// +// This is ignored from signing because signing this causes +// problems with generating pre-signed URLs (that are executed +// by other agents) or when customers pass requests through +// proxies, which may modify the user-agent.
+// +// +// Authorization: +// +// Is skipped for obvious reasons +// var v4IgnoredHeaders = map[string]bool{ "Authorization": true, "User-Agent": true, @@ -118,7 +118,9 @@ func getCanonicalHeaders(req http.Request, ignoredHeaders map[string]bool) strin headers = append(headers, strings.ToLower(k)) vals[strings.ToLower(k)] = vv } - headers = append(headers, "host") + if !headerExists("host", headers) { + headers = append(headers, "host") + } sort.Strings(headers) var buf bytes.Buffer @@ -130,7 +132,7 @@ func getCanonicalHeaders(req http.Request, ignoredHeaders map[string]bool) strin switch { case k == "host": buf.WriteString(getHostAddr(&req)) - fallthrough + buf.WriteByte('\n') default: for idx, v := range vals[k] { if idx > 0 { @@ -144,6 +146,15 @@ func getCanonicalHeaders(req http.Request, ignoredHeaders map[string]bool) strin return buf.String() } +func headerExists(key string, headers []string) bool { + for _, k := range headers { + if k == key { + return true + } + } + return false +} + // getSignedHeaders generates all signed request headers, // i.e. the lexically sorted, semicolon-separated list of lowercase // request header names. @@ -155,7 +166,9 @@ func getSignedHeaders(req http.Request, ignoredHeaders map[string]bool) string { } headers = append(headers, strings.ToLower(k)) } - headers = append(headers, "host") + if !headerExists("host", headers) { + headers = append(headers, "host") + } sort.Strings(headers) return strings.Join(headers, ";") } @@ -170,7 +183,7 @@ func getSignedHeaders(req http.Request, ignoredHeaders map[string]bool) string { // <SignedHeaders>\n // <HashedPayload> func getCanonicalRequest(req http.Request, ignoredHeaders map[string]bool, hashedPayload string) string { - req.URL.RawQuery = strings.Replace(req.URL.Query().Encode(), "+", "%20", -1) + req.URL.RawQuery = strings.ReplaceAll(req.URL.Query().Encode(), "+", "%20") canonicalRequest := strings.Join([]string{ req.Method, s3utils.EncodePath(req.URL.Path), @@ -186,7 +199,7 @@ func getCanonicalRequest(req http.Request, ignoredHeaders map[string]bool, hashe func getStringToSignV4(t time.Time, location, canonicalRequest, serviceType string) string { stringToSign := signV4Algorithm + "\n" + t.Format(iso8601DateFormat) + "\n" stringToSign = stringToSign + getScope(location, t, serviceType) + "\n" - stringToSign = stringToSign + hex.EncodeToString(sum256([]byte(canonicalRequest))) + stringToSign += hex.EncodeToString(sum256([]byte(canonicalRequest))) return stringToSign } diff --git a/vendor/github.com/minio/minio-go/v7/pkg/signer/utils.go b/vendor/github.com/minio/minio-go/v7/pkg/signer/utils.go index 2192a369331ea..b54fa4c77cfd9 100644 --- a/vendor/github.com/minio/minio-go/v7/pkg/signer/utils.go +++ b/vendor/github.com/minio/minio-go/v7/pkg/signer/utils.go @@ -44,6 +44,10 @@ func sumHMAC(key []byte, data []byte) []byte { // getHostAddr returns host header if available, otherwise returns host from URL func getHostAddr(req *http.Request) string { + host := req.Header.Get("host") + if host != "" && req.Host != host { + return host + } if req.Host != "" { return req.Host } diff --git a/vendor/github.com/minio/minio-go/v7/post-policy.go b/vendor/github.com/minio/minio-go/v7/post-policy.go index 31a7308ccf28d..7aa96e0d69faf 100644 --- a/vendor/github.com/minio/minio-go/v7/post-policy.go +++ b/vendor/github.com/minio/minio-go/v7/post-policy.go @@ -316,8 +316,8 @@ func (p PostPolicy) marshalJSON() []byte { } retStr := "{" retStr = retStr + expirationStr + "," - retStr = retStr + conditionsStr - retStr = retStr + "}" + retStr += conditionsStr + retStr
+= "}" return []byte(retStr) } diff --git a/vendor/github.com/minio/minio-go/v7/retry-continous.go b/vendor/github.com/minio/minio-go/v7/retry-continous.go index 3d25883b0c014..b54081d0d4c65 100644 --- a/vendor/github.com/minio/minio-go/v7/retry-continous.go +++ b/vendor/github.com/minio/minio-go/v7/retry-continous.go @@ -20,7 +20,7 @@ package minio import "time" // newRetryTimerContinous creates a timer with exponentially increasing delays forever. -func (c Client) newRetryTimerContinous(unit time.Duration, cap time.Duration, jitter float64, doneCh chan struct{}) <-chan int { +func (c *Client) newRetryTimerContinous(unit time.Duration, cap time.Duration, jitter float64, doneCh chan struct{}) <-chan int { attemptCh := make(chan int) // normalize jitter to the range [0, 1.0] @@ -39,7 +39,7 @@ func (c Client) newRetryTimerContinous(unit time.Duration, cap time.Duration, ji if attempt > maxAttempt { attempt = maxAttempt } - //sleep = random_between(0, min(cap, base * 2 ** attempt)) + // sleep = random_between(0, min(cap, base * 2 ** attempt)) sleep := unit * time.Duration(1< cap { sleep = cap diff --git a/vendor/github.com/minio/minio-go/v7/retry.go b/vendor/github.com/minio/minio-go/v7/retry.go index 598af29752ca2..f454e675cf5b2 100644 --- a/vendor/github.com/minio/minio-go/v7/retry.go +++ b/vendor/github.com/minio/minio-go/v7/retry.go @@ -42,7 +42,7 @@ var DefaultRetryCap = time.Second // newRetryTimer creates a timer with exponentially increasing // delays until the maximum retry attempts are reached. -func (c Client) newRetryTimer(ctx context.Context, maxRetry int, unit time.Duration, cap time.Duration, jitter float64) <-chan int { +func (c *Client) newRetryTimer(ctx context.Context, maxRetry int, unit time.Duration, cap time.Duration, jitter float64) <-chan int { attemptCh := make(chan int) // computes the exponential backoff duration according to @@ -56,7 +56,7 @@ func (c Client) newRetryTimer(ctx context.Context, maxRetry int, unit time.Durat jitter = MaxJitter } - //sleep = random_between(0, min(cap, base * 2 ** attempt)) + // sleep = random_between(0, min(cap, base * 2 ** attempt)) sleep := unit * time.Duration(1< cap { sleep = cap @@ -110,6 +110,7 @@ func isS3CodeRetryable(s3Code string) (ok bool) { // List of HTTP status codes which are retryable. var retryableHTTPStatusCodes = map[int]struct{}{ 429: {}, // http.StatusTooManyRequests is not part of the Go 1.5 library, yet + 499: {}, // client closed request, retry. A non-standard status code introduced by nginx. 
http.StatusInternalServerError: {}, http.StatusBadGateway: {}, http.StatusServiceUnavailable: {}, diff --git a/vendor/github.com/minio/minio-go/v7/transport.go b/vendor/github.com/minio/minio-go/v7/transport.go index d5ad15b8b4b7d..a88477b736944 100644 --- a/vendor/github.com/minio/minio-go/v7/transport.go +++ b/vendor/github.com/minio/minio-go/v7/transport.go @@ -1,3 +1,4 @@ +//go:build go1.7 || go1.8 // +build go1.7 go1.8 /* diff --git a/vendor/github.com/minio/minio-go/v7/utils.go b/vendor/github.com/minio/minio-go/v7/utils.go index 4bdf1a3c3bb25..3ebe7b2977bb0 100644 --- a/vendor/github.com/minio/minio-go/v7/utils.go +++ b/vendor/github.com/minio/minio-go/v7/utils.go @@ -18,14 +18,17 @@ package minio import ( + "context" "crypto/md5" "encoding/base64" "encoding/hex" "encoding/xml" + "errors" "fmt" "hash" "io" "io/ioutil" + "math/rand" "net" "net/http" "net/url" @@ -49,7 +52,7 @@ var expirationRegex = regexp.MustCompile(`expiry-date="(.*?)", rule-id="(.*?)"`) func amzExpirationToExpiryDateRuleID(expiration string) (time.Time, string) { if matches := expirationRegex.FindStringSubmatch(expiration); len(matches) == 3 { - expTime, err := time.Parse(http.TimeFormat, matches[1]) + expTime, err := parseRFC7231Time(matches[1]) if err != nil { return time.Time{}, "" } @@ -58,6 +61,26 @@ func amzExpirationToExpiryDateRuleID(expiration string) (time.Time, string) { return time.Time{}, "" } +var restoreRegex = regexp.MustCompile(`ongoing-request="(.*?)"(, expiry-date="(.*?)")?`) + +func amzRestoreToStruct(restore string) (ongoing bool, expTime time.Time, err error) { + matches := restoreRegex.FindStringSubmatch(restore) + if len(matches) != 4 { + return false, time.Time{}, errors.New("unexpected restore header") + } + ongoing, err = strconv.ParseBool(matches[1]) + if err != nil { + return false, time.Time{}, err + } + if matches[3] != "" { + expTime, err = parseRFC7231Time(matches[3]) + if err != nil { + return false, time.Time{}, err + } + } + return +} + // xmlDecoder provide decoded value in xml. func xmlDecoder(body io.Reader, v interface{}) error { d := xml.NewDecoder(body) @@ -82,21 +105,6 @@ func sumMD5Base64(data []byte) string { // getEndpointURL - construct a new endpoint. func getEndpointURL(endpoint string, secure bool) (*url.URL, error) { - if strings.Contains(endpoint, ":") { - host, _, err := net.SplitHostPort(endpoint) - if err != nil { - return nil, err - } - if !s3utils.IsValidIP(host) && !s3utils.IsValidDomain(host) { - msg := "Endpoint: " + endpoint + " does not follow ip address or domain name standards." - return nil, errInvalidArgument(msg) - } - } else { - if !s3utils.IsValidIP(endpoint) && !s3utils.IsValidDomain(endpoint) { - msg := "Endpoint: " + endpoint + " does not follow ip address or domain name standards." - return nil, errInvalidArgument(msg) - } - } // If secure is false, use 'http' scheme. scheme := "https" if !secure { @@ -153,12 +161,18 @@ func isValidEndpointURL(endpointURL url.URL) error { if endpointURL.Path != "/" && endpointURL.Path != "" { return errInvalidArgument("Endpoint url cannot have fully qualified paths.") } - if strings.Contains(endpointURL.Host, ".s3.amazonaws.com") { + host := endpointURL.Hostname() + if !s3utils.IsValidIP(host) && !s3utils.IsValidDomain(host) { + msg := "Endpoint: " + endpointURL.Host + " does not follow ip address or domain name standards." 
+ return errInvalidArgument(msg) + } + + if strings.Contains(host, ".s3.amazonaws.com") { if !s3utils.IsAmazonEndpoint(endpointURL) { return errInvalidArgument("Amazon S3 endpoint should be 's3.amazonaws.com'.") } } - if strings.Contains(endpointURL.Host, ".googleapis.com") { + if strings.Contains(host, ".googleapis.com") { if !s3utils.IsGoogleEndpoint(endpointURL) { return errInvalidArgument("Google Cloud Storage endpoint should be 'storage.googleapis.com'.") } @@ -217,6 +231,27 @@ func extractObjMetadata(header http.Header) http.Header { return filteredHeader } +const ( + // RFC 7231#section-7.1.1.1 timestamp format. e.g. Tue, 29 Apr 2014 18:30:38 GMT + rfc822TimeFormat = "Mon, 2 Jan 2006 15:04:05 GMT" + rfc822TimeFormatSingleDigitDay = "Mon, _2 Jan 2006 15:04:05 GMT" + rfc822TimeFormatSingleDigitDayTwoDigitYear = "Mon, _2 Jan 06 15:04:05 GMT" +) + +func parseTime(t string, formats ...string) (time.Time, error) { + for _, format := range formats { + tt, err := time.Parse(format, t) + if err == nil { + return tt, nil + } + } + return time.Time{}, fmt.Errorf("unable to parse %s in any of the input formats: %s", t, formats) +} + +func parseRFC7231Time(lastModified string) (time.Time, error) { + return parseTime(lastModified, rfc822TimeFormat, rfc822TimeFormatSingleDigitDay, rfc822TimeFormatSingleDigitDayTwoDigitYear) +} + // ToObjectInfo converts http header values into ObjectInfo type, // extracts metadata and fills in all the necessary fields in ObjectInfo. func ToObjectInfo(bucketName string, objectName string, h http.Header) (ObjectInfo, error) { @@ -244,7 +279,7 @@ func ToObjectInfo(bucketName string, objectName string, h http.Header) (ObjectIn } // Parse Last-Modified as http time format. - date, err := time.Parse(http.TimeFormat, h.Get("Last-Modified")) + mtime, err := parseRFC7231Time(h.Get("Last-Modified")) if err != nil { return ObjectInfo{}, ErrorResponse{ Code: "InternalError", @@ -266,7 +301,18 @@ func ToObjectInfo(bucketName string, objectName string, h http.Header) (ObjectIn expiryStr := h.Get("Expires") var expiry time.Time if expiryStr != "" { - expiry, _ = time.Parse(http.TimeFormat, expiryStr) + expiry, err = parseRFC7231Time(expiryStr) + if err != nil { + return ObjectInfo{}, ErrorResponse{ + Code: "InternalError", + Message: fmt.Sprintf("'Expiry' is not in supported format: %v", err), + BucketName: bucketName, + Key: objectName, + RequestID: h.Get("x-amz-request-id"), + HostID: h.Get("x-amz-id-2"), + Region: h.Get("x-amz-bucket-region"), + } + } } metadata := extractObjMetadata(h) @@ -294,6 +340,16 @@ func ToObjectInfo(bucketName string, objectName string, h http.Header) (ObjectIn } } + // Nil if not found + var restore *RestoreInfo + if restoreHdr := h.Get(amzRestore); restoreHdr != "" { + ongoing, expTime, err := amzRestoreToStruct(restoreHdr) + if err != nil { + return ObjectInfo{}, err + } + restore = &RestoreInfo{OngoingRestore: ongoing, ExpiryTime: expTime} + } + // extract lifecycle expiry date and rule ID expTime, ruleID := amzExpirationToExpiryDateRuleID(h.Get(amzExpiration)) @@ -304,7 +360,7 @@ func ToObjectInfo(bucketName string, objectName string, h http.Header) (ObjectIn ETag: etag, Key: objectName, Size: size, - LastModified: date, + LastModified: mtime, ContentType: contentType, Expires: expiry, VersionID: h.Get(amzVersionID), @@ -319,6 +375,7 @@ func ToObjectInfo(bucketName string, objectName string, h http.Header) (ObjectIn UserMetadata: userMetadata, UserTags: userTags, UserTagCount: tagCount, + Restore: restore, }, nil } @@ -370,7 +427,7 @@ func
redactSignature(origAuth string) string { return "AWS **REDACTED**:**REDACTED**" } - /// Signature V4 authorization header. + // Signature V4 authorization header. // Strip out accessKeyID from: // Credential=////aws4_request @@ -397,19 +454,20 @@ func getDefaultLocation(u url.URL, regionOverride string) (location string) { return region } -var supportedHeaders = []string{ - "content-type", - "cache-control", - "content-encoding", - "content-disposition", - "content-language", - "x-amz-website-redirect-location", - "x-amz-object-lock-mode", - "x-amz-metadata-directive", - "x-amz-object-lock-retain-until-date", - "expires", - "x-amz-replication-status", +var supportedHeaders = map[string]bool{ + "content-type": true, + "cache-control": true, + "content-encoding": true, + "content-disposition": true, + "content-language": true, + "x-amz-website-redirect-location": true, + "x-amz-object-lock-mode": true, + "x-amz-metadata-directive": true, + "x-amz-object-lock-retain-until-date": true, + "expires": true, + "x-amz-replication-status": true, // Add more supported headers here. + // Must be lower case. } // isStorageClassHeader returns true if the header is a supported storage class header @@ -419,34 +477,24 @@ func isStorageClassHeader(headerKey string) bool { // isStandardHeader returns true if header is a supported header and not a custom header func isStandardHeader(headerKey string) bool { - key := strings.ToLower(headerKey) - for _, header := range supportedHeaders { - if strings.ToLower(header) == key { - return true - } - } - return false + return supportedHeaders[strings.ToLower(headerKey)] } // sseHeaders is list of server side encryption headers -var sseHeaders = []string{ - "x-amz-server-side-encryption", - "x-amz-server-side-encryption-aws-kms-key-id", - "x-amz-server-side-encryption-context", - "x-amz-server-side-encryption-customer-algorithm", - "x-amz-server-side-encryption-customer-key", - "x-amz-server-side-encryption-customer-key-MD5", +var sseHeaders = map[string]bool{ + "x-amz-server-side-encryption": true, + "x-amz-server-side-encryption-aws-kms-key-id": true, + "x-amz-server-side-encryption-context": true, + "x-amz-server-side-encryption-customer-algorithm": true, + "x-amz-server-side-encryption-customer-key": true, + "x-amz-server-side-encryption-customer-key-md5": true, + // Add more supported headers here. + // Must be lower case. } // isSSEHeader returns true if header is a server side encryption header. func isSSEHeader(headerKey string) bool { - key := strings.ToLower(headerKey) - for _, h := range sseHeaders { - if strings.ToLower(h) == key { - return true - } - } - return false + return sseHeaders[strings.ToLower(headerKey)] } // isAmzHeader returns true if header is a x-amz-meta-* or x-amz-acl header. 
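The two hunks above swap linear scans over header slices for map lookups keyed by lower-cased names. A minimal standalone sketch of the pattern, with a hypothetical subset of entries rather than the full vendored lists:

```go
package main

import (
	"fmt"
	"strings"
)

// Keys are stored lower case once, so each membership test is a single
// map access instead of a loop that lower-cases every candidate.
var supportedHeaders = map[string]bool{
	"content-type":  true,
	"cache-control": true,
	// subset only; the vendored list carries many more entries
}

func isStandardHeader(headerKey string) bool {
	return supportedHeaders[strings.ToLower(headerKey)]
}

func main() {
	fmt.Println(isStandardHeader("Content-Type")) // true
	fmt.Println(isStandardHeader("X-Custom-Key")) // false
}
```

Note the same change also lower-cases `x-amz-server-side-encryption-customer-key-MD5`: with a map, keys must be stored in canonical lower case up front, whereas the old loop lower-cased both sides of every comparison.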
@@ -456,15 +504,17 @@ func isAmzHeader(headerKey string) bool { return strings.HasPrefix(key, "x-amz-meta-") || strings.HasPrefix(key, "x-amz-grant-") || key == "x-amz-acl" || isSSEHeader(headerKey) } -var md5Pool = sync.Pool{New: func() interface{} { return md5.New() }} -var sha256Pool = sync.Pool{New: func() interface{} { return sha256.New() }} +var ( + md5Pool = sync.Pool{New: func() interface{} { return md5.New() }} + sha256Pool = sync.Pool{New: func() interface{} { return sha256.New() }} +) func newMd5Hasher() md5simd.Hasher { - return hashWrapper{Hash: md5Pool.New().(hash.Hash), isMD5: true} + return hashWrapper{Hash: md5Pool.Get().(hash.Hash), isMD5: true} } func newSHA256Hasher() md5simd.Hasher { - return hashWrapper{Hash: sha256Pool.New().(hash.Hash), isSHA256: true} + return hashWrapper{Hash: sha256Pool.Get().(hash.Hash), isSHA256: true} } // hashWrapper implements the md5simd.Hasher interface. @@ -486,3 +536,88 @@ func (m hashWrapper) Close() { } m.Hash = nil } + +const letterBytes = "abcdefghijklmnopqrstuvwxyz01234569" +const ( + letterIdxBits = 6 // 6 bits to represent a letter index + letterIdxMask = 1<<letterIdxBits - 1 // All 1-bits, as many as letterIdxBits + letterIdxMax = 63 / letterIdxBits // # of letter indices fitting in 63 bits +) + +// randString generates random names and prepends them with a known prefix. +func randString(n int, src rand.Source, prefix string) string { + b := make([]byte, n) + // A src.Int63() generates 63 random bits, enough for letterIdxMax characters! + for i, cache, remain := n-1, src.Int63(), letterIdxMax; i >= 0; { + if remain == 0 { + cache, remain = src.Int63(), letterIdxMax + } + if idx := int(cache & letterIdxMask); idx < len(letterBytes) { + b[i] = letterBytes[idx] + i-- + } + cache >>= letterIdxBits + remain-- + } + return prefix + string(b[0:30-len(prefix)]) +} + +// IsNetworkOrHostDown - if there was a network error or if the host is down. +// expectTimeouts indicates that *context* timeouts are expected and does not +// indicate a downed host. Other timeouts still return down. +func IsNetworkOrHostDown(err error, expectTimeouts bool) bool { + if err == nil { + return false + } + + if errors.Is(err, context.Canceled) { + return false + } + + if expectTimeouts && errors.Is(err, context.DeadlineExceeded) { + return false + } + + if errors.Is(err, context.DeadlineExceeded) { + return true + } + + // We need to figure out if the error is either a timeout + // or a non-temporary error. + urlErr := &url.Error{} + if errors.As(err, &urlErr) { + switch urlErr.Err.(type) { + case *net.DNSError, *net.OpError, net.UnknownNetworkError: + return true + } + } + var e net.Error + if errors.As(err, &e) { + if e.Timeout() { + return true + } + } + + // Fallback to other mechanisms. + switch { + case strings.Contains(err.Error(), "Connection closed by foreign host"): + return true + case strings.Contains(err.Error(), "TLS handshake timeout"): + // If error is - tlsHandshakeTimeoutError. + return true + case strings.Contains(err.Error(), "i/o timeout"): + // If error is - tcp timeoutError. + return true + case strings.Contains(err.Error(), "connection timed out"): + // If err is a net.Dial timeout.
+ return true + case strings.Contains(err.Error(), "connection refused"): + // If err is connection refused + return true + + case strings.Contains(strings.ToLower(err.Error()), "503 service unavailable"): + // Denial errors + return true + } + return false +} diff --git a/vendor/modules.txt b/vendor/modules.txt index 83fa0a21dbc03..7e4e5cec924db 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -706,6 +706,7 @@ github.com/klauspost/compress/fse github.com/klauspost/compress/gzip github.com/klauspost/compress/huff0 github.com/klauspost/compress/internal/snapref +github.com/klauspost/compress/s2 github.com/klauspost/compress/zstd github.com/klauspost/compress/zstd/internal/xxhash # github.com/klauspost/cpuid v1.3.1 @@ -741,8 +742,8 @@ github.com/miekg/dns # github.com/minio/md5-simd v1.1.0 ## explicit; go 1.14 github.com/minio/md5-simd -# github.com/minio/minio-go/v7 v7.0.10 -## explicit; go 1.12 +# github.com/minio/minio-go/v7 v7.0.24 +## explicit; go 1.17 github.com/minio/minio-go/v7 github.com/minio/minio-go/v7/pkg/credentials github.com/minio/minio-go/v7/pkg/encrypt From d7f03be2122dc8f798fad5c13068d1e0f45af82d Mon Sep 17 00:00:00 2001 From: Alex Ferm Date: Mon, 2 May 2022 03:13:55 -0500 Subject: [PATCH 044/223] Update libsystemd-dev from bullseye-backports for arm32 docker images, fixes (#5938) (#6012) --- clients/cmd/promtail/Dockerfile.arm32 | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/clients/cmd/promtail/Dockerfile.arm32 b/clients/cmd/promtail/Dockerfile.arm32 index 8e70ba084e65a..28f92122e74ab 100644 --- a/clients/cmd/promtail/Dockerfile.arm32 +++ b/clients/cmd/promtail/Dockerfile.arm32 @@ -1,16 +1,21 @@ -FROM golang:1.17.9 as build +FROM golang:1.17.9-bullseye as build COPY . /src/loki WORKDIR /src/loki -RUN apt-get update && apt-get install -qy libsystemd-dev +# Backports repo required to get a libsystemd version 246 or newer which is required to handle journal +ZSTD compression +RUN echo "deb http://deb.debian.org/debian bullseye-backports main" >> /etc/apt/sources.list +RUN apt-get update && apt-get install -t bullseye-backports -qy libsystemd-dev RUN make clean && make BUILD_IN_CONTAINER=false promtail # Promtail requires debian as the base image to support systemd journal reading -FROM debian:stretch-slim +FROM debian:bullseye-slim # tzdata required for the timestamp stage to work +# Backports repo required to get a libsystemd version 246 or newer which is required to handle journal +ZSTD compression +RUN echo "deb http://deb.debian.org/debian bullseye-backports main" >> /etc/apt/sources.list RUN apt-get update && \ apt-get install -qy \ - tzdata ca-certificates libsystemd-dev && \ + tzdata ca-certificates +RUN apt-get install -t bullseye-backports -qy libsystemd-dev && \ rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* COPY --from=build /src/loki/clients/cmd/promtail/promtail /usr/bin/promtail COPY clients/cmd/promtail/promtail-local-config.yaml /etc/promtail/local-config.yaml From cce2d8ef2f8fd78bdbbfa18155dcd0f66ac8788a Mon Sep 17 00:00:00 2001 From: Zach Leslie Date: Mon, 2 May 2022 01:15:47 -0700 Subject: [PATCH 045/223] Promote policyRule to v1 (#6003) --- production/ksonnet/promtail/promtail.libsonnet | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/production/ksonnet/promtail/promtail.libsonnet b/production/ksonnet/promtail/promtail.libsonnet index 8f3a7d7a940b1..f3e3d76b616a1 100644 --- a/production/ksonnet/promtail/promtail.libsonnet +++ b/production/ksonnet/promtail/promtail.libsonnet @@ -14,8 +14,7 @@
config + scrape_config { _config+:: { namespace: $._config.namespace }, }, - local policyRule = k.rbac.v1beta1.policyRule, - + local policyRule = k.rbac.v1.policyRule, promtail_rbac: namespaced_k.util.rbac($._config.promtail_cluster_role_name, [ From 093fe91ce92e0dcc86d977d072cb6ebf7c6eb5d8 Mon Sep 17 00:00:00 2001 From: Evangelos Foutras Date: Mon, 2 May 2022 11:20:24 +0300 Subject: [PATCH 046/223] Bump inet.af/netaddr for Go 1.18 compatibility (#5872) Fixes panic caused by outdated go4.org/unsafe/assume-no-moving-gc. [1] [1] assume-no-moving-gc commit 538ce61f45ea5c2c48d7133d44b3e52c3db5c31e From 241bde1271025fcefa742a180e3ebd1578b84f02 Mon Sep 17 00:00:00 2001 From: Bryan Boreham Date: Mon, 2 May 2022 09:30:29 +0100 Subject: [PATCH 047/223] Add dot to some DNS address to reduce lookups (#5789) Kubernetes sets up a DNS search path like `namespace.svc.cluster.local, svc.cluster.local, cluster.local, google.internal`; any unqualified name is first tried as a prefix to each of these. These requests, which will never succeed, add latency and increase load on the DNS service. Adding a `.` at the end makes the name fully-qualified, so Go's DNS client will skip the search path and use the name as-is. Note only changes a subset of addresses in the config; others didn't seem to be causing a problem. --- CHANGELOG.md | 54 +++++++++++++++++++ .../ksonnet/loki/query-scheduler.libsonnet | 6 +-- 2 files changed, 57 insertions(+), 3 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 92c8a018a2b02..a1cd34f92ef92 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -129,6 +129,60 @@ to include only the most relevant. * [4938](https://github.com/grafana/loki/pull/4938) **DylanGuedes**: Add distributor ring page * [4879](https://github.com/grafana/loki/pull/4879) **cyriltovena**: LogQL: add __line__ function to | line_format template * [4858](https://github.com/grafana/loki/pull/4858) **sandy2008**: feat(): add ManagedIdentity in Azure Blob Storage +## Main +* [5789](https://github.com/grafana/loki/pull/5789) **bboreham**: Production config: add dot to some DNS address to reduce lookups. +* [5780](https://github.com/grafana/loki/pull/5780) **simonswine**: Update alpine image to 3.15.4. +* [5715](https://github.com/grafana/loki/pull/5715) **chaudum** Add option to push RFC5424 syslog messages from Promtail in syslog scrape target. +* [5696](https://github.com/grafana/loki/pull/5696) **paullryan** don't block scraping of new logs from cloudflare within promtail if an error is received from cloudflare about too early logs. +* [5685](https://github.com/grafana/loki/pull/5625) **chaudum** Fix bug in push request parser that allowed users to send arbitrary non-string data as "log line". +* [5707](https://github.com/grafana/loki/pull/5707) **franzwong** Promtail: Rename config name limit_config to limits_config. +* [5626](https://github.com/grafana/loki/pull/5626) **jeschkies** Support multi-tenant select logs and samples queries. +* [5622](https://github.com/grafana/loki/pull/5622) **chaudum**: Fix bug in query splitter that caused `interval` query parameter to be ignored and therefore returning more logs than expected. +* [5521](https://github.com/grafana/loki/pull/5521) **cstyan**: Move stream lag configuration to top level clients config struct and refactor stream lag metric, this resolves a bug with duplicate metric collection when a single Promtail binary is running multiple Promtail clients. 
+* [5568](https://github.com/grafana/loki/pull/5568) **afayngelerindbx**: Fix canary panics due to concurrent execution of `confirmMissing` +* [5552](https://github.com/grafana/loki/pull/5552) **jiachengxu**: Loki mixin: add `DiskSpaceUtilizationPanel` +* [5541](https://github.com/grafana/loki/pull/5541) **bboreham**: Queries: reject very deeply nested regexps which could crash Loki. +* [5536](https://github.com/grafana/loki/pull/5536) **jiachengxu**: Loki mixin: make labelsSelector in loki chunks dashboards configurable +* [5535](https://github.com/grafana/loki/pull/5535) **jiachengxu**: Loki mixins: use labels selector for loki chunks dashboard +* [5507](https://github.com/grafana/loki/pull/5507) **MichelHollands**: Remove extra param in call for inflightRequests metric. +* [5481](https://github.com/grafana/loki/pull/5481) **MichelHollands**: Add a DeletionMode config variable to specify the delete mode and validate match parameters. +* [5356](https://github.com/grafana/loki/pull/5356) **jbschami**: Enhance lambda-promtail to support adding extra labels from an environment variable value +* [5409](https://github.com/grafana/loki/pull/5409) **ldb**: Enable best effort parsing for Syslog messages +* [5392](https://github.com/grafana/loki/pull/5392) **MichelHollands**: Etcd credentials are parsed as secrets instead of plain text now. +* [5361](https://github.com/grafana/loki/pull/5361) **ctovena**: Add usage report to grafana.com. +* [5289](https://github.com/grafana/loki/pull/5289) **ctovena**: Fix deduplication bug in queries when mutating labels. +* [5302](https://github.com/grafana/loki/pull/5302) **MasslessParticle** Update azure blobstore client to use new sdk. +* [5243](https://github.com/grafana/loki/pull/5290) **ssncferreira**: Update Promtail to support duration string formats. +* [5266](https://github.com/grafana/loki/pull/5266) **jeschkies**: Write Promtail position file atomically on Unix. +* [5280](https://github.com/grafana/loki/pull/5280) **jeschkies**: Fix Docker target connection loss. +* [5243](https://github.com/grafana/loki/pull/5243) **owen-d**: moves `querier.split-queries-by-interval` to limits code only. +* [5139](https://github.com/grafana/loki/pull/5139) **DylanGuedes**: Drop support for legacy configuration rules format. +* [5262](https://github.com/grafana/loki/pull/5262) **MichelHollands**: Remove the labelFilter field +* [4911](https://github.com/grafana/loki/pull/4911) **jeschkies**: Support Docker service discovery in Promtail. +* [5107](https://github.com/grafana/loki/pull/5107) **chaudum** Fix bug in fluentd plugin that caused log lines containing non UTF-8 characters to be dropped. +* [5148](https://github.com/grafana/loki/pull/5148) **chaudum** Add periodic task to prune old expired items from the FIFO cache to free up memory. +* [5187](https://github.com/grafana/loki/pull/5187) **aknuds1** Rename metric `cortex_experimental_features_in_use_total` to `loki_experimental_features_in_use_total` and metric `log_messages_total` to `loki_log_messages_total`. +* [5170](https://github.com/grafana/loki/pull/5170) **chaudum** Fix deadlock in Promtail caused when targets got removed from a target group by the discovery manager. +* [5163](https://github.com/grafana/loki/pull/5163) **chaudum** Fix regression in fluentd plugin introduced with #5107 that caused `NoMethodError` when parsing non-string values of log lines. +* [5144](https://github.com/grafana/loki/pull/5144) **dannykopping** Ruler: fix remote write basic auth credentials. 
+* [5091](https://github.com/grafana/loki/pull/5091) **owen-d**: Changes `ingester.concurrent-flushes` default to 32 +* [5031](https://github.com/grafana/loki/pull/5031) **liguozhong**: Promtail: Add global read rate limiting. +* [4879](https://github.com/grafana/loki/pull/4879) **cyriltovena**: LogQL: add __line__ function to | line_format template. +* [5081](https://github.com/grafana/loki/pull/5081) **SasSwart**: Add the option to configure memory ballast for Loki +* [5085](https://github.com/grafana/loki/pull/5085) **aknuds1**: Upgrade Cortex to [e0807c4eb487](https://github.com/cortexproject/cortex/compare/4e9fc3a2b5ab..e0807c4eb487) and Prometheus to [692a54649ed7](https://github.com/prometheus/prometheus/compare/2a3d62ac8456..692a54649ed7) +* [5067](https://github.com/grafana/loki/pull/5057) **cstyan**: Add a metric to Azure Blob Storage client to track total egress bytes +* [5065](https://github.com/grafana/loki/pull/5065) **AndreZiviani**: lambda-promtail: Add ability to ingest logs from S3 +* [4950](https://github.com/grafana/loki/pull/4950) **DylanGuedes**: Implement common instance addr/net interface +* [4949](https://github.com/grafana/loki/pull/4949) **ssncferreira**: Add query `queueTime` metric to statistics and metrics.go +* [4938](https://github.com/grafana/loki/pull/4938) **DylanGuedes**: Implement ring status page for the distributor +* [5023](https://github.com/grafana/loki/pull/5023) **ssncferreira**: Move `querier.split-queries-by-interval` to a per-tenant configuration +* [4993](https://github.com/grafana/loki/pull/4926) **thejosephstevens**: Fix parent of wal and wal_cleaner in loki ruler config docs +* [4933](https://github.com/grafana/loki/pull/4933) **jeschkies**: Support matchers in series label values query. +* [4926](https://github.com/grafana/loki/pull/4926) **thejosephstevens**: Fix comment in Loki module loading for accuracy +* [4920](https://github.com/grafana/loki/pull/4920) **chaudum**: Add `-list-targets` command line flag to list all available run targets +* [4860](https://github.com/grafana/loki/pull/4860) **cyriltovena**: Add rate limiting and metrics to hedging +* [4865](https://github.com/grafana/loki/pull/4865) **taisho6339**: Fix duplicate registry.MustRegister call in Promtail Kafka +* [4845](https://github.com/grafana/loki/pull/4845) **chaudum** Return error responses consistently as JSON * [4826](https://github.com/grafana/loki/pull/4826) **cyriltovena**: Adds the ability to hedge storage requests. * [4785](https://github.com/grafana/loki/pull/4785) **DylanGuedes**: Loki: Print current config by calling /config * [4775](https://github.com/grafana/loki/pull/4775) **jeschkies**: Make `*` and `+` non-greedy to double regex filter speed. 
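The jsonnet diff below appends a trailing dot to the in-cluster scheduler and frontend addresses, making them fully qualified. A rough sketch of the effect the commit message above describes, with a hypothetical namespace value:

```go
package main

import "fmt"

func main() {
	namespace := "loki" // hypothetical namespace

	// Unqualified: Go's resolver walks the kubernetes search path
	// (namespace.svc.cluster.local, svc.cluster.local, cluster.local, ...),
	// issuing lookups that can never succeed before the final one does.
	unqualified := fmt.Sprintf("query-scheduler-discovery.%s.svc.cluster.local:9095", namespace)

	// Fully qualified: the trailing dot tells the resolver to skip the
	// search path and issue exactly one lookup for the name as-is.
	qualified := fmt.Sprintf("query-scheduler-discovery.%s.svc.cluster.local.:9095", namespace)

	fmt.Println(unqualified)
	fmt.Println(qualified)
}
```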
diff --git a/production/ksonnet/loki/query-scheduler.libsonnet b/production/ksonnet/loki/query-scheduler.libsonnet index 4f71e2f7de527..99a7ada9ab434 100644 --- a/production/ksonnet/loki/query-scheduler.libsonnet +++ b/production/ksonnet/loki/query-scheduler.libsonnet @@ -7,10 +7,10 @@ local k = import 'ksonnet-util/kausal.libsonnet'; _config+:: { loki+: if $._config.query_scheduler_enabled then { frontend+: { - scheduler_address: 'query-scheduler-discovery.%s.svc.cluster.local:9095' % $._config.namespace, + scheduler_address: 'query-scheduler-discovery.%s.svc.cluster.local.:9095' % $._config.namespace, }, frontend_worker+: { - scheduler_address: 'query-scheduler-discovery.%s.svc.cluster.local:9095' % $._config.namespace, + scheduler_address: 'query-scheduler-discovery.%s.svc.cluster.local.:9095' % $._config.namespace, }, query_scheduler+: { max_outstanding_requests_per_tenant: max_outstanding, @@ -20,7 +20,7 @@ local k = import 'ksonnet-util/kausal.libsonnet'; max_outstanding_per_tenant: max_outstanding, }, frontend_worker+: { - frontend_address: 'query-frontend.%s.svc.cluster.local:9095' % $._config.namespace, + frontend_address: 'query-frontend.%s.svc.cluster.local.:9095' % $._config.namespace, }, }, }, From 38b1d3b914bae88f8c9145f6edf403c77fd8de6f Mon Sep 17 00:00:00 2001 From: Susana Ferreira Date: Mon, 2 May 2022 10:31:27 +0200 Subject: [PATCH 048/223] Update Loki reads/writes resources dashboards (#5765) --- .../dashboards/loki-reads-resources.libsonnet | 18 +++++------------- .../dashboards/loki-writes-resources.libsonnet | 13 ++++--------- 2 files changed, 9 insertions(+), 22 deletions(-) diff --git a/production/loki-mixin/dashboards/loki-reads-resources.libsonnet b/production/loki-mixin/dashboards/loki-reads-resources.libsonnet index 25e94fe887859..09df8b813fd5b 100644 --- a/production/loki-mixin/dashboards/loki-reads-resources.libsonnet +++ b/production/loki-mixin/dashboards/loki-reads-resources.libsonnet @@ -1,7 +1,8 @@ +local grafana = import 'grafonnet/grafana.libsonnet'; local utils = import 'mixin-utils/utils.libsonnet'; (import 'dashboard-utils.libsonnet') { - grafanaDashboards+: + grafanaDashboards+:: { 'loki-reads-resources.json': ($.dashboard('Loki / Reads Resources', uid='reads-resources')) @@ -45,7 +46,7 @@ local utils = import 'mixin-utils/utils.libsonnet'; ) ) .addRow( - $.row('Querier') + grafana.row.new('Querier') .addPanel( $.containerCPUUsagePanel('CPU', 'querier'), ) @@ -55,9 +56,6 @@ local utils = import 'mixin-utils/utils.libsonnet'; .addPanel( $.goHeapInUsePanel('Memory (go heap inuse)', 'querier'), ) - ) - .addRow( - $.row('') .addPanel( $.panel('Disk Writes') + $.queryPanel( @@ -81,7 +79,7 @@ local utils = import 'mixin-utils/utils.libsonnet'; ) ) .addRow( - $.row('Index Gateway') + grafana.row.new('Index Gateway') .addPanel( $.containerCPUUsagePanel('CPU', 'index-gateway'), ) @@ -91,9 +89,6 @@ local utils = import 'mixin-utils/utils.libsonnet'; .addPanel( $.goHeapInUsePanel('Memory (go heap inuse)', 'index-gateway'), ) - ) - .addRow( - $.row('') .addPanel( $.panel('Disk Writes') + $.queryPanel( @@ -136,7 +131,7 @@ local utils = import 'mixin-utils/utils.libsonnet'; ) ) .addRow( - $.row('Ruler') + grafana.row.new('Ruler') .addPanel( $.panel('Rules') + $.queryPanel( @@ -147,9 +142,6 @@ local utils = import 'mixin-utils/utils.libsonnet'; .addPanel( $.containerCPUUsagePanel('CPU', 'ruler'), ) - ) - .addRow( - $.row('') .addPanel( $.containerMemoryWorkingSetPanel('Memory (workingset)', 'ruler'), ) diff --git 
a/production/loki-mixin/dashboards/loki-writes-resources.libsonnet b/production/loki-mixin/dashboards/loki-writes-resources.libsonnet index 3d8068a34af77..95855da1e9995 100644 --- a/production/loki-mixin/dashboards/loki-writes-resources.libsonnet +++ b/production/loki-mixin/dashboards/loki-writes-resources.libsonnet @@ -1,10 +1,11 @@ +local grafana = import 'grafonnet/grafana.libsonnet'; local utils = import 'mixin-utils/utils.libsonnet'; (import 'dashboard-utils.libsonnet') { - grafanaDashboards+: + grafanaDashboards+:: { 'loki-writes-resources.json': - $.dashboard('Loki / Writes Resources', uid='writes-resources') + ($.dashboard('Loki / Writes Resources', uid='writes-resources')) .addCluster() .addNamespace() .addTag() @@ -33,7 +34,7 @@ local utils = import 'mixin-utils/utils.libsonnet'; ) ) .addRow( - $.row('Ingester') + grafana.row.new('Ingester') .addPanel( $.panel('In-memory streams') + $.queryPanel( @@ -47,18 +48,12 @@ local utils = import 'mixin-utils/utils.libsonnet'; .addPanel( $.containerCPUUsagePanel('CPU', 'ingester'), ) - ) - .addRow( - $.row('') .addPanel( $.containerMemoryWorkingSetPanel('Memory (workingset)', 'ingester'), ) .addPanel( $.goHeapInUsePanel('Memory (go heap inuse)', 'ingester'), ) - ) - .addRow( - $.row('') .addPanel( $.panel('Disk Writes') + $.queryPanel( From 179d741c5fec86d1f1e70de02a8d20dedc862a93 Mon Sep 17 00:00:00 2001 From: Paschalis Tsilias Date: Mon, 2 May 2022 13:57:42 +0300 Subject: [PATCH 049/223] promtail: Add support for exclusion patterns in `static_config` (#5943) * PR base Signed-off-by: Paschalis Tsilias * Add CHANGELOG entry; updated documentation for new label Signed-off-by: Paschalis Tsilias * Follow logic from t.path in exlusion case; always handle matching Signed-off-by: Paschalis Tsilias * Make error message unique Signed-off-by: Paschalis Tsilias * Only run if t.pathExclude is set Signed-off-by: Paschalis Tsilias * Revert "Only run if t.pathExclude is set" This reverts commit 17abb006ce141071c4bca4b7d133bea2aa314594. --- CHANGELOG.md | 1 + .../pkg/promtail/targets/file/filetarget.go | 25 +++- .../promtail/targets/file/filetarget_test.go | 138 +++++++++++++++++- .../targets/file/filetargetmanager.go | 13 +- .../sources/clients/promtail/configuration.md | 3 + docs/sources/clients/promtail/scraping.md | 6 + 6 files changed, 179 insertions(+), 7 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index a1cd34f92ef92..568ed3ae2a321 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,6 +1,7 @@ ## Main * [5984](https://github.com/grafana/loki/pull/5984) **dannykopping** and **salvacorts**: Querier: prevent unnecessary calls to ingesters. +* [5943](https://github.com/grafana/loki/pull/5943) **tpaschalis**: Add support for exclusion patterns in Promtail's static_config * [5879](https://github.com/grafana/loki/pull/5879) **MichelHollands**: Remove lines matching delete request expression when using "filter-and-delete" deletion mode. * [5899](https://github.com/grafana/loki/pull/5899) **simonswine**: Update go image to 1.17.9. 
* [5888](https://github.com/grafana/loki/pull/5888) **Papawy** Fix common config net interface name overwritten by ring common config diff --git a/clients/pkg/promtail/targets/file/filetarget.go b/clients/pkg/promtail/targets/file/filetarget.go index 8c69f081ddd15..e8f4a5d515dc8 100644 --- a/clients/pkg/promtail/targets/file/filetarget.go +++ b/clients/pkg/promtail/targets/file/filetarget.go @@ -69,6 +69,7 @@ type FileTarget struct { targetEventHandler chan fileTargetEvent watches map[string]struct{} path string + pathExclude string quit chan struct{} done chan struct{} @@ -84,6 +85,7 @@ func NewFileTarget( handler api.EntryHandler, positions positions.Positions, path string, + pathExclude string, labels model.LabelSet, discoveredLabels model.LabelSet, targetConfig *Config, @@ -94,6 +96,7 @@ func NewFileTarget( logger: logger, metrics: metrics, path: path, + pathExclude: pathExclude, labels: labels, discoveredLabels: discoveredLabels, handler: api.AddLabelsMiddleware(labels).Wrap(handler), @@ -184,7 +187,7 @@ func (t *FileTarget) run() { } func (t *FileTarget) sync() error { - var matches []string + var matches, matchesExcluded []string if fi, err := os.Stat(t.path); err == nil && !fi.IsDir() { // if the path points to a file that exists, then it we can skip the Glob search matches = []string{t.path} @@ -196,8 +199,26 @@ func (t *FileTarget) sync() error { } } + if fi, err := os.Stat(t.pathExclude); err == nil && !fi.IsDir() { + matchesExcluded = []string{t.pathExclude} + } else { + matchesExcluded, err = doublestar.Glob(t.pathExclude) + if err != nil { + return errors.Wrap(err, "filetarget.sync.filepathexclude.Glob") + } + } + + for i := 0; i < len(matchesExcluded); i++ { + for j := 0; j < len(matches); j++ { + if matchesExcluded[i] == matches[j] { + // exclude this specific match + matches = append(matches[:j], matches[j+1:]...) + } + } + } + if len(matches) == 0 { - level.Debug(t.logger).Log("msg", "no files matched requested path, nothing will be tailed", "path", t.path) + level.Debug(t.logger).Log("msg", "no files matched requested path, nothing will be tailed", "path", t.path, "pathExclude", t.pathExclude) } // Gets absolute path for each pattern. 
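The exclusion step in `sync` above removes each excluded path from `matches` with a nested loop, which is quadratic in the number of matches; for the handful of paths a glob typically yields that is fine. A sketch of an equivalent set-difference formulation, using a hypothetical helper that is not part of the patch:

```go
package main

import "fmt"

// excludeMatches filters matches against an exclusion list in O(n+m)
// by building a set once, instead of rescanning matches per excluded path.
func excludeMatches(matches, excluded []string) []string {
	excludedSet := make(map[string]struct{}, len(excluded))
	for _, e := range excluded {
		excludedSet[e] = struct{}{}
	}
	kept := matches[:0] // reuse the backing array
	for _, m := range matches {
		if _, ok := excludedSet[m]; !ok {
			kept = append(kept, m)
		}
	}
	return kept
}

func main() {
	matches := []string{"/tmp/log1/a.log", "/tmp/log3/b.log"}
	fmt.Println(excludeMatches(matches, []string{"/tmp/log3/b.log"})) // [/tmp/log1/a.log]
}
```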
diff --git a/clients/pkg/promtail/targets/file/filetarget_test.go b/clients/pkg/promtail/targets/file/filetarget_test.go index b4647a3128b5a..7f9a5ccea47be 100644 --- a/clients/pkg/promtail/targets/file/filetarget_test.go +++ b/clients/pkg/promtail/targets/file/filetarget_test.go @@ -67,7 +67,7 @@ func TestFileTargetSync(t *testing.T) { } }() path := logDir1 + "/*.log" - target, err := NewFileTarget(metrics, logger, client, ps, path, nil, nil, &Config{ + target, err := NewFileTarget(metrics, logger, client, ps, path, "", nil, nil, &Config{ SyncPeriod: 1 * time.Minute, // assure the sync is not called by the ticker }, nil, fakeHandler) assert.NoError(t, err) @@ -165,6 +165,140 @@ func TestFileTargetSync(t *testing.T) { ps.Stop() } +func TestFileTargetPathExclusion(t *testing.T) { + w := log.NewSyncWriter(os.Stderr) + logger := log.NewLogfmtLogger(w) + + dirName := newTestLogDirectories(t) + positionsFileName := filepath.Join(dirName, "positions.yml") + logDir1 := filepath.Join(dirName, "log1") + logDir2 := filepath.Join(dirName, "log2") + logDir3 := filepath.Join(dirName, "log3") + logFiles := []string{ + filepath.Join(logDir1, "test1.log"), + filepath.Join(logDir1, "test2.log"), + filepath.Join(logDir2, "test1.log"), + filepath.Join(logDir3, "test1.log"), + filepath.Join(logDir3, "test2.log"), + } + + // Set the sync period to a really long value, to guarantee the sync timer never runs, this way we know + // everything saved was done through channel notifications when target.stop() was called. + ps, err := positions.New(logger, positions.Config{ + SyncPeriod: 10 * time.Minute, + PositionsFile: positionsFileName, + }) + if err != nil { + t.Fatal(err) + } + + client := fake.New(func() {}) + defer client.Stop() + + metrics := NewMetrics(nil) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + fakeHandler := make(chan fileTargetEvent) + receivedStartWatch := atomic.NewInt32(0) + receivedStopWatch := atomic.NewInt32(0) + go func() { + for { + select { + case event := <-fakeHandler: + switch event.eventType { + case fileTargetEventWatchStart: + receivedStartWatch.Add(1) + case fileTargetEventWatchStop: + receivedStopWatch.Add(1) + } + case <-ctx.Done(): + return + } + } + }() + path := filepath.Join(dirName, "**", "*.log") + pathExclude := filepath.Join(dirName, "log3", "*.log") + target, err := NewFileTarget(metrics, logger, client, ps, path, pathExclude, nil, nil, &Config{ + SyncPeriod: 1 * time.Minute, // assure the sync is not called by the ticker + }, nil, fakeHandler) + assert.NoError(t, err) + + // Start with nothing watched. + if len(target.watches) != 0 { + t.Fatal("Expected watches to be 0 at this point in the test...") + } + if len(target.tails) != 0 { + t.Fatal("Expected tails to be 0 at this point in the test...") + } + + // Create the base directories, still nothing watched. + err = os.MkdirAll(logDir1, 0750) + assert.NoError(t, err) + err = os.MkdirAll(logDir2, 0750) + assert.NoError(t, err) + err = os.MkdirAll(logDir3, 0750) + assert.NoError(t, err) + + err = target.sync() + assert.NoError(t, err) + + if len(target.watches) != 0 { + t.Fatal("Expected watches to be 0 at this point in the test...") + } + if len(target.tails) != 0 { + t.Fatal("Expected tails to be 0 at this point in the test...") + } + + // Create all the files, which should create two directory watchers and three file tailers. 
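+ // Five files are created below, but the two under logDir3 match pathExclude, so the test expects only two directory watches and three tailers.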
+ for _, f := range logFiles { + _, err = os.Create(f) + assert.NoError(t, err) + } + + // Delay sync() call to make sure the filesystem watch event does not fire during sync() + time.Sleep(10 * time.Millisecond) + err = target.sync() + assert.NoError(t, err) + + assert.Equal(t, 2, len(target.watches), + "Expected watches to be 2 at this point in the test...", + ) + assert.Equal(t, 3, len(target.tails), + "Expected tails to be 3 at this point in the test...", + ) + require.Eventually(t, func() bool { + return receivedStartWatch.Load() == 2 + }, time.Second*10, time.Millisecond*1, "Expected received starting watch event to be 2 at this point in the test...") + require.Eventually(t, func() bool { + return receivedStopWatch.Load() == 0 + }, time.Second*10, time.Millisecond*1, "Expected received stopping watch event to be 0 at this point in the test...") + + // Remove the first directory, other tailer should stop and its watchers should go away. + // Only the non-excluded `logDir2` should be watched. + err = os.RemoveAll(logDir1) + assert.NoError(t, err) + + err = target.sync() + assert.NoError(t, err) + + assert.Equal(t, 1, len(target.watches), + "Expected watches to be 1 at this point in the test...", + ) + assert.Equal(t, 1, len(target.tails), + "Expected tails to be 1 at this point in the test...", + ) + require.Eventually(t, func() bool { + return receivedStartWatch.Load() == 2 + }, time.Second*10, time.Millisecond*1, "Expected received starting watch event to still be 2 at this point in the test...") + require.Eventually(t, func() bool { + return receivedStopWatch.Load() == 1 + }, time.Second*10, time.Millisecond*1, "Expected received stopping watch event to be 1 at this point in the test...") + + target.Stop() + ps.Stop() +} + func TestHandleFileCreationEvent(t *testing.T) { w := log.NewSyncWriter(os.Stderr) logger := log.NewLogfmtLogger(w) @@ -209,7 +343,7 @@ func TestHandleFileCreationEvent(t *testing.T) { } }() - target, err := NewFileTarget(metrics, logger, client, ps, path, nil, nil, &Config{ + target, err := NewFileTarget(metrics, logger, client, ps, path, "", nil, nil, &Config{ // To handle file creation event from channel, set enough long time as sync period SyncPeriod: 10 * time.Minute, }, fakeFileHandler, fakeTargetHandler) diff --git a/clients/pkg/promtail/targets/file/filetargetmanager.go b/clients/pkg/promtail/targets/file/filetargetmanager.go index 90a404f4cb00d..f1efb35a1388f 100644 --- a/clients/pkg/promtail/targets/file/filetargetmanager.go +++ b/clients/pkg/promtail/targets/file/filetargetmanager.go @@ -31,6 +31,7 @@ import ( const ( pathLabel = "__path__" + pathExcludeLabel = "__path_exclude__" hostLabel = "__host__" kubernetesPodNodeField = "spec.nodeName" ) @@ -310,6 +311,8 @@ func (s *targetSyncer) sync(groups []*targetgroup.Group, targetEventHandler chan continue } + pathExclude := labels[pathExcludeLabel] + for k := range labels { if strings.HasPrefix(string(k), "__") { delete(labels, k) @@ -317,6 +320,10 @@ func (s *targetSyncer) sync(groups []*targetgroup.Group, targetEventHandler chan } key := fmt.Sprintf("%s:%s", path, labels.String()) + if pathExclude != "" { + key = fmt.Sprintf("%s:%s", key, pathExclude) + } + targets[key] = struct{}{} if _, ok := s.targets[key]; ok { dropped = append(dropped, target.NewDroppedTarget("ignoring target, already exists", discoveredLabels)) @@ -333,7 +340,7 @@ func (s *targetSyncer) sync(groups []*targetgroup.Group, targetEventHandler chan watcher = make(chan fsnotify.Event) s.fileEventWatchers[wkey] = watcher } - t, err := 
s.newTarget(wkey, labels, discoveredLabels, watcher, targetEventHandler) + t, err := s.newTarget(wkey, string(pathExclude), labels, discoveredLabels, watcher, targetEventHandler) if err != nil { dropped = append(dropped, target.NewDroppedTarget(fmt.Sprintf("Failed to create target: %s", err.Error()), discoveredLabels)) level.Error(s.log).Log("msg", "Failed to create target", "key", key, "error", err) @@ -387,8 +394,8 @@ func (s *targetSyncer) sendFileCreateEvent(event fsnotify.Event) { } } -func (s *targetSyncer) newTarget(path string, labels model.LabelSet, discoveredLabels model.LabelSet, fileEventWatcher chan fsnotify.Event, targetEventHandler chan fileTargetEvent) (*FileTarget, error) { - return NewFileTarget(s.metrics, s.log, s.entryHandler, s.positions, path, labels, discoveredLabels, s.targetConfig, fileEventWatcher, targetEventHandler) +func (s *targetSyncer) newTarget(path, pathExclude string, labels model.LabelSet, discoveredLabels model.LabelSet, fileEventWatcher chan fsnotify.Event, targetEventHandler chan fileTargetEvent) (*FileTarget, error) { + return NewFileTarget(s.metrics, s.log, s.entryHandler, s.positions, path, pathExclude, labels, discoveredLabels, s.targetConfig, fileEventWatcher, targetEventHandler) } func (s *targetSyncer) DroppedTargets() []target.Target { diff --git a/docs/sources/clients/promtail/configuration.md b/docs/sources/clients/promtail/configuration.md index 3fbac0135ea62..713e85895a40b 100644 --- a/docs/sources/clients/promtail/configuration.md +++ b/docs/sources/clients/promtail/configuration.md @@ -1284,6 +1284,9 @@ labels: # The path to load logs from. Can use glob patterns (e.g., /var/log/*.log). __path__: + # Used to exclude files from being loaded. Can also use glob patterns. + __path_exclude__: + # Additional labels to assign to the logs [ <labelname>: <labelvalue> ... ] ``` diff --git a/docs/sources/clients/promtail/scraping.md b/docs/sources/clients/promtail/scraping.md index aa39797a26e41..6be07a852de70 100644 --- a/docs/sources/clients/promtail/scraping.md +++ b/docs/sources/clients/promtail/scraping.md @@ -53,6 +53,12 @@ There are different types of labels present in Promtail: - The `__path__` label is a special label which Promtail uses after discovery to figure out where the file to read is located. Wildcards are allowed, for example `/var/log/*.log` to get all files with a `log` extension in the specified directory, and `/var/log/**/*.log` for matching files and directories recursively. For a full list of options check out the docs for the [library](https://github.com/bmatcuk/doublestar) Promtail uses. +- The `__path_exclude__` label is another special label Promtail uses after + discovery, to exclude a subset of the files discovered using `__path__` from + being read in the current scrape_config block. It uses the same + [library](https://github.com/bmatcuk/doublestar) to enable usage of + wildcards and glob patterns. + - The label `filename` is added for every file found in `__path__` to ensure the uniqueness of the streams. It is set to the absolute path of the file the line was read from.
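Tying the docs above together, a small sketch of how an include pattern and an exclude pattern interact, using the same doublestar library the docs reference. The paths are hypothetical, and note that Promtail itself resolves patterns against the filesystem with `doublestar.Glob`; the sketch uses `doublestar.Match` to stay self-contained:

```go
package main

import (
	"fmt"

	"github.com/bmatcuk/doublestar"
)

func main() {
	// Hypothetical discovered files.
	files := []string{
		"/var/log/app/app.log",
		"/var/log/app/debug/trace.log",
	}
	include := "/var/log/**/*.log"       // would come from __path__
	exclude := "/var/log/**/debug/*.log" // would come from __path_exclude__

	for _, f := range files {
		in, _ := doublestar.Match(include, f)
		out, _ := doublestar.Match(exclude, f)
		fmt.Printf("%s tailed=%v\n", f, in && !out) // only app.log is tailed
	}
}
```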
From 8cd4694adbc7b3236e8908e14f51283e027a078d Mon Sep 17 00:00:00 2001 From: Sandeep Sukhani Date: Mon, 2 May 2022 17:31:57 +0530 Subject: [PATCH 050/223] allow more time for boltdb-shipper index syncs to finish (#6071) * retains index files with ingester for 20 mins instead of 12 mins after uploading * sets GetChunkIDs lookback to 2h41m instead of 2h28m --- pkg/loki/modules.go | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/pkg/loki/modules.go b/pkg/loki/modules.go index 6e648da86b4a0..d3660d1631b4c 100644 --- a/pkg/loki/modules.go +++ b/pkg/loki/modules.go @@ -412,13 +412,13 @@ func (t *Loki) initStore() (_ services.Service, err error) { // have query gaps on chunks flushed after an index entry is cached by keeping them retained in the ingester // and queried as part of live data until the cache TTL expires on the index entry. t.Cfg.Ingester.RetainPeriod = t.Cfg.StorageConfig.IndexCacheValidity + 1*time.Minute - t.Cfg.StorageConfig.BoltDBShipperConfig.IngesterDBRetainPeriod = boltdbShipperQuerierIndexUpdateDelay(t.Cfg) + 2*time.Minute + t.Cfg.StorageConfig.BoltDBShipperConfig.IngesterDBRetainPeriod = boltdbShipperQuerierIndexUpdateDelay(t.Cfg) case t.Cfg.isModuleEnabled(Querier), t.Cfg.isModuleEnabled(Ruler), t.Cfg.isModuleEnabled(Read), t.isModuleActive(IndexGateway): // We do not want query to do any updates to index t.Cfg.StorageConfig.BoltDBShipperConfig.Mode = shipper.ModeReadOnly default: t.Cfg.StorageConfig.BoltDBShipperConfig.Mode = shipper.ModeReadWrite - t.Cfg.StorageConfig.BoltDBShipperConfig.IngesterDBRetainPeriod = boltdbShipperQuerierIndexUpdateDelay(t.Cfg) + 2*time.Minute + t.Cfg.StorageConfig.BoltDBShipperConfig.IngesterDBRetainPeriod = boltdbShipperQuerierIndexUpdateDelay(t.Cfg) } } @@ -955,10 +955,12 @@ func calculateAsyncStoreQueryIngestersWithin(queryIngestersWithinConfig, minDura } // boltdbShipperQuerierIndexUpdateDelay returns the duration it could take for queriers to serve the index since it was uploaded. +// It considers up to 3 sync attempts for the indexgateway/queriers to be successful in syncing the files, to factor in worst-case scenarios like +// failures in sync, low download throughput, and the various kinds of caches in between, which can delay the sync operation from getting all the updates from the storage. // It also considers index cache validity because a querier could have cached index just before it was going to resync which means // it would keep serving index until the cache entries expire. func boltdbShipperQuerierIndexUpdateDelay(cfg Config) time.Duration { - return cfg.StorageConfig.IndexCacheValidity + cfg.StorageConfig.BoltDBShipperConfig.ResyncInterval + return cfg.StorageConfig.IndexCacheValidity + cfg.StorageConfig.BoltDBShipperConfig.ResyncInterval*3 } // boltdbShipperIngesterIndexUploadDelay returns the duration it could take for an index file containing id of a chunk to be uploaded to the shared store since it got flushed. @@ -967,9 +969,9 @@ func boltdbShipperIngesterIndexUploadDelay() time.Duration { } // boltdbShipperMinIngesterQueryStoreDuration returns the minimum duration (with some buffer) ingesters should query their stores to -// avoid missing any logs or chunk ids due to async nature of BoltDB Shipper. +// keep queriers from missing any logs or chunk ids due to the async nature of BoltDB Shipper.
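+// For illustration, assuming the common defaults of the time (2h max_chunk_age, a 15m+1m shipper upload delay, 5m index cache validity, 5m resync interval -- hypothetical values, check your config), the retain period works out to 5m + 3*5m = 20m and the lookback below to 2h + 16m + 20m + 5m = 2h41m, matching the commit message above.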
func boltdbShipperMinIngesterQueryStoreDuration(cfg Config) time.Duration { - return cfg.Ingester.MaxChunkAge + boltdbShipperIngesterIndexUploadDelay() + boltdbShipperQuerierIndexUpdateDelay(cfg) + 2*time.Minute + return cfg.Ingester.MaxChunkAge + boltdbShipperIngesterIndexUploadDelay() + boltdbShipperQuerierIndexUpdateDelay(cfg) + 5*time.Minute } // NewServerService constructs service from Server component. From 9c19b0f5482f23c121780eb84d81cc451dba82f4 Mon Sep 17 00:00:00 2001 From: "W.T. Chang" <1546333+wtchangdm@users.noreply.github.com> Date: Mon, 2 May 2022 21:51:28 +0800 Subject: [PATCH 051/223] Fix broken link on deployment-modes.md (#6068) The configuration link should go another parent level further in order to get `docs/loki/latest/configuration/`, currently it points to `docs/loki/latest/fundamentals/configuration/`. --- docs/sources/fundamentals/architecture/deployment-modes.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/sources/fundamentals/architecture/deployment-modes.md b/docs/sources/fundamentals/architecture/deployment-modes.md index b35dae81f5a1c..151d31a7cd330 100644 --- a/docs/sources/fundamentals/architecture/deployment-modes.md +++ b/docs/sources/fundamentals/architecture/deployment-modes.md @@ -35,7 +35,7 @@ as well as for small read/write volumes of up to approximately 100GB per day. Horizontally scale up a monolithic mode deployment to more instances by using a shared object store, and by configuring the -[`memberlist_config` section](../../configuration/#memberlist_config) +[`memberlist_config` section](../../../configuration/#memberlist_config) to share state between all instances. High availability can be configured by running two Loki instances From d9327ecd0c508a42eb270843ae9e61aad62b55b6 Mon Sep 17 00:00:00 2001 From: Corentin Altepe Date: Mon, 2 May 2022 15:53:38 +0200 Subject: [PATCH 052/223] Added NLog-Targets-Loki as unofficial C# client to Loki (#6061) Added a link to the HTTP C# client [NLog-Targets-Loki](https://github.com/corentinaltepe/nlog.loki) in Loki's clients documentation in the unofficial clients sections, right next to its Serilog counterpart. --- docs/sources/clients/_index.md | 1 + 1 file changed, 1 insertion(+) diff --git a/docs/sources/clients/_index.md b/docs/sources/clients/_index.md index 52060cd7db41c..08a445947fe61 100644 --- a/docs/sources/clients/_index.md +++ b/docs/sources/clients/_index.md @@ -70,6 +70,7 @@ when using or writing a third-party client. - [promtail-client](https://github.com/afiskon/promtail-client) (Go) - [push-to-loki.py](https://github.com/sleleko/devops-kb/blob/master/python/push-to-loki.py) (Python 3) - [Serilog-Sinks-Loki](https://github.com/JosephWoodward/Serilog-Sinks-Loki) (C#) +- [NLog-Targets-Loki](https://github.com/corentinaltepe/nlog.loki) (C#) - [loki-logback-appender](https://github.com/loki4j/loki-logback-appender) (Java) - [Log4j2 appender for Loki](https://github.com/tkowalcz/tjahzi) (Java) - [LokiLogger.jl](https://github.com/JuliaLogging/LokiLogger.jl) (Julia) From 00e210aa5113ee4fcb9fee7ed01d5b6a77bfbbc1 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 2 May 2022 16:33:11 +0200 Subject: [PATCH 053/223] Bump actions/checkout from 2 to 3 (#5558) Bumps [actions/checkout](https://github.com/actions/checkout) from 2 to 3. 
- [Release notes](https://github.com/actions/checkout/releases) - [Changelog](https://github.com/actions/checkout/blob/main/CHANGELOG.md) - [Commits](https://github.com/actions/checkout/compare/v2...v3) --- updated-dependencies: - dependency-name: actions/checkout dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/backport.yml | 2 +- .github/workflows/issue_commands.yml | 2 +- .github/workflows/metrics-collector.yml | 2 +- .github/workflows/operator-bundle.yaml | 2 +- .github/workflows/operator-images.yaml | 6 +++--- .github/workflows/operator-scorecard.yaml | 2 +- .github/workflows/operator.yaml | 8 ++++---- .github/workflows/publish.yml | 4 ++-- 8 files changed, 14 insertions(+), 14 deletions(-) diff --git a/.github/workflows/backport.yml b/.github/workflows/backport.yml index 0689bbf2c2b28..820d2758307ef 100644 --- a/.github/workflows/backport.yml +++ b/.github/workflows/backport.yml @@ -10,7 +10,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout Actions - uses: actions/checkout@v2 + uses: actions/checkout@v3 with: repository: "grafana/grafana-github-actions" path: ./actions diff --git a/.github/workflows/issue_commands.yml b/.github/workflows/issue_commands.yml index 57a05f677cc26..d09e2a09c3795 100644 --- a/.github/workflows/issue_commands.yml +++ b/.github/workflows/issue_commands.yml @@ -7,7 +7,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout Actions - uses: actions/checkout@v2 + uses: actions/checkout@v3 with: repository: "grafana/grafana-github-actions" path: ./actions diff --git a/.github/workflows/metrics-collector.yml b/.github/workflows/metrics-collector.yml index 4ae64c4c8e3f7..5b227db2e8e89 100644 --- a/.github/workflows/metrics-collector.yml +++ b/.github/workflows/metrics-collector.yml @@ -8,7 +8,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout Actions - uses: actions/checkout@v2 + uses: actions/checkout@v3 with: repository: "grafana/grafana-github-actions" path: ./actions diff --git a/.github/workflows/operator-bundle.yaml b/.github/workflows/operator-bundle.yaml index 2f7b1684be548..6712a4e467701 100644 --- a/.github/workflows/operator-bundle.yaml +++ b/.github/workflows/operator-bundle.yaml @@ -23,7 +23,7 @@ jobs: with: go-version: ${{ matrix.go }} id: go - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 - name: Install make run: sudo apt-get install make - name: make bundle diff --git a/.github/workflows/operator-images.yaml b/.github/workflows/operator-images.yaml index 500d3b063bef2..c7b5ba81b0ae6 100644 --- a/.github/workflows/operator-images.yaml +++ b/.github/workflows/operator-images.yaml @@ -18,7 +18,7 @@ jobs: publish-manager: runs-on: ubuntu-latest steps: - - uses: actions/checkout@master + - uses: actions/checkout@v3 - name: Set up QEMU uses: docker/setup-qemu-action@v1 @@ -56,7 +56,7 @@ jobs: publish-bundle: runs-on: ubuntu-latest steps: - - uses: actions/checkout@master + - uses: actions/checkout@v3 - name: Set up QEMU uses: docker/setup-qemu-action@v1 @@ -95,7 +95,7 @@ jobs: publish-size-calculator: runs-on: ubuntu-latest steps: - - uses: actions/checkout@master + - uses: actions/checkout@v3 - name: Set up QEMU uses: docker/setup-qemu-action@v1 diff --git a/.github/workflows/operator-scorecard.yaml b/.github/workflows/operator-scorecard.yaml index 2846c28c22f60..fa6dffc087dee 100644 --- a/.github/workflows/operator-scorecard.yaml +++ 
b/.github/workflows/operator-scorecard.yaml @@ -26,7 +26,7 @@ jobs: - uses: engineerd/setup-kind@v0.5.0 with: version: "v0.11.1" - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 - name: Install make run: sudo apt-get install make - name: Run scorecard diff --git a/.github/workflows/operator.yaml b/.github/workflows/operator.yaml index c41d9f09193e1..dcb3505b5872a 100644 --- a/.github/workflows/operator.yaml +++ b/.github/workflows/operator.yaml @@ -25,7 +25,7 @@ jobs: with: go-version: ${{ matrix.go }} id: go - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 - name: Lint uses: golangci/golangci-lint-action@v3.1.0 with: @@ -52,7 +52,7 @@ jobs: with: go-version: ${{ matrix.go }} id: go - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 - name: Build Manager working-directory: ./operator run: |- @@ -73,7 +73,7 @@ jobs: with: go-version: ${{ matrix.go }} id: go - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 - name: Build Broker working-directory: ./operator run: |- @@ -94,7 +94,7 @@ jobs: with: go-version: ${{ matrix.go }} id: go - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 - name: Run tests working-directory: ./operator run: go test -coverprofile=profile.cov ./... diff --git a/.github/workflows/publish.yml b/.github/workflows/publish.yml index 1ecde748a6682..6e273d86f5d65 100644 --- a/.github/workflows/publish.yml +++ b/.github/workflows/publish.yml @@ -12,7 +12,7 @@ jobs: # runs-on: ubuntu-latest # steps: # - name: Check out code - # uses: actions/checkout@v1 + # uses: actions/checkout@v3 # - name: Build Website # run: | # docker run -v ${PWD}/docs/sources:/hugo/content/docs/loki/latest --rm grafana/docs-base:latest /bin/bash -c 'mkdir -p content/docs/grafana/latest/ && touch content/docs/grafana/latest/menu.yaml && make prod' @@ -20,7 +20,7 @@ jobs: runs-on: ubuntu-latest # needs: test steps: - - uses: actions/checkout@v1 + - uses: actions/checkout@v3 - run: git clone --single-branch --no-tags --depth 1 -b master https://grafanabot:${{ secrets.GH_BOT_ACCESS_TOKEN }}@github.com/grafana/website-sync ./.github/actions/website-sync - name: publish-to-git uses: ./.github/actions/website-sync From bb2e952d6fcf1d1b79f393b8b8cf959af3f89bd8 Mon Sep 17 00:00:00 2001 From: Kaviraj Kanagaraj Date: Mon, 2 May 2022 17:00:00 +0200 Subject: [PATCH 054/223] Fix(query-frontend): `/label//values` endpoint to use right set of middlewares (#6072) * Fix(query-frontend): `/label//values` endpoint to use right middleware Previously, only `/label/` and `/labels` endpoint were using correct tripperware middleware on the query frontend(e.g: stats collector middleware) This PR fixes it for `/label//values` endpoint as well. This is useful to support `metrics.go` stats middleware for `label/{name}/values` endpoint as well. 
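As a rough usage sketch (the base URL, label name, and tenant ID below are hypothetical placeholders, not part of this PR), a label-values request that now flows through the tripperware could look like:

```go
package main

import (
	"fmt"
	"io"
	"net/http"
)

func main() {
	// After this fix, request paths ending in /values are classified as
	// label-name operations and pass through the same middleware chain
	// (e.g. stats collection) as /labels.
	req, err := http.NewRequest(http.MethodGet, "http://localhost:3100/loki/api/v1/label/app/values", nil)
	if err != nil {
		panic(err)
	}
	req.Header.Set("X-Scope-OrgID", "tenant-1") // required in multi-tenant setups

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, _ := io.ReadAll(resp.Body)
	fmt.Println(resp.Status, string(body))
}
```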
Signed-off-by: Kaviraj * Remove `unhandledpath` test Signed-off-by: Kaviraj * Remove unused test helper Signed-off-by: Kaviraj --- pkg/querier/queryrange/roundtrip.go | 2 +- pkg/querier/queryrange/roundtrip_test.go | 107 +++++++++++++++-------- 2 files changed, 73 insertions(+), 36 deletions(-) diff --git a/pkg/querier/queryrange/roundtrip.go b/pkg/querier/queryrange/roundtrip.go index 825e9e7ba7f4e..f85389099f6f2 100644 --- a/pkg/querier/queryrange/roundtrip.go +++ b/pkg/querier/queryrange/roundtrip.go @@ -232,7 +232,7 @@ func getOperation(path string) string { return QueryRangeOp case strings.HasSuffix(path, "/series"): return SeriesOp - case strings.HasSuffix(path, "/labels") || strings.HasSuffix(path, "/label"): + case strings.HasSuffix(path, "/labels") || strings.HasSuffix(path, "/label") || strings.HasSuffix(path, "/values"): return LabelNamesOp case strings.HasSuffix(path, "/v1/query"): return InstantQueryOp diff --git a/pkg/querier/queryrange/roundtrip_test.go b/pkg/querier/queryrange/roundtrip_test.go index f5df5c54be724..ae4591ae6529a 100644 --- a/pkg/querier/queryrange/roundtrip_test.go +++ b/pkg/querier/queryrange/roundtrip_test.go @@ -16,6 +16,7 @@ import ( "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/promql" "github.com/prometheus/prometheus/promql/parser" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/weaveworks/common/httpgrpc" "github.com/weaveworks/common/middleware" @@ -377,30 +378,6 @@ func TestLogNoRegex(t *testing.T) { require.NoError(t, err) } -func TestUnhandledPath(t *testing.T) { - tpw, stopper, err := NewTripperware(testConfig, util_log.Logger, fakeLimits{}, config.SchemaConfig{}, nil, nil) - if stopper != nil { - defer stopper.Stop() - } - require.NoError(t, err) - rt, err := newfakeRoundTripper() - require.NoError(t, err) - defer rt.Close() - - ctx := user.InjectOrgID(context.Background(), "1") - req, err := http.NewRequest(http.MethodGet, "/loki/api/v1/labels/foo/values", nil) - require.NoError(t, err) - req = req.WithContext(ctx) - err = user.InjectOrgIDIntoHTTPRequest(ctx, req) - require.NoError(t, err) - - count, h := errorResult() - rt.setHandler(h) - _, err = tpw(rt).RoundTrip(req) - require.Equal(t, 1, *count) - require.NoError(t, err) -} - func TestRegexpParamsSupport(t *testing.T) { l := WithSplitByLimits(fakeLimits{maxSeries: 1, maxQueryParallelism: 2}, 4*time.Hour) tpw, stopper, err := NewTripperware(testConfig, util_log.Logger, l, config.SchemaConfig{}, nil, nil) @@ -547,6 +524,77 @@ func TestEntriesLimitWithZeroTripperware(t *testing.T) { require.NoError(t, err) } +func Test_getOperation(t *testing.T) { + cases := []struct { + name string + path string + expectedOp string + }{ + { + name: "instant_query", + path: "/loki/api/v1/query", + expectedOp: InstantQueryOp, + }, + { + name: "range_query_prom", + path: "/prom/query", + expectedOp: QueryRangeOp, + }, + { + name: "range_query", + path: "/loki/api/v1/query_range", + expectedOp: QueryRangeOp, + }, + { + name: "series_query", + path: "/loki/api/v1/series", + expectedOp: SeriesOp, + }, + { + name: "series_query_prom", + path: "/prom/series", + expectedOp: SeriesOp, + }, + { + name: "labels_query", + path: "/loki/api/v1/labels", + expectedOp: LabelNamesOp, + }, + { + name: "labels_query_prom", + path: "/prom/labels", + expectedOp: LabelNamesOp, + }, + { + name: "label_query", + path: "/loki/api/v1/label", + expectedOp: LabelNamesOp, + }, + { + name: "labels_query_prom", + path: "/prom/label", + expectedOp: LabelNamesOp, + 
}, + { + name: "label_values_query", + path: "/loki/api/v1/label/__name__/values", + expectedOp: LabelNamesOp, + }, + { + name: "label_values_query_prom", + path: "/prom/label/__name__/values", + expectedOp: LabelNamesOp, + }, + } + + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + got := getOperation(tc.path) + assert.Equal(t, tc.expectedOp, got) + }) + } +} + type fakeLimits struct { maxQueryLength time.Duration maxQueryParallelism int @@ -605,17 +653,6 @@ func counter() (*int, http.Handler) { }) } -func errorResult() (*int, http.Handler) { - count := 0 - var lock sync.Mutex - return &count, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - lock.Lock() - defer lock.Unlock() - count++ - w.WriteHeader(http.StatusInternalServerError) - }) -} - func promqlResult(v parser.Value) (*int, http.Handler) { count := 0 var lock sync.Mutex From 98fda9b0a063f5a4160802ccd8490f1aa18d3d54 Mon Sep 17 00:00:00 2001 From: Ed Welch Date: Mon, 2 May 2022 14:34:19 -0400 Subject: [PATCH 055/223] Loki: Change live tailing to only allow mutating the log line not the number of streams. (#6063) * don't create more streams when live tailing logs, process the log line with the pipeline to allow mutating the content of the line but don't split it into new streams. Signed-off-by: Edward Welch * update changelog Signed-off-by: Edward Welch * don't mutate the incoming streams Signed-off-by: Edward Welch * do not reuse the pipeline because it caches labels Signed-off-by: Edward Welch * add note to upgrade guide Signed-off-by: Edward Welch --- CHANGELOG.md | 1 + docs/sources/upgrading/_index.md | 24 +++++++++++++ pkg/ingester/tailer.go | 59 ++++++++++++++------------------ 3 files changed, 50 insertions(+), 34 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 568ed3ae2a321..892819b080234 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -71,6 +71,7 @@ ##### Fixes * [5685](https://github.com/grafana/loki/pull/5685) **chaudum**: Assert that push values tuples consist of string values ##### Changes +* [6063](https://github.com/grafana/loki/pull/6063) **slim-bean**: Changes tailing API responses to not split a stream when using parsers in the query * [6042](https://github.com/grafana/loki/pull/6042) **slim-bean**: Add a new configuration to allow fudging of ingested timestamps to guarantee sort order of duplicate timestamps at query time. * [5777](https://github.com/grafana/loki/pull/5777) **tatchiuleung**: storage: make Azure blobID chunk delimiter configurable * [5650](https://github.com/grafana/loki/pull/5650) **cyriltovena**: Remove more chunkstore and schema version below v9 diff --git a/docs/sources/upgrading/_index.md b/docs/sources/upgrading/_index.md index 4b012c2b7bb74..5dc4f8dfabd8e 100644 --- a/docs/sources/upgrading/_index.md +++ b/docs/sources/upgrading/_index.md @@ -31,6 +31,30 @@ The output is incredibly verbose as it shows the entire internal config struct u ## Main / Unreleased +### Loki + +#### Tail API no longer creates multiple streams when using parsers. + +We expect this change to be non-impactful however it is a breaking change to existing behavior. + +This change would likely only affect anyone who's doing machine to machine type work with Loki's tail API +and is expecting a parser in a query to alter the streams in a tail response. + +Prior to this change a tail request with a parser (e.g. json, logfmt, regexp, pattern) would split the +incoming log stream into multiple streams based on the extracted labels after running the parser. 
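+
+As a hypothetical illustration: tailing `{app="nginx"} | json` could
+previously return separate streams such as `{app="nginx", level="info"}`
+and `{app="nginx", level="error"}` for a single incoming stream, one per
+set of extracted labels.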
+ +[PR 6063](https://github.com/grafana/loki/pull/6063) changes this behavior +to no longer split incoming streams when using a parser in the query, instead Loki will return exactly +the same streams with and without a parser in the query. + +We found a significant performance impact when using parsers on live tailing queries which would +result in turning a single stream with multiple entries into multiple streams with single entries. +Often leading to the case where the tailing client could not keep up with the number of streams +being pushed and tailing logs being dropped. + +This change will have no impact on viewing the tail output from Grafana or logcli. +Parsers can still be used to do filtering and reformatting of live tailed log lines. + ## 2.5.0 ### Loki diff --git a/pkg/ingester/tailer.go b/pkg/ingester/tailer.go index 4d034d7acb37b..4a16f155de47f 100644 --- a/pkg/ingester/tailer.go +++ b/pkg/ingester/tailer.go @@ -28,8 +28,7 @@ type tailer struct { id uint32 orgID string matchers []*labels.Matcher - pipeline syntax.Pipeline - expr syntax.Expr + expr syntax.LogSelectorExpr pipelineMtx sync.Mutex sendChan chan *logproto.Stream @@ -52,7 +51,9 @@ func newTailer(orgID, query string, conn TailServer, maxDroppedStreams int) (*ta if err != nil { return nil, err } - pipeline, err := expr.Pipeline() + // Make sure we can build a pipeline. The stream processing code doesn't have a place to handle + // this error so make sure we handle it here. + _, err = expr.Pipeline() if err != nil { return nil, err } @@ -61,7 +62,6 @@ func newTailer(orgID, query string, conn TailServer, maxDroppedStreams int) (*ta return &tailer{ orgID: orgID, matchers: matchers, - pipeline: pipeline, sendChan: make(chan *logproto.Stream, bufferSizeForTailResponse), conn: conn, droppedStreams: make([]*logproto.DroppedStream, 0, maxDroppedStreams), @@ -121,53 +121,44 @@ func (t *tailer) send(stream logproto.Stream, lbs labels.Labels) { return } - streams := t.processStream(stream, lbs) - if len(streams) == 0 { - return - } - for _, s := range streams { - select { - case t.sendChan <- s: - default: - t.dropStream(*s) - } + processed := t.processStream(stream, lbs) + select { + case t.sendChan <- processed: + default: + t.dropStream(stream) } } -func (t *tailer) processStream(stream logproto.Stream, lbs labels.Labels) []*logproto.Stream { +func (t *tailer) processStream(stream logproto.Stream, lbs labels.Labels) *logproto.Stream { + // Build a new pipeline for each call because the pipeline builds a cache of labels + // and if we don't start with a new pipeline that cache will grow unbounded. + // The error is ignored because it would be handled in the constructor of the tailer. + pipeline, _ := t.expr.Pipeline() + // Optimization: skip filtering entirely, if no filter is set - if log.IsNoopPipeline(t.pipeline) { - return []*logproto.Stream{&stream} + if log.IsNoopPipeline(pipeline) { + return &stream } // pipeline are not thread safe and tailer can process multiple stream at once. 
	t.pipelineMtx.Lock()
 	defer t.pipelineMtx.Unlock()

-	streams := map[uint64]*logproto.Stream{}
-
-	sp := t.pipeline.ForStream(lbs)
+	responseStream := &logproto.Stream{
+		Labels:  stream.Labels,
+		Entries: make([]logproto.Entry, 0, len(stream.Entries)),
+	}
+	sp := pipeline.ForStream(lbs)
 	for _, e := range stream.Entries {
-		newLine, parsedLbs, ok := sp.ProcessString(e.Timestamp.UnixNano(), e.Line)
+		newLine, _, ok := sp.ProcessString(e.Timestamp.UnixNano(), e.Line)
 		if !ok {
 			continue
 		}
-		var stream *logproto.Stream
-		if stream, ok = streams[parsedLbs.Hash()]; !ok {
-			stream = &logproto.Stream{
-				Labels: parsedLbs.String(),
-			}
-			streams[parsedLbs.Hash()] = stream
-		}
-		stream.Entries = append(stream.Entries, logproto.Entry{
+		responseStream.Entries = append(responseStream.Entries, logproto.Entry{
 			Timestamp: e.Timestamp,
 			Line:      newLine,
 		})
 	}
-	streamsResult := make([]*logproto.Stream, 0, len(streams))
-	for _, stream := range streams {
-		streamsResult = append(streamsResult, stream)
-	}
-	return streamsResult
+	return responseStream
 }

 // isMatching returns true if lbs matches all matchers.

From 7fff66014464893b519ac944979f11f38b6b2c3a Mon Sep 17 00:00:00 2001
From: Christian Haudum
Date: Tue, 3 May 2022 07:37:35 +0200
Subject: [PATCH 056/223] Add UDP protocol support to Promtail's syslog target (#5790)

Until now, the syslog target in Promtail only supported TCP. This PR adds
support for sending syslog messages to Promtail (via syslog-ng or rsyslog)
via UDP. UDP for syslog can be used if you prefer speed over guaranteed
delivery.

Signed-off-by: Christian Haudum
---
 CHANGELOG.md                                  |   1 +
 .../pkg/promtail/scrapeconfig/scrapeconfig.go |   4 +
 .../syslog/syslogparser/syslogparser.go       |   8 +-
 .../promtail/targets/syslog/syslogtarget.go   | 240 ++---------
 .../targets/syslog/syslogtarget_test.go       | 355 ++++++++++-----
 .../pkg/promtail/targets/syslog/transport.go  | 406 ++++++++++++++++++
 docs/sources/clients/promtail/scraping.md     |  19 +-
 7 files changed, 705 insertions(+), 328 deletions(-)
 create mode 100644 clients/pkg/promtail/targets/syslog/transport.go

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 892819b080234..ed4d4208fffb3 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,6 @@
 ## Main

+* [5790](https://github.com/grafana/loki/pull/5790) **chaudum**: Add UDP support for Promtail's syslog target.
 * [5984](https://github.com/grafana/loki/pull/5984) **dannykopping** and **salvacorts**: Querier: prevent unnecessary calls to ingesters.
 * [5943](https://github.com/grafana/loki/pull/5943) **tpaschalis**: Add support for exclusion patterns in Promtail's static_config
 * [5879](https://github.com/grafana/loki/pull/5879) **MichelHollands**: Remove lines matching delete request expression when using "filter-and-delete" deletion mode.
diff --git a/clients/pkg/promtail/scrapeconfig/scrapeconfig.go b/clients/pkg/promtail/scrapeconfig/scrapeconfig.go
index f05a4cacbceff..769755c6c662f 100644
--- a/clients/pkg/promtail/scrapeconfig/scrapeconfig.go
+++ b/clients/pkg/promtail/scrapeconfig/scrapeconfig.go
@@ -165,6 +165,10 @@ type SyslogTargetConfig struct {
 	// ListenAddress is the address to listen on for syslog messages.
 	ListenAddress string `yaml:"listen_address"`

+	// ListenProtocol is the protocol used to listen for syslog messages.
+	// Must be either `tcp` (default) or `udp`
+	ListenProtocol string `yaml:"listen_protocol"`
+
 	// IdleTimeout is the idle timeout for tcp connections.
IdleTimeout time.Duration `yaml:"idle_timeout"` diff --git a/clients/pkg/promtail/targets/syslog/syslogparser/syslogparser.go b/clients/pkg/promtail/targets/syslog/syslogparser/syslogparser.go index adfc44d2d525a..dfc76bb3538aa 100644 --- a/clients/pkg/promtail/targets/syslog/syslogparser/syslogparser.go +++ b/clients/pkg/promtail/targets/syslog/syslogparser/syslogparser.go @@ -15,20 +15,20 @@ import ( // detects octet counting. // The function returns on EOF or unrecoverable errors. func ParseStream(r io.Reader, callback func(res *syslog.Result), maxMessageLength int) error { - buf := bufio.NewReader(r) + buf := bufio.NewReaderSize(r, 1<<10) - firstByte, err := buf.Peek(1) + b, err := buf.ReadByte() if err != nil { return err } + _ = buf.UnreadByte() - b := firstByte[0] if b == '<' { nontransparent.NewParser(syslog.WithListener(callback), syslog.WithMaxMessageLength(maxMessageLength), syslog.WithBestEffort()).Parse(buf) } else if b >= '0' && b <= '9' { octetcounting.NewParser(syslog.WithListener(callback), syslog.WithMaxMessageLength(maxMessageLength), syslog.WithBestEffort()).Parse(buf) } else { - return fmt.Errorf("invalid or unsupported framing. first byte: '%s'", firstByte) + return fmt.Errorf("invalid or unsupported framing. first byte: '%s'", string(b)) } return nil diff --git a/clients/pkg/promtail/targets/syslog/syslogtarget.go b/clients/pkg/promtail/targets/syslog/syslogtarget.go index c2ff44da00f89..57d2efd991b7e 100644 --- a/clients/pkg/promtail/targets/syslog/syslogtarget.go +++ b/clients/pkg/promtail/targets/syslog/syslogtarget.go @@ -1,30 +1,22 @@ package syslog import ( - "context" - "crypto/tls" - "crypto/x509" "errors" "fmt" - "io/ioutil" "net" "strings" - "sync" "time" "github.com/go-kit/log" "github.com/go-kit/log/level" - "github.com/grafana/dskit/backoff" "github.com/influxdata/go-syslog/v3" "github.com/influxdata/go-syslog/v3/rfc5424" - "github.com/mwitkow/go-conntrack" "github.com/prometheus/common/model" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/relabel" "github.com/grafana/loki/clients/pkg/promtail/api" "github.com/grafana/loki/clients/pkg/promtail/scrapeconfig" - "github.com/grafana/loki/clients/pkg/promtail/targets/syslog/syslogparser" "github.com/grafana/loki/clients/pkg/promtail/targets/target" "github.com/grafana/loki/pkg/logproto" @@ -33,6 +25,7 @@ import ( var ( defaultIdleTimeout = 120 * time.Second defaultMaxMessageLength = 8192 + defaultProtocol = protocolTCP ) // SyslogTarget listens to syslog messages. 
@@ -44,12 +37,10 @@ type SyslogTarget struct { config *scrapeconfig.SyslogTargetConfig relabelConfig []*relabel.Config - listener net.Listener - messages chan message + transport Transport - ctx context.Context - ctxCancel context.CancelFunc - openConnections *sync.WaitGroup + messages chan message + messagesDone chan struct{} } type message struct { @@ -67,144 +58,42 @@ func NewSyslogTarget( config *scrapeconfig.SyslogTargetConfig, ) (*SyslogTarget, error) { - ctx, cancel := context.WithCancel(context.Background()) - t := &SyslogTarget{ metrics: metrics, logger: logger, handler: handler, config: config, relabelConfig: relabel, - - ctx: ctx, - ctxCancel: cancel, - openConnections: new(sync.WaitGroup), + messagesDone: make(chan struct{}), + } + + switch t.transportProtocol() { + case protocolTCP: + t.transport = NewSyslogTCPTransport( + config, + t.handleMessage, + t.handleMessageError, + logger, + ) + case protocolUDP: + t.transport = NewSyslogUDPTransport( + config, + t.handleMessage, + t.handleMessageError, + logger, + ) + default: + return nil, fmt.Errorf("invalid transport protocol. expected 'tcp' or 'udp', got '%s'", t.transportProtocol()) } t.messages = make(chan message) go t.messageSender(handler.Chan()) - err := t.run() - return t, err -} - -func (t *SyslogTarget) run() error { - l, err := net.Listen("tcp", t.config.ListenAddress) - l = conntrack.NewListener(l, conntrack.TrackWithName("syslog_target/"+t.config.ListenAddress)) + err := t.transport.Run() if err != nil { - return fmt.Errorf("error setting up syslog target: %w", err) - } - - tlsEnabled := t.config.TLSConfig.CertFile != "" || t.config.TLSConfig.KeyFile != "" || t.config.TLSConfig.CAFile != "" - if tlsEnabled { - tlsConfig, err := newTLSConfig(t.config.TLSConfig.CertFile, t.config.TLSConfig.KeyFile, t.config.TLSConfig.CAFile) - if err != nil { - return fmt.Errorf("error setting up syslog target: %w", err) - } - l = tls.NewListener(l, tlsConfig) - } - - t.listener = l - level.Info(t.logger).Log("msg", "syslog listening on address", "address", t.ListenAddress().String(), "tls", tlsEnabled) - - t.openConnections.Add(1) - go t.acceptConnections() - - return nil -} - -func newTLSConfig(certFile string, keyFile string, caFile string) (*tls.Config, error) { - if certFile == "" || keyFile == "" { - return nil, fmt.Errorf("certificate and key files are required") - } - - certs, err := tls.LoadX509KeyPair(certFile, keyFile) - if err != nil { - return nil, fmt.Errorf("unable to load server certificate or key: %w", err) - } - - tlsConfig := &tls.Config{ - Certificates: []tls.Certificate{certs}, - } - - if caFile != "" { - caCert, err := ioutil.ReadFile(caFile) - if err != nil { - return nil, fmt.Errorf("unable to load client CA certificate: %w", err) - } - - caCertPool := x509.NewCertPool() - if ok := caCertPool.AppendCertsFromPEM(caCert); !ok { - return nil, fmt.Errorf("unable to parse client CA certificate") - } - - tlsConfig.ClientCAs = caCertPool - tlsConfig.ClientAuth = tls.RequireAndVerifyClientCert - } - - return tlsConfig, nil -} - -func (t *SyslogTarget) acceptConnections() { - defer t.openConnections.Done() - - l := log.With(t.logger, "address", t.listener.Addr().String()) - - backoff := backoff.New(t.ctx, backoff.Config{ - MinBackoff: 5 * time.Millisecond, - MaxBackoff: 1 * time.Second, - }) - - for { - c, err := t.listener.Accept() - if err != nil { - if t.ctx.Err() != nil { - level.Info(l).Log("msg", "syslog server shutting down") - return - } - - if ne, ok := err.(net.Error); ok && ne.Temporary() { - 
level.Warn(l).Log("msg", "failed to accept syslog connection", "err", err, "num_retries", backoff.NumRetries()) - backoff.Wait() - continue - } - - level.Error(l).Log("msg", "failed to accept syslog connection. quiting", "err", err) - return - } - backoff.Reset() - - t.openConnections.Add(1) - go t.handleConnection(c) - } - -} - -func (t *SyslogTarget) handleConnection(cn net.Conn) { - defer t.openConnections.Done() - - c := &idleTimeoutConn{cn, t.idleTimeout()} - - handlerCtx, cancel := context.WithCancel(t.ctx) - defer cancel() - go func() { - <-handlerCtx.Done() - _ = c.Close() - }() - - connLabels := t.connectionLabels(c) - - err := syslogparser.ParseStream(c, func(result *syslog.Result) { - if err := result.Error; err != nil { - t.handleMessageError(err) - return - } - t.handleMessage(connLabels.Copy(), result.Message) - }, t.maxMessageLength()) - - if err != nil { - level.Warn(t.logger).Log("msg", "error initializing syslog stream", "err", err) + return nil, err } + return t, nil } func (t *SyslogTarget) handleMessageError(err error) { @@ -247,7 +136,7 @@ func (t *SyslogTarget) handleMessage(connLabels labels.Labels, msg syslog.Messag if t.config.LabelStructuredData && rfc5424Msg.StructuredData != nil { for id, params := range *rfc5424Msg.StructuredData { - id = strings.Replace(id, "@", "_", -1) + id = strings.ReplaceAll(id, "@", "_") for name, value := range params { key := "__syslog_message_sd_" + id + "_" + name lb.Set(key, value) @@ -295,33 +184,7 @@ func (t *SyslogTarget) messageSender(entries chan<- api.Entry) { } t.metrics.syslogEntries.Inc() } -} - -func (t *SyslogTarget) connectionLabels(c net.Conn) labels.Labels { - lb := labels.NewBuilder(nil) - for k, v := range t.config.Labels { - lb.Set(string(k), string(v)) - } - - ip := ipFromConn(c).String() - lb.Set("__syslog_connection_ip_address", ip) - lb.Set("__syslog_connection_hostname", lookupAddr(ip)) - - return lb.Labels() -} - -func ipFromConn(c net.Conn) net.IP { - switch addr := c.RemoteAddr().(type) { - case *net.TCPAddr: - return addr.IP - } - - return nil -} - -func lookupAddr(addr string) string { - names, _ := net.LookupAddr(addr) - return strings.Join(names, ",") + t.messagesDone <- struct{}{} } // Type returns SyslogTargetType. @@ -331,7 +194,7 @@ func (t *SyslogTarget) Type() target.TargetType { // Ready indicates whether or not the syslog target is ready to be read from. func (t *SyslogTarget) Ready() bool { - return true + return t.transport.Ready() } // DiscoveredLabels returns the set of labels discovered by the syslog target, which @@ -353,48 +216,23 @@ func (t *SyslogTarget) Details() interface{} { // Stop shuts down the SyslogTarget. func (t *SyslogTarget) Stop() error { - t.ctxCancel() - err := t.listener.Close() - t.openConnections.Wait() + err := t.transport.Close() + t.transport.Wait() close(t.messages) + // wait for all pending messages to be processed and sent to handler + <-t.messagesDone t.handler.Stop() return err } // ListenAddress returns the address SyslogTarget is listening on. 
func (t *SyslogTarget) ListenAddress() net.Addr { - return t.listener.Addr() -} - -func (t *SyslogTarget) idleTimeout() time.Duration { - if t.config.IdleTimeout != 0 { - return t.config.IdleTimeout - } - return defaultIdleTimeout + return t.transport.Addr() } -func (t *SyslogTarget) maxMessageLength() int { - if t.config.MaxMessageLength != 0 { - return t.config.MaxMessageLength +func (t *SyslogTarget) transportProtocol() string { + if t.config.ListenProtocol != "" { + return t.config.ListenProtocol } - return defaultMaxMessageLength -} - -type idleTimeoutConn struct { - net.Conn - idleTimeout time.Duration -} - -func (c *idleTimeoutConn) Write(p []byte) (int, error) { - c.setDeadline() - return c.Conn.Write(p) -} - -func (c *idleTimeoutConn) Read(b []byte) (int, error) { - c.setDeadline() - return c.Conn.Read(b) -} - -func (c *idleTimeoutConn) setDeadline() { - _ = c.Conn.SetDeadline(time.Now().Add(c.idleTimeout)) + return defaultProtocol } diff --git a/clients/pkg/promtail/targets/syslog/syslogtarget_test.go b/clients/pkg/promtail/targets/syslog/syslogtarget_test.go index cf72318ed2605..696d3461208b9 100644 --- a/clients/pkg/promtail/targets/syslog/syslogtarget_test.go +++ b/clients/pkg/promtail/targets/syslog/syslogtarget_test.go @@ -13,6 +13,7 @@ import ( "unicode/utf8" "github.com/go-kit/log" + "github.com/influxdata/go-syslog/v3" promconfig "github.com/prometheus/common/config" "github.com/prometheus/common/model" "github.com/prometheus/prometheus/model/relabel" @@ -21,6 +22,7 @@ import ( "github.com/grafana/loki/clients/pkg/promtail/client/fake" "github.com/grafana/loki/clients/pkg/promtail/scrapeconfig" + "github.com/grafana/loki/clients/pkg/promtail/targets/syslog/syslogparser" ) var ( @@ -231,64 +233,152 @@ o+KrhWQRriAj+GFMIpnT0r28EhOWS/d+f9ISk/it796YtDhfMb9GmV9VI7o= `) ) -func TestSyslogTarget_NewlineSeparatedMessages(t *testing.T) { - testSyslogTarget(t, false) -} - -func TestSyslogTarget_OctetCounting(t *testing.T) { - testSyslogTarget(t, true) -} - -func testSyslogTarget(t *testing.T, octetCounting bool) { - w := log.NewSyncWriter(os.Stderr) - logger := log.NewLogfmtLogger(w) - client := fake.New(func() {}) +type formatFunc func(string) string - metrics := NewMetrics(nil) - tgt, err := NewSyslogTarget(metrics, logger, client, relabelConfig(t), &scrapeconfig.SyslogTargetConfig{ - ListenAddress: "127.0.0.1:0", - LabelStructuredData: true, - Labels: model.LabelSet{ - "test": "syslog_target", - }, - }) - require.NoError(t, err) - defer func() { - require.NoError(t, tgt.Stop()) - }() - - addr := tgt.ListenAddress().String() - c, err := net.Dial("tcp", addr) - require.NoError(t, err) +var ( + fmtOctetCounting = func(s string) string { return fmt.Sprintf("%d %s", len(s), s) } + fmtNewline = func(s string) string { return s + "\n" } +) - messages := []string{ - `<165>1 2018-10-11T22:14:15.003Z host5 e - id1 [custom@32473 exkey="1"] An application event log entry...`, - `<165>1 2018-10-11T22:14:15.005Z host5 e - id2 [custom@32473 exkey="2"] An application event log entry...`, - `<165>1 2018-10-11T22:14:15.007Z host5 e - id3 [custom@32473 exkey="3"] An application event log entry...`, +func Benchmark_SyslogTarget(b *testing.B) { + for _, tt := range []struct { + name string + protocol string + formatFunc formatFunc + }{ + {"tcp", protocolTCP, fmtOctetCounting}, + {"udp", protocolUDP, fmtOctetCounting}, + } { + tt := tt + b.Run(tt.name, func(b *testing.B) { + client := fake.New(func() {}) + + metrics := NewMetrics(nil) + tgt, _ := NewSyslogTarget(metrics, log.NewNopLogger(), client, 
[]*relabel.Config{}, &scrapeconfig.SyslogTargetConfig{ + ListenAddress: "127.0.0.1:0", + ListenProtocol: tt.protocol, + LabelStructuredData: true, + Labels: model.LabelSet{ + "test": "syslog_target", + }, + }) + b.Cleanup(func() { + require.NoError(b, tgt.Stop()) + }) + require.Eventually(b, tgt.Ready, time.Second, 10*time.Millisecond) + + addr := tgt.ListenAddress().String() + + messages := []string{ + `<165>1 2022-04-08T22:14:10.001Z host1 app - id1 [custom@32473 exkey="1"] An application event log entry...`, + `<165>1 2022-04-08T22:14:11.002Z host2 app - id2 [custom@32473 exkey="1"] An application event log entry...`, + `<165>1 2022-04-08T22:14:12.003Z host1 app - id3 [custom@32473 exkey="1"] An application event log entry...`, + `<165>1 2022-04-08T22:14:13.004Z host2 app - id4 [custom@32473 exkey="1"] An application event log entry...`, + `<165>1 2022-04-08T22:14:14.005Z host1 app - id5 [custom@32473 exkey="1"] An application event log entry...`, + `<165>1 2022-04-08T22:14:15.002Z host2 app - id6 [custom@32473 exkey="1"] An application event log entry...`, + `<165>1 2022-04-08T22:14:16.003Z host1 app - id7 [custom@32473 exkey="1"] An application event log entry...`, + `<165>1 2022-04-08T22:14:17.004Z host2 app - id8 [custom@32473 exkey="1"] An application event log entry...`, + `<165>1 2022-04-08T22:14:18.005Z host1 app - id9 [custom@32473 exkey="1"] An application event log entry...`, + `<165>1 2022-04-08T22:14:19.001Z host2 app - id10 [custom@32473 exkey="1"] An application event log entry...`, + } + + b.ReportAllocs() + b.ResetTimer() + + c, _ := net.Dial(tt.protocol, addr) + for n := 0; n < b.N; n++ { + _ = writeMessagesToStream(c, messages, tt.formatFunc) + } + c.Close() + + require.Eventuallyf(b, func() bool { + return len(client.Received()) == len(messages)*b.N + }, 15*time.Second, time.Second, "expected: %d got:%d", len(messages)*b.N, len(client.Received())) + + }) } +} - err = writeMessagesToStream(c, messages, octetCounting) - require.NoError(t, err) - require.NoError(t, c.Close()) - - require.Eventuallyf(t, func() bool { - return len(client.Received()) == len(messages) - }, time.Second, time.Millisecond, "Expected to receive %d messages, got %d.", len(messages), len(client.Received())) - - require.Equal(t, model.LabelSet{ - "test": "syslog_target", - - "severity": "notice", - "facility": "local4", - "hostname": "host5", - "app_name": "e", - "msg_id": "id1", - - "sd_custom_exkey": "1", - }, client.Received()[0].Labels) - require.Equal(t, "An application event log entry...", client.Received()[0].Line) - - require.NotZero(t, client.Received()[0].Timestamp) +func TestSyslogTarget(t *testing.T) { + for _, tt := range []struct { + name string + protocol string + fmtFunc formatFunc + }{ + {"tpc newline separated", protocolTCP, fmtNewline}, + {"tpc octetcounting", protocolTCP, fmtOctetCounting}, + {"udp newline separated", protocolUDP, fmtNewline}, + {"udp octetcounting", protocolUDP, fmtOctetCounting}, + } { + tt := tt + t.Run(tt.name, func(t *testing.T) { + w := log.NewSyncWriter(os.Stderr) + logger := log.NewLogfmtLogger(w) + client := fake.New(func() {}) + + metrics := NewMetrics(nil) + tgt, err := NewSyslogTarget(metrics, logger, client, relabelConfig(t), &scrapeconfig.SyslogTargetConfig{ + MaxMessageLength: 1 << 12, // explicitly not use default value + ListenAddress: "127.0.0.1:0", + ListenProtocol: tt.protocol, + LabelStructuredData: true, + Labels: model.LabelSet{ + "test": "syslog_target", + }, + }) + require.NoError(t, err) + + require.Eventually(t, tgt.Ready, 
time.Second, 10*time.Millisecond) + + addr := tgt.ListenAddress().String() + c, err := net.Dial(tt.protocol, addr) + require.NoError(t, err) + + messages := []string{ + `<165>1 2018-10-11T22:14:15.003Z host5 e - id1 [custom@32473 exkey="1"] An application event log entry...`, + `<165>1 2018-10-11T22:14:15.005Z host5 e - id2 [custom@32473 exkey="2"] An application event log entry...`, + `<165>1 2018-10-11T22:14:15.007Z host5 e - id3 [custom@32473 exkey="3"] An application event log entry...`, + } + + err = writeMessagesToStream(c, messages, tt.fmtFunc) + require.NoError(t, err) + require.NoError(t, c.Close()) + + if tt.protocol == protocolUDP { + time.Sleep(time.Second) + require.NoError(t, tgt.Stop()) + } else { + defer func() { + require.NoError(t, tgt.Stop()) + }() + } + + require.Eventuallyf(t, func() bool { + return len(client.Received()) == len(messages) + }, time.Second, 10*time.Millisecond, "Expected to receive %d messages.", len(messages)) + + labels := make([]model.LabelSet, 0, len(messages)) + for _, entry := range client.Received() { + labels = append(labels, entry.Labels) + } + // we only check if one of the received entries contain the wanted label set + // because UDP does not guarantee the order of the messages + require.Contains(t, labels, model.LabelSet{ + "test": "syslog_target", + + "severity": "notice", + "facility": "local4", + "hostname": "host5", + "app_name": "e", + "msg_id": "id1", + + "sd_custom_exkey": "1", + }) + require.Equal(t, "An application event log entry...", client.Received()[0].Line) + + require.NotZero(t, client.Received()[0].Timestamp) + }) + } } func relabelConfig(t *testing.T) []*relabel.Config { @@ -316,72 +406,73 @@ func relabelConfig(t *testing.T) []*relabel.Config { return relabels } -func writeMessagesToStream(w io.Writer, messages []string, octetCounting bool) error { - var formatter func(string) string - - if octetCounting { - formatter = func(s string) string { - return fmt.Sprintf("%d %s", len(s), s) - } - } else { - formatter = func(s string) string { - return s + "\n" - } - } - +func writeMessagesToStream(w io.Writer, messages []string, formatter formatFunc) error { for _, msg := range messages { _, err := fmt.Fprint(w, formatter(msg)) if err != nil { return err } } - return nil } func TestSyslogTarget_RFC5424Messages(t *testing.T) { - w := log.NewSyncWriter(os.Stderr) - logger := log.NewLogfmtLogger(w) - client := fake.New(func() {}) - - metrics := NewMetrics(nil) - tgt, err := NewSyslogTarget(metrics, logger, client, []*relabel.Config{}, &scrapeconfig.SyslogTargetConfig{ - ListenAddress: "127.0.0.1:0", - LabelStructuredData: true, - Labels: model.LabelSet{ - "test": "syslog_target", - }, - UseRFC5424Message: true, - }) - require.NoError(t, err) - defer func() { - require.NoError(t, tgt.Stop()) - }() - - addr := tgt.ListenAddress().String() - c, err := net.Dial("tcp", addr) - require.NoError(t, err) - - messages := []string{ - `<165>1 2018-10-11T22:14:15.003Z host5 e - id1 [custom@32473 exkey="1"] An application event log entry...`, - `<165>1 2018-10-11T22:14:15.005Z host5 e - id2 [custom@32473 exkey="2"] An application event log entry...`, - `<165>1 2018-10-11T22:14:15.007Z host5 e - id3 [custom@32473 exkey="3"] An application event log entry...`, - } - - err = writeMessagesToStream(c, messages, false) - require.NoError(t, err) - require.NoError(t, c.Close()) - - require.Eventuallyf(t, func() bool { - return len(client.Received()) == len(messages) - }, time.Second, time.Millisecond, "Expected to receive %d messages, got %d.", 
len(messages), len(client.Received())) - - for i, m := range messages { - require.Equal(t, model.LabelSet{ - "test": "syslog_target", - }, client.Received()[i].Labels) - require.Equal(t, m, client.Received()[i].Line) - require.NotZero(t, client.Received()[i].Timestamp) + for _, tt := range []struct { + name string + protocol string + fmtFunc formatFunc + }{ + {"tpc newline separated", protocolTCP, fmtNewline}, + {"tpc octetcounting", protocolTCP, fmtOctetCounting}, + } { + tt := tt + t.Run(tt.name, func(t *testing.T) { + w := log.NewSyncWriter(os.Stderr) + logger := log.NewLogfmtLogger(w) + client := fake.New(func() {}) + + metrics := NewMetrics(nil) + tgt, err := NewSyslogTarget(metrics, logger, client, []*relabel.Config{}, &scrapeconfig.SyslogTargetConfig{ + ListenAddress: "127.0.0.1:0", + ListenProtocol: tt.protocol, + LabelStructuredData: true, + Labels: model.LabelSet{ + "test": "syslog_target", + }, + UseRFC5424Message: true, + }) + require.NoError(t, err) + require.Eventually(t, tgt.Ready, time.Second, 10*time.Millisecond) + defer func() { + require.NoError(t, tgt.Stop()) + }() + + addr := tgt.ListenAddress().String() + c, err := net.Dial(tt.protocol, addr) + require.NoError(t, err) + + messages := []string{ + `<165>1 2018-10-11T22:14:15.003Z host5 e - id1 [custom@32473 exkey="1"] An application event log entry...`, + `<165>1 2018-10-11T22:14:15.005Z host5 e - id2 [custom@32473 exkey="2"] An application event log entry...`, + `<165>1 2018-10-11T22:14:15.007Z host5 e - id3 [custom@32473 exkey="3"] An application event log entry...`, + } + + err = writeMessagesToStream(c, messages, tt.fmtFunc) + require.NoError(t, err) + require.NoError(t, c.Close()) + + require.Eventuallyf(t, func() bool { + return len(client.Received()) == len(messages) + }, time.Second, time.Millisecond, "Expected to receive %d messages, got %d.", len(messages), len(client.Received())) + + for i := range messages { + require.Equal(t, model.LabelSet{ + "test": "syslog_target", + }, client.Received()[i].Labels) + require.Contains(t, messages, client.Received()[i].Line) + require.NotZero(t, client.Received()[i].Timestamp) + } + }) } } @@ -417,14 +508,14 @@ func TestSyslogTarget_TLSConfigWithoutServerKey(t *testing.T) { func TestSyslogTarget_TLSConfig(t *testing.T) { t.Run("NewlineSeparatedMessages", func(t *testing.T) { - testSyslogTargetWithTLS(t, false) + testSyslogTargetWithTLS(t, fmtNewline) }) t.Run("OctetCounting", func(t *testing.T) { - testSyslogTargetWithTLS(t, true) + testSyslogTargetWithTLS(t, fmtOctetCounting) }) } -func testSyslogTargetWithTLS(t *testing.T, octetCounting bool) { +func testSyslogTargetWithTLS(t *testing.T, fmtFunc formatFunc) { caCertPool := x509.NewCertPool() caCertPool.AppendCertsFromPEM(caCert) @@ -483,7 +574,7 @@ func testSyslogTargetWithTLS(t *testing.T, octetCounting bool) { } messages := append(malformeddMessages, validMessages...) 
- err = writeMessagesToStream(c, messages, octetCounting) + err = writeMessagesToStream(c, messages, fmtFunc) require.NoError(t, err) require.NoError(t, c.Close()) @@ -526,14 +617,14 @@ func createTempFile(data []byte) (*os.File, error) { func TestSyslogTarget_TLSConfigVerifyClientCertificate(t *testing.T) { t.Run("NewlineSeparatedMessages", func(t *testing.T) { - testSyslogTargetWithTLSVerifyClientCertificate(t, false) + testSyslogTargetWithTLSVerifyClientCertificate(t, fmtNewline) }) t.Run("OctetCounting", func(t *testing.T) { - testSyslogTargetWithTLSVerifyClientCertificate(t, true) + testSyslogTargetWithTLSVerifyClientCertificate(t, fmtOctetCounting) }) } -func testSyslogTargetWithTLSVerifyClientCertificate(t *testing.T, octetCounting bool) { +func testSyslogTargetWithTLSVerifyClientCertificate(t *testing.T, fmtFunc formatFunc) { caCertFile, err := createTempFile(caCert) if err != nil { t.Fatalf("Unable to create CA certificate temporary file: %s", err) @@ -624,7 +715,7 @@ func testSyslogTargetWithTLSVerifyClientCertificate(t *testing.T, octetCounting `<165>1 2018-10-11T22:14:15.007Z host5 e - id3 [custom@32473 exkey="3"] An application event log entry...`, } - err = writeMessagesToStream(c, messages, octetCounting) + err = writeMessagesToStream(c, messages, fmtFunc) require.NoError(t, err) require.NoError(t, c.Close()) @@ -706,7 +797,7 @@ func TestSyslogTarget_NonUTF8Message(t *testing.T) { err = writeMessagesToStream(c, []string{ "<165>1 - - - - - - " + msg1, "<123>1 - - - - - - " + msg2, - }, true) + }, fmtOctetCounting) require.NoError(t, err) require.NoError(t, c.Close()) @@ -747,3 +838,29 @@ func TestSyslogTarget_IdleTimeout(t *testing.T) { _, err = c.Read(buf) require.EqualError(t, err, "EOF") } + +func TestParseStream_WithAsyncPipe(t *testing.T) { + lines := [3]string{ + "<165>1 2018-10-11T22:14:15.003Z host5 e - id1 [custom@32473 exkey=\"1\"] An application event log entry...\n", + "<165>1 2018-10-11T22:14:15.005Z host5 e - id2 [custom@32473 exkey=\"2\"] An application event log entry...\n", + "<165>1 2018-10-11T22:14:15.007Z host5 e - id3 [custom@32473 exkey=\"3\"] An application event log entry...\n", + } + + addr := &net.UDPAddr{IP: net.IP{127, 0, 0, 1}, Port: 1514} + pipe := NewConnPipe(addr) + go func() { + for _, line := range lines { + _, _ = pipe.Write([]byte(line)) + } + pipe.Close() + }() + + results := make([]*syslog.Result, 0) + cb := func(res *syslog.Result) { + results = append(results, res) + } + + err := syslogparser.ParseStream(pipe, cb, defaultMaxMessageLength) + require.NoError(t, err) + require.Equal(t, 3, len(results)) +} diff --git a/clients/pkg/promtail/targets/syslog/transport.go b/clients/pkg/promtail/targets/syslog/transport.go new file mode 100644 index 0000000000000..0d88d5a784ea1 --- /dev/null +++ b/clients/pkg/promtail/targets/syslog/transport.go @@ -0,0 +1,406 @@ +package syslog + +import ( + "context" + "crypto/tls" + "crypto/x509" + "fmt" + "io" + "io/ioutil" + "net" + "strings" + "sync" + "time" + + "github.com/grafana/dskit/backoff" + "github.com/mwitkow/go-conntrack" + + "github.com/go-kit/log" + "github.com/go-kit/log/level" + "github.com/grafana/loki/clients/pkg/promtail/scrapeconfig" + "github.com/grafana/loki/clients/pkg/promtail/targets/syslog/syslogparser" + "github.com/influxdata/go-syslog/v3" + "github.com/prometheus/prometheus/model/labels" +) + +var ( + protocolUDP = "udp" + protocolTCP = "tcp" +) + +type Transport interface { + Run() error + Addr() net.Addr + Ready() bool + Close() error + Wait() +} + +type handleMessage 
func(labels.Labels, syslog.Message) +type handleMessageError func(error) + +type baseTransport struct { + config *scrapeconfig.SyslogTargetConfig + logger log.Logger + + openConnections *sync.WaitGroup + + handleMessage handleMessage + handleMessageError handleMessageError + + ctx context.Context + ctxCancel context.CancelFunc +} + +func (t *baseTransport) close() { + t.ctxCancel() +} + +// Ready implements SyslogTransport +func (t *baseTransport) Ready() bool { + return t.ctx.Err() == nil +} + +func (t *baseTransport) idleTimeout() time.Duration { + if t.config.IdleTimeout != 0 { + return t.config.IdleTimeout + } + return defaultIdleTimeout +} + +func (t *baseTransport) maxMessageLength() int { + if t.config.MaxMessageLength != 0 { + return t.config.MaxMessageLength + } + return defaultMaxMessageLength +} + +func (t *baseTransport) connectionLabels(ip string) labels.Labels { + lb := labels.NewBuilder(nil) + for k, v := range t.config.Labels { + lb.Set(string(k), string(v)) + } + + lb.Set("__syslog_connection_ip_address", ip) + lb.Set("__syslog_connection_hostname", lookupAddr(ip)) + + return lb.Labels() +} + +func ipFromConn(c net.Conn) net.IP { + switch addr := c.RemoteAddr().(type) { + case *net.TCPAddr: + return addr.IP + } + + return nil +} + +func lookupAddr(addr string) string { + names, _ := net.LookupAddr(addr) + return strings.Join(names, ",") +} + +func newBaseTransport(config *scrapeconfig.SyslogTargetConfig, handleMessage handleMessage, handleError handleMessageError, logger log.Logger) *baseTransport { + ctx, cancel := context.WithCancel(context.Background()) + return &baseTransport{ + config: config, + logger: logger, + openConnections: new(sync.WaitGroup), + handleMessage: handleMessage, + handleMessageError: handleError, + ctx: ctx, + ctxCancel: cancel, + } +} + +type idleTimeoutConn struct { + net.Conn + idleTimeout time.Duration +} + +func (c *idleTimeoutConn) Write(p []byte) (int, error) { + c.setDeadline() + return c.Conn.Write(p) +} + +func (c *idleTimeoutConn) Read(b []byte) (int, error) { + c.setDeadline() + return c.Conn.Read(b) +} + +func (c *idleTimeoutConn) setDeadline() { + _ = c.Conn.SetDeadline(time.Now().Add(c.idleTimeout)) +} + +type ConnPipe struct { + addr net.Addr + *io.PipeReader + *io.PipeWriter +} + +func NewConnPipe(addr net.Addr) *ConnPipe { + pr, pw := io.Pipe() + return &ConnPipe{ + addr: addr, + PipeReader: pr, + PipeWriter: pw, + } +} + +func (pipe *ConnPipe) Close() error { + if err := pipe.PipeWriter.Close(); err != nil { + return err + } + return nil +} + +type TCPTransport struct { + *baseTransport + listener net.Listener +} + +func NewSyslogTCPTransport(config *scrapeconfig.SyslogTargetConfig, handleMessage handleMessage, handleError handleMessageError, logger log.Logger) Transport { + return &TCPTransport{ + baseTransport: newBaseTransport(config, handleMessage, handleError, logger), + } +} + +// Run implements SyslogTransport +func (t *TCPTransport) Run() error { + l, err := net.Listen(protocolTCP, t.config.ListenAddress) + l = conntrack.NewListener(l, conntrack.TrackWithName("syslog_target/"+t.config.ListenAddress)) + if err != nil { + return fmt.Errorf("error setting up syslog target: %w", err) + } + + tlsEnabled := t.config.TLSConfig.CertFile != "" || t.config.TLSConfig.KeyFile != "" || t.config.TLSConfig.CAFile != "" + if tlsEnabled { + tlsConfig, err := newTLSConfig(t.config.TLSConfig.CertFile, t.config.TLSConfig.KeyFile, t.config.TLSConfig.CAFile) + if err != nil { + return fmt.Errorf("error setting up syslog target: %w", err) + } + 
l = tls.NewListener(l, tlsConfig) + } + + t.listener = l + level.Info(t.logger).Log("msg", "syslog listening on address", "address", t.Addr().String(), "protocol", protocolTCP, "tls", tlsEnabled) + + t.openConnections.Add(1) + go t.acceptConnections() + + return nil +} + +func newTLSConfig(certFile string, keyFile string, caFile string) (*tls.Config, error) { + if certFile == "" || keyFile == "" { + return nil, fmt.Errorf("certificate and key files are required") + } + + certs, err := tls.LoadX509KeyPair(certFile, keyFile) + if err != nil { + return nil, fmt.Errorf("unable to load server certificate or key: %w", err) + } + + tlsConfig := &tls.Config{ + Certificates: []tls.Certificate{certs}, + } + + if caFile != "" { + caCert, err := ioutil.ReadFile(caFile) + if err != nil { + return nil, fmt.Errorf("unable to load client CA certificate: %w", err) + } + + caCertPool := x509.NewCertPool() + if ok := caCertPool.AppendCertsFromPEM(caCert); !ok { + return nil, fmt.Errorf("unable to parse client CA certificate") + } + + tlsConfig.ClientCAs = caCertPool + tlsConfig.ClientAuth = tls.RequireAndVerifyClientCert + } + + return tlsConfig, nil +} + +func (t *TCPTransport) acceptConnections() { + defer t.openConnections.Done() + + l := log.With(t.logger, "address", t.listener.Addr().String()) + + backoff := backoff.New(t.ctx, backoff.Config{ + MinBackoff: 5 * time.Millisecond, + MaxBackoff: 1 * time.Second, + }) + + for { + c, err := t.listener.Accept() + if err != nil { + if !t.Ready() { + level.Info(l).Log("msg", "syslog server shutting down", "protocol", protocolTCP, "err", t.ctx.Err()) + return + } + + if ne, ok := err.(net.Error); ok && ne.Temporary() { + level.Warn(l).Log("msg", "failed to accept syslog connection", "err", err, "num_retries", backoff.NumRetries()) + backoff.Wait() + continue + } + + level.Error(l).Log("msg", "failed to accept syslog connection. 
quiting", "err", err) + return + } + backoff.Reset() + + t.openConnections.Add(1) + go t.handleConnection(c) + } + +} + +func (t *TCPTransport) handleConnection(cn net.Conn) { + defer t.openConnections.Done() + + c := &idleTimeoutConn{cn, t.idleTimeout()} + + handlerCtx, cancel := context.WithCancel(t.ctx) + defer cancel() + go func() { + <-handlerCtx.Done() + _ = c.Close() + }() + + lbs := t.connectionLabels(ipFromConn(c).String()) + + err := syslogparser.ParseStream(c, func(result *syslog.Result) { + if err := result.Error; err != nil { + t.handleMessageError(err) + return + } + t.handleMessage(lbs.Copy(), result.Message) + }, t.maxMessageLength()) + + if err != nil { + level.Warn(t.logger).Log("msg", "error initializing syslog stream", "err", err) + } +} + +// Close implements SyslogTransport +func (t *TCPTransport) Close() error { + t.baseTransport.close() + return t.listener.Close() +} + +// Wait implements SyslogTransport +func (t *TCPTransport) Wait() { + t.openConnections.Wait() +} + +// Addr implements SyslogTransport +func (t *TCPTransport) Addr() net.Addr { + return t.listener.Addr() +} + +type UDPTransport struct { + *baseTransport + udpConn *net.UDPConn +} + +func NewSyslogUDPTransport(config *scrapeconfig.SyslogTargetConfig, handleMessage handleMessage, handleError handleMessageError, logger log.Logger) Transport { + return &UDPTransport{ + baseTransport: newBaseTransport(config, handleMessage, handleError, logger), + } +} + +// Run implements SyslogTransport +func (t *UDPTransport) Run() error { + var err error + addr, err := net.ResolveUDPAddr(protocolUDP, t.config.ListenAddress) + if err != nil { + return fmt.Errorf("error resolving UDP address: %w", err) + } + t.udpConn, err = net.ListenUDP(protocolUDP, addr) + if err != nil { + return fmt.Errorf("error setting up syslog target: %w", err) + } + _ = t.udpConn.SetReadBuffer(1024 * 1024) + level.Info(t.logger).Log("msg", "syslog listening on address", "address", t.Addr().String(), "protocol", protocolUDP) + + t.openConnections.Add(1) + go t.acceptPackets() + return nil +} + +// Close implements SyslogTransport +func (t *UDPTransport) Close() error { + t.baseTransport.close() + return t.udpConn.Close() +} + +func (t *UDPTransport) acceptPackets() { + defer t.openConnections.Done() + + var ( + n int + addr net.Addr + err error + ) + streams := make(map[string]*ConnPipe) + buf := make([]byte, t.maxMessageLength()) + + for { + if !t.Ready() { + level.Info(t.logger).Log("msg", "syslog server shutting down", "protocol", protocolUDP, "err", t.ctx.Err()) + for _, stream := range streams { + if err = stream.Close(); err != nil { + level.Error(t.logger).Log("msg", "failed to close pipe", "err", err) + } + } + return + } + n, addr, err = t.udpConn.ReadFrom(buf) + if n <= 0 && err != nil { + level.Warn(t.logger).Log("msg", "failed to read packets", "addr", addr, "err", err) + continue + } + + stream, ok := streams[addr.String()] + if !ok { + stream = NewConnPipe(addr) + streams[addr.String()] = stream + t.openConnections.Add(1) + go t.handleRcv(stream) + } + if _, err := stream.Write(buf[:n]); err != nil { + level.Warn(t.logger).Log("msg", "failed to write to stream", "addr", addr, "err", err) + } + } +} + +func (t *UDPTransport) handleRcv(c *ConnPipe) { + defer t.openConnections.Done() + + lbs := t.connectionLabels(c.addr.String()) + err := syslogparser.ParseStream(c, func(result *syslog.Result) { + if err := result.Error; err != nil { + t.handleMessageError(err) + } else { + t.handleMessage(lbs.Copy(), result.Message) + } + }, 
t.maxMessageLength()) + + if err != nil { + level.Warn(t.logger).Log("msg", "error parsing syslog stream", "err", err) + } +} + +// Wait implements SyslogTransport +func (t *UDPTransport) Wait() { + t.openConnections.Wait() +} + +// Addr implements SyslogTransport +func (t *UDPTransport) Addr() net.Addr { + return t.udpConn.LocalAddr() +} diff --git a/docs/sources/clients/promtail/scraping.md b/docs/sources/clients/promtail/scraping.md index 6be07a852de70..c530a57869b3d 100644 --- a/docs/sources/clients/promtail/scraping.md +++ b/docs/sources/clients/promtail/scraping.md @@ -226,7 +226,7 @@ When Promtail receives GCP logs, various internal labels are made available for ## Syslog Receiver Promtail supports receiving [IETF Syslog (RFC5424)](https://tools.ietf.org/html/rfc5424) -messages from a tcp stream. Receiving syslog messages is defined in a `syslog` +messages from a TCP or UDP stream. Receiving syslog messages is defined in a `syslog` stanza: ```yaml scrape_configs: - job_name: syslog syslog: listen_address: 0.0.0.0:1514 + listen_protocol: tcp idle_timeout: 60s label_structured_data: yes labels: @@ -244,8 +245,11 @@ scrape_configs: ``` The only required field in the syslog section is the `listen_address` field, -where a valid network address should be provided. The `idle_timeout` can help -with cleaning up stale syslog connections. If `label_structured_data` is set, +where a valid network address must be provided. The default protocol for +receiving messages is TCP. To change the protocol, set the `listen_protocol` field +to `udp`. Note that UDP does not support TLS. +The `idle_timeout` can help with cleaning up stale syslog connections. +If `label_structured_data` is set, [structured data](https://tools.ietf.org/html/rfc5424#section-6.3) in the syslog header will be translated to internal labels in the form of `__syslog_message_sd__`. @@ -275,10 +279,17 @@ destination d_loki { ### Rsyslog Output Configuration +For sending messages via TCP: + ``` -action(type="omfwd" protocol="tcp" port="" Template="RSYSLOG_SyslogProtocol23Format" TCP_Framing="octet-counted") +*.* action(type="omfwd" protocol="tcp" target="" port="" Template="RSYSLOG_SyslogProtocol23Format" TCP_Framing="octet-counted" KeepAlive="on") ``` +For sending messages via UDP: + +``` +*.* action(type="omfwd" protocol="udp" target="" port="" Template="RSYSLOG_SyslogProtocol23Format") +``` ## Kafka From 213bb86802559a0e57c34275a814747240fa77d2 Mon Sep 17 00:00:00 2001 From: Christian Simon Date: Tue, 3 May 2022 09:06:54 +0100 Subject: [PATCH 057/223] Add integration tests which test happy path (#5968) * Add integration tests which test happy path This PR adds simple Go-based integration tests, which can run Loki in two modes: - single-binary - micro-services with index-gateway After standing up, it performs the most basic duties: - ingest logs - run a range query - query label names and label values This is meant to catch more significant problems as early as possible in the PR. We should aim to keep this as simple as possible.
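Condensed, the happy path these tests exercise looks roughly like this — a sketch built only from the harness APIs added in this patch (tenant randomization and most assertions trimmed):

```go
package integration

import (
	"testing"
	"time"

	"github.com/stretchr/testify/require"

	"github.com/grafana/loki/integration/client"
	"github.com/grafana/loki/integration/cluster"
)

// Sketch: stand up a single-binary cluster, push a line, query it back.
func TestSketchHappyPath(t *testing.T) {
	clu := cluster.New()
	defer func() { require.NoError(t, clu.Cleanup()) }()

	// "all" runs every Loki component in one process (single-binary mode).
	all := clu.AddComponent("all", "-target=all")
	require.NoError(t, clu.Run())

	cli := client.New("test-tenant", "", all.HTTPURL().String())

	// Ingest a log line, then read it back through a range query.
	require.NoError(t, cli.PushLogLineWithTimestamp("lineA", cli.Now.Add(-time.Minute), map[string]string{"job": "fake"}))

	resp, err := cli.RunRangeQuery(`{job="fake"}`)
	require.NoError(t, err)
	require.Equal(t, "streams", resp.Data.ResultType)
}
```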
* Replace config generation with template Makes configuration much more readable and avoids easy mistakes * Apply suggestions from code review Co-authored-by: Michel Hollands <42814411+MichelHollands@users.noreply.github.com> Co-authored-by: Michel Hollands <42814411+MichelHollands@users.noreply.github.com> --- integration/client/client.go | 438 ++++++++++++++++++++++++ integration/cluster/cluster.go | 342 ++++++++++++++++++ integration/loki_micro_services_test.go | 106 ++++++ integration/loki_single_binary_test.go | 73 ++++ integration/shared_test.go | 20 ++ pkg/loki/loki.go | 5 +- pkg/storage/chunk/fetcher/fetcher.go | 11 +- 7 files changed, 989 insertions(+), 6 deletions(-) create mode 100644 integration/client/client.go create mode 100644 integration/cluster/cluster.go create mode 100644 integration/loki_micro_services_test.go create mode 100644 integration/loki_single_binary_test.go create mode 100644 integration/shared_test.go diff --git a/integration/client/client.go b/integration/client/client.go new file mode 100644 index 0000000000000..09ff9292c9378 --- /dev/null +++ b/integration/client/client.go @@ -0,0 +1,438 @@ +package client + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + "time" +) + +type roundTripper struct { + instanceID string + token string + injectHeaders map[string][]string + next http.RoundTripper +} + +func (r *roundTripper) RoundTrip(req *http.Request) (*http.Response, error) { + req.Header.Set("X-Scope-OrgID", r.instanceID) + if r.token != "" { + req.SetBasicAuth(r.instanceID, r.token) + } + + for key, values := range r.injectHeaders { + for _, v := range values { + req.Header.Add(key, v) + } + + fmt.Println(req.Header.Values(key)) + } + + return r.next.RoundTrip(req) +} + +type CortexClientOption interface { + Type() string +} + +type InjectHeadersOption map[string][]string + +func (n InjectHeadersOption) Type() string { + return "headerinject" +} + +// Client is a HTTP client that adds basic auth and scope +type Client struct { + Now time.Time + + httpClient *http.Client + baseURL string + instanceID string +} + +// NewLogsClient creates a new client +func New(instanceID, token, baseURL string, opts ...CortexClientOption) *Client { + rt := &roundTripper{ + instanceID: instanceID, + token: token, + next: http.DefaultTransport, + } + + for _, opt := range opts { + switch opt.Type() { + case "headerinject": + rt.injectHeaders = opt.(InjectHeadersOption) + } + } + + return &Client{ + Now: time.Now(), + httpClient: &http.Client{ + Transport: rt, + }, + baseURL: baseURL, + instanceID: instanceID, + } +} + +// PushLogLine creates a new logline with the current time as timestamp +func (c *Client) PushLogLine(line string, extraLabels ...map[string]string) error { + return c.pushLogLine(line, c.Now, extraLabels...) +} + +// PushLogLineWithTimestamp creates a new logline at the given timestamp +// The timestamp has to be a Unix timestamp (epoch seconds) +func (c *Client) PushLogLineWithTimestamp(line string, timestamp time.Time, extraLabelList ...map[string]string) error { + return c.pushLogLine(line, timestamp, extraLabelList...) 
+} + +func formatTS(ts time.Time) string { + return strconv.FormatInt(ts.UnixNano(), 10) +} + +type stream struct { + Stream map[string]string `json:"stream"` + Values [][]string `json:"values"` +} + +// pushLogLine creates a new logline +func (c *Client) pushLogLine(line string, timestamp time.Time, extraLabelList ...map[string]string) error { + apiEndpoint := fmt.Sprintf("%s/loki/api/v1/push", c.baseURL) + + s := stream{ + Stream: map[string]string{ + "job": "varlog", + }, + Values: [][]string{ + { + formatTS(timestamp), + line, + }, + }, + } + // add extra labels + for _, labelList := range extraLabelList { + for k, v := range labelList { + s.Stream[k] = v + } + } + + data, err := json.Marshal(&struct { + Streams []stream `json:"streams"` + }{ + Streams: []stream{s}, + }) + if err != nil { + return err + } + req, err := http.NewRequest("POST", apiEndpoint, bytes.NewReader(data)) + if err != nil { + return err + } + req.Header.Set("Content-Type", "application/json") + req.Header.Set("X-Scope-OrgID", c.instanceID) + + // Execute HTTP request + res, err := c.httpClient.Do(req) + if err != nil { + return err + } + + if res.StatusCode/100 == 2 { + defer res.Body.Close() + return nil + } + + buf, err := io.ReadAll(res.Body) + if err != nil { + return fmt.Errorf("reading request failed with status code %v: %w", res.StatusCode, err) + } + + return fmt.Errorf("request failed with status code %v: %w", res.StatusCode, errors.New(string(buf))) +} + +func (c *Client) Get(path string) (*http.Response, error) { + url := fmt.Sprintf("%s%s", c.baseURL, path) + req, err := http.NewRequest("GET", url, nil) + if err != nil { + return nil, err + } + return c.httpClient.Do(req) +} + +// Get all the metrics +func (c *Client) Metrics() (string, error) { + url := fmt.Sprintf("%s/metrics", c.baseURL) + res, err := http.Get(url) + if err != nil { + return "", err + } + + var sb strings.Builder + if _, err := io.Copy(&sb, res.Body); err != nil { + return "", err + } + + if res.StatusCode != http.StatusOK { + return "", fmt.Errorf("request failed with status code %d", res.StatusCode) + } + return sb.String(), nil +} + +// Flush all in-memory chunks held by the ingesters to the backing store +func (c *Client) Flush() error { + req, err := c.request("POST", fmt.Sprintf("%s/flush", c.baseURL)) + if err != nil { + return err + } + + req.Header.Set("Content-Type", "application/json") + + res, err := c.httpClient.Do(req) + if err != nil { + return err + } + defer res.Body.Close() + + if res.StatusCode/100 == 2 { + return nil + } + return fmt.Errorf("request failed with status code %d", res.StatusCode) +} + +// StreamValues holds a label key value pairs for the Stream and a list of a list of values +type StreamValues struct { + Stream map[string]string + Values [][]string +} + +// MatrixValues holds a label key value pairs for the metric and a list of a list of values +type MatrixValues struct { + Metric map[string]string + Values [][]interface{} +} + +// VectorValues holds a label key value pairs for the metric and single timestamp and value +type VectorValues struct { + Metric map[string]string `json:"metric"` + Time time.Time + Value string +} + +func (a *VectorValues) UnmarshalJSON(b []byte) error { + var s struct { + Metric map[string]string `json:"metric"` + Value []interface{} `json:"value"` + } + if err := json.Unmarshal(b, &s); err != nil { + return err + } + a.Metric = s.Metric + if len(s.Value) != 2 { + return fmt.Errorf("unexpected value length %d", len(s.Value)) + } + if ts, ok := s.Value[0].(int64); ok { + 
a.Time = time.Unix(ts, 0) + } + if val, ok := s.Value[1].(string); ok { + a.Value = val + } + return nil +} + +// DataType holds the result type and a list of StreamValues +type DataType struct { + ResultType string + Stream []StreamValues + Matrix []MatrixValues + Vector []VectorValues +} + +func (a *DataType) UnmarshalJSON(b []byte) error { + // get the result type + var s struct { + ResultType string `json:"resultType"` + Result json.RawMessage `json:"result"` + } + if err := json.Unmarshal(b, &s); err != nil { + return err + } + + switch s.ResultType { + case "streams": + if err := json.Unmarshal(s.Result, &a.Stream); err != nil { + return err + } + case "matrix": + if err := json.Unmarshal(s.Result, &a.Matrix); err != nil { + return err + } + case "vector": + if err := json.Unmarshal(s.Result, &a.Vector); err != nil { + return err + } + default: + return fmt.Errorf("unknown result type %s", s.ResultType) + } + a.ResultType = s.ResultType + return nil +} + +// Response holds the status and data +type Response struct { + Status string + Data DataType +} + +// RunRangeQuery runs a query and returns an error if anything went wrong +func (c *Client) RunRangeQuery(query string) (*Response, error) { + buf, statusCode, err := c.run(c.rangeQueryURL(query)) + if err != nil { + return nil, err + } + + return c.parseResponse(buf, statusCode) +} + +// RunQuery runs a query and returns an error if anything went wrong +func (c *Client) RunQuery(query string) (*Response, error) { + v := url.Values{} + v.Set("query", query) + v.Set("time", formatTS(c.Now.Add(time.Second))) + + u, err := url.Parse(c.baseURL) + if err != nil { + return nil, err + } + u.Path = "/loki/api/v1/query" + u.RawQuery = v.Encode() + + buf, statusCode, err := c.run(u.String()) + if err != nil { + return nil, err + } + + return c.parseResponse(buf, statusCode) +} + +func (c *Client) parseResponse(buf []byte, statusCode int) (*Response, error) { + lokiResp := Response{} + err := json.Unmarshal(buf, &lokiResp) + if err != nil { + return nil, fmt.Errorf("error parsing response data: %w", err) + } + + if statusCode/100 == 2 { + return &lokiResp, nil + } + return nil, fmt.Errorf("request failed with status code %d: %w", statusCode, errors.New(string(buf))) +} + +func (c *Client) rangeQueryURL(query string) string { + v := url.Values{} + v.Set("query", query) + v.Set("start", formatTS(c.Now.Add(-2*time.Hour))) + v.Set("end", formatTS(c.Now.Add(time.Second))) + + u, err := url.Parse(c.baseURL) + if err != nil { + panic(err) + } + u.Path = "/loki/api/v1/query_range" + u.RawQuery = v.Encode() + + return u.String() +} + +func (c *Client) LabelNames() ([]string, error) { + url := fmt.Sprintf("%s/loki/api/v1/labels", c.baseURL) + + req, err := c.request("GET", url) + if err != nil { + return nil, err + } + + res, err := c.httpClient.Do(req) + if err != nil { + return nil, err + } + defer res.Body.Close() + + if res.StatusCode/100 != 2 { + return nil, fmt.Errorf("Unexpected status code of %d", res.StatusCode) + } + + var values struct { + Data []string `json:"data"` + } + if err := json.NewDecoder(res.Body).Decode(&values); err != nil { + return nil, err + } + + return values.Data, nil +} + +// LabelValues return a LabelValues query +func (c *Client) LabelValues(labelName string) ([]string, error) { + url := fmt.Sprintf("%s/loki/api/v1/label/%s/values", c.baseURL, url.PathEscape(labelName)) + + req, err := c.request("GET", url) + if err != nil { + return nil, err + } + + res, err := c.httpClient.Do(req) + if err != nil { + return nil, err + } 
+ defer res.Body.Close() + + if res.StatusCode/100 != 2 { + return nil, fmt.Errorf("Unexpected status code of %d", res.StatusCode) + } + + var values struct { + Data []string `json:"data"` + } + if err := json.NewDecoder(res.Body).Decode(&values); err != nil { + return nil, err + } + + return values.Data, nil +} + +func (c *Client) request(method string, url string) (*http.Request, error) { + req, err := http.NewRequest(method, url, nil) + if err != nil { + return nil, err + } + req.Header.Set("X-Scope-OrgID", c.instanceID) + return req, nil +} + +func (c *Client) run(u string) ([]byte, int, error) { + req, err := c.request("GET", u) + if err != nil { + return nil, 0, err + } + + // Execute HTTP request + res, err := c.httpClient.Do(req) + if err != nil { + return nil, 0, err + } + defer res.Body.Close() + + buf, err := io.ReadAll(res.Body) + if err != nil { + return nil, 0, fmt.Errorf("request failed with status code %v: %w", res.StatusCode, err) + } + + return buf, res.StatusCode, nil +} diff --git a/integration/cluster/cluster.go b/integration/cluster/cluster.go new file mode 100644 index 0000000000000..787aefb068cf4 --- /dev/null +++ b/integration/cluster/cluster.go @@ -0,0 +1,342 @@ +package cluster + +import ( + "errors" + "flag" + "fmt" + "net" + "net/http/httptest" + "net/url" + "os" + "sync" + "text/template" + "time" + + "github.com/grafana/dskit/multierror" + "github.com/prometheus/client_golang/prometheus" + + "github.com/grafana/loki/pkg/loki" + "github.com/grafana/loki/pkg/util/cfg" +) + +var ( + wrapRegistryOnce sync.Once + + configTemplate = template.Must(template.New("").Parse(` +auth_enabled: true + +server: + http_listen_port: {{.httpPort}} + grpc_listen_port: {{.grpcPort}} + +common: + path_prefix: {{.dataPath}} + storage: + filesystem: + chunks_directory: {{.sharedDataPath}}/chunks + rules_directory: {{.sharedDataPath}}/rules + replication_factor: 1 + ring: + instance_addr: 127.0.0.1 + kvstore: + store: inmemory + +storage_config: + boltdb_shipper: + shared_store: filesystem + active_index_directory: {{.dataPath}}/index + cache_location: {{.dataPath}}/boltdb-cache + +schema_config: + configs: + - from: 2020-10-24 + store: boltdb-shipper + object_store: filesystem + schema: v11 + index: + prefix: index_ + period: 24h + +compactor: + working_directory: {{.dataPath}}/retention + shared_store: filesystem + retention_enabled: true + +analytics: + reporting_enabled: false + +ingester: + lifecycler: + min_ready_duration: 0s + +frontend_worker: + scheduler_address: localhost:{{.schedulerPort}} + +frontend: + scheduler_address: localhost:{{.schedulerPort}} +`)) +) + +func wrapRegistry() { + wrapRegistryOnce.Do(func() { + prometheus.DefaultRegisterer = &wrappedRegisterer{Registerer: prometheus.DefaultRegisterer} + }) +} + +type wrappedRegisterer struct { + prometheus.Registerer +} + +func (w *wrappedRegisterer) Register(collector prometheus.Collector) error { + if err := w.Registerer.Register(collector); err != nil { + var aErr prometheus.AlreadyRegisteredError + if errors.As(err, &aErr) { + return nil + } + return err + } + return nil +} + +func (w *wrappedRegisterer) MustRegister(collectors ...prometheus.Collector) { + for _, c := range collectors { + if err := w.Register(c); err != nil { + panic(err.Error()) + } + } +} + +type Cluster struct { + sharedPath string + components []*Component + waitGroup sync.WaitGroup +} + +func New() *Cluster { + wrapRegistry() + sharedPath, err := os.MkdirTemp("", "loki-shared-data") + if err != nil { + panic(err.Error()) + } + + return 
&Cluster{ + sharedPath: sharedPath, + } +} + +func (c *Cluster) Run() error { + for _, component := range c.components { + if err := component.run(); err != nil { + return err + } + } + return nil +} +func (c *Cluster) Cleanup() error { + var ( + files []string + dirs []string + ) + if c.sharedPath != "" { + dirs = append(dirs, c.sharedPath) + } + + // call all components cleanup + errs := multierror.New() + for _, component := range c.components { + f, d := component.cleanup() + files = append(files, f...) + dirs = append(dirs, d...) + } + if err := errs.Err(); err != nil { + return err + } + + // wait for all process to close + c.waitGroup.Wait() + + // cleanup dirs/files + for _, d := range dirs { + errs.Add(os.RemoveAll(d)) + } + for _, f := range files { + errs.Add(os.Remove(f)) + } + + return errs.Err() +} + +func (c *Cluster) AddComponent(name string, flags ...string) *Component { + component := &Component{ + name: name, + cluster: c, + flags: flags, + } + + var err error + component.httpPort, err = getFreePort() + if err != nil { + panic(fmt.Errorf("error allocating HTTP port: %w", err)) + } + + component.grpcPort, err = getFreePort() + if err != nil { + panic(fmt.Errorf("error allocating GRPC port: %w", err)) + } + + c.components = append(c.components, component) + return component +} + +type Component struct { + loki *loki.Loki + name string + cluster *Cluster + flags []string + + httpPort int + grpcPort int + + configFile string + dataPath string +} + +func (c *Component) HTTPURL() *url.URL { + return &url.URL{ + Host: fmt.Sprintf("localhost:%d", c.httpPort), + Scheme: "http", + } +} + +func (c *Component) GRPCURL() *url.URL { + return &url.URL{ + Host: fmt.Sprintf("localhost:%d", c.grpcPort), + Scheme: "grpc", + } +} + +func (c *Component) writeConfig() error { + var err error + + configFile, err := os.CreateTemp("", "loki-config") + if err != nil { + return fmt.Errorf("error creating config file: %w", err) + } + + c.dataPath, err = os.MkdirTemp("", "loki-data") + if err != nil { + return fmt.Errorf("error creating data path: %w", err) + } + + if err := configTemplate.Execute(configFile, map[string]interface{}{ + "dataPath": c.dataPath, + "sharedDataPath": c.cluster.sharedPath, + "grpcPort": c.grpcPort, + "httpPort": c.httpPort, + "schedulerPort": c.grpcPort, + }); err != nil { + return fmt.Errorf("error writing config file: %w", err) + } + + if err := configFile.Close(); err != nil { + return fmt.Errorf("error closing config file: %w", err) + } + c.configFile = configFile.Name() + + return nil +} + +func (c *Component) run() error { + if err := c.writeConfig(); err != nil { + return err + } + + var config loki.ConfigWrapper + + var flagset = flag.NewFlagSet("test-flags", flag.ExitOnError) + + if err := cfg.DynamicUnmarshal(&config, append( + c.flags, + "-config.file", + c.configFile, + ), flagset); err != nil { + return err + } + + if err := config.Validate(); err != nil { + return err + } + + var err error + c.loki, err = loki.New(config.Config) + if err != nil { + return err + } + + var ( + readyCh = make(chan struct{}) + errCh = make(chan error, 1) + ) + + go func() { + for { + time.Sleep(time.Millisecond * 200) + if c.loki.Server.HTTP == nil { + continue + } + + req := httptest.NewRequest("GET", "http://localhost/ready", nil) + w := httptest.NewRecorder() + c.loki.Server.HTTP.ServeHTTP(w, req) + + if w.Code == 200 { + close(readyCh) + return + } + } + }() + + c.cluster.waitGroup.Add(1) + go func() { + defer c.cluster.waitGroup.Done() + err := c.loki.Run(loki.RunOpts{}) + 
if err != nil { + errCh <- err + } + }() + + select { + case <-readyCh: + break + case err := <-errCh: + return err + } + + return nil +} + +// cleanup calls the stop handler and returns files and directories to be cleaned up +func (c *Component) cleanup() (files []string, dirs []string) { + if c.loki != nil { + c.loki.SignalHandler.Stop() + } + if c.configFile != "" { + files = append(files, c.configFile) + } + if c.dataPath != "" { + dirs = append(dirs, c.dataPath) + } + return files, dirs +} + +func getFreePort() (port int, err error) { + var a *net.TCPAddr + if a, err = net.ResolveTCPAddr("tcp", "localhost:0"); err == nil { + var l *net.TCPListener + if l, err = net.ListenTCP("tcp", a); err == nil { + defer l.Close() + return l.Addr().(*net.TCPAddr).Port, nil + } + } + return +} diff --git a/integration/loki_micro_services_test.go b/integration/loki_micro_services_test.go new file mode 100644 index 0000000000000..954dd8fce177b --- /dev/null +++ b/integration/loki_micro_services_test.go @@ -0,0 +1,106 @@ +package integration + +import ( + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/grafana/loki/integration/client" + "github.com/grafana/loki/integration/cluster" +) + +func TestMicroServicesIngestQuery(t *testing.T) { + clu := cluster.New() + defer func() { + assert.NoError(t, clu.Cleanup()) + }() + + var ( + tIndexGateway = clu.AddComponent( + "index-gateway", + "-target=index-gateway", + ) + tDistributor = clu.AddComponent( + "distributor", + "-target=distributor", + ) + tIngester = clu.AddComponent( + "ingester", + "-target=ingester", + "-boltdb.shipper.index-gateway-client.server-address="+tIndexGateway.GRPCURL().Host, + ) + tQueryScheduler = clu.AddComponent( + "query-scheduler", + "-target=query-scheduler", + "-boltdb.shipper.index-gateway-client.server-address="+tIndexGateway.GRPCURL().Host, + ) + tQueryFrontend = clu.AddComponent( + "query-frontend", + "-target=query-frontend", + "-frontend.scheduler-address="+tQueryScheduler.GRPCURL().Host, + "-boltdb.shipper.index-gateway-client.server-address="+tIndexGateway.GRPCURL().Host, + ) + _ = clu.AddComponent( + "querier", + "-target=querier", + "-querier.scheduler-address="+tQueryScheduler.GRPCURL().Host, + "-boltdb.shipper.index-gateway-client.server-address="+tIndexGateway.GRPCURL().Host, + ) + ) + + require.NoError(t, clu.Run()) + + tenantID := randStringRunes(12) + + now := time.Now() + cliDistributor := client.New(tenantID, "", tDistributor.HTTPURL().String()) + cliDistributor.Now = now + cliIngester := client.New(tenantID, "", tIngester.HTTPURL().String()) + cliIngester.Now = now + cliQueryFrontend := client.New(tenantID, "", tQueryFrontend.HTTPURL().String()) + cliQueryFrontend.Now = now + + t.Run("ingest-logs-store", func(t *testing.T) { + // ingest some log lines + require.NoError(t, cliDistributor.PushLogLineWithTimestamp("lineA", now.Add(-45*time.Minute), map[string]string{"job": "fake"})) + require.NoError(t, cliDistributor.PushLogLineWithTimestamp("lineB", now.Add(-45*time.Minute), map[string]string{"job": "fake"})) + + // TODO: Flushing is currently causing a panic, as the boltdb shipper is shared using a global variable in: + // https://github.com/grafana/loki/blob/66a4692423582ed17cce9bd86b69d55663dc7721/pkg/storage/factory.go#L32-L35 + //require.NoError(t, cliIngester.Flush()) + }) + + t.Run("ingest-logs-ingester", func(t *testing.T) { + // ingest some log lines + require.NoError(t, cliDistributor.PushLogLine("lineC", map[string]string{"job": 
"fake"})) + require.NoError(t, cliDistributor.PushLogLine("lineD", map[string]string{"job": "fake"})) + }) + + t.Run("query", func(t *testing.T) { + resp, err := cliQueryFrontend.RunRangeQuery(`{job="fake"}`) + require.NoError(t, err) + assert.Equal(t, "streams", resp.Data.ResultType) + + var lines []string + for _, stream := range resp.Data.Stream { + for _, val := range stream.Values { + lines = append(lines, val[1]) + } + } + assert.ElementsMatch(t, []string{"lineA", "lineB", "lineC", "lineD"}, lines) + }) + + t.Run("label-names", func(t *testing.T) { + resp, err := cliQueryFrontend.LabelNames() + require.NoError(t, err) + assert.ElementsMatch(t, []string{"__name__", "job"}, resp) + }) + + t.Run("label-values", func(t *testing.T) { + resp, err := cliQueryFrontend.LabelValues("job") + require.NoError(t, err) + assert.ElementsMatch(t, []string{"fake"}, resp) + }) +} diff --git a/integration/loki_single_binary_test.go b/integration/loki_single_binary_test.go new file mode 100644 index 0000000000000..b836685785a8a --- /dev/null +++ b/integration/loki_single_binary_test.go @@ -0,0 +1,73 @@ +package integration + +import ( + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/grafana/loki/integration/client" + "github.com/grafana/loki/integration/cluster" +) + +func TestSingleBinaryIngestQuery(t *testing.T) { + clu := cluster.New() + defer func() { + assert.NoError(t, clu.Cleanup()) + }() + + var ( + tAll = clu.AddComponent( + "all", + "-target=all", + ) + ) + + require.NoError(t, clu.Run()) + + tenantID := randStringRunes(12) + cli := client.New(tenantID, "", tAll.HTTPURL().String()) + + t.Run("ingest-logs-store", func(t *testing.T) { + // ingest some log lines + require.NoError(t, cli.PushLogLineWithTimestamp("lineA", cli.Now.Add(-45*time.Minute), map[string]string{"job": "fake"})) + require.NoError(t, cli.PushLogLineWithTimestamp("lineB", cli.Now.Add(-45*time.Minute), map[string]string{"job": "fake"})) + + // TODO: Flushing is currently causing a panic, as the boltdb shipper is shared using a global variable in: + // https://github.com/grafana/loki/blob/66a4692423582ed17cce9bd86b69d55663dc7721/pkg/storage/factory.go#L32-L35 + // require.NoError(t, cli.Flush()) + }) + + t.Run("ingest-logs-ingester", func(t *testing.T) { + // ingest some log lines + require.NoError(t, cli.PushLogLine("lineC", map[string]string{"job": "fake"})) + require.NoError(t, cli.PushLogLine("lineD", map[string]string{"job": "fake"})) + }) + + t.Run("query", func(t *testing.T) { + resp, err := cli.RunRangeQuery(`{job="fake"}`) + require.NoError(t, err) + assert.Equal(t, "streams", resp.Data.ResultType) + + var lines []string + for _, stream := range resp.Data.Stream { + for _, val := range stream.Values { + lines = append(lines, val[1]) + } + } + assert.ElementsMatch(t, []string{"lineA", "lineB", "lineC", "lineD"}, lines) + }) + + t.Run("label-names", func(t *testing.T) { + resp, err := cli.LabelNames() + require.NoError(t, err) + assert.ElementsMatch(t, []string{"__name__", "job"}, resp) + }) + + t.Run("label-values", func(t *testing.T) { + resp, err := cli.LabelValues("job") + require.NoError(t, err) + assert.ElementsMatch(t, []string{"fake"}, resp) + }) +} diff --git a/integration/shared_test.go b/integration/shared_test.go new file mode 100644 index 0000000000000..9ee7316401464 --- /dev/null +++ b/integration/shared_test.go @@ -0,0 +1,20 @@ +package integration + +import ( + "math/rand" + "time" +) + +func init() { + rand.Seed(time.Now().UnixNano()) +} 
+ +var letterRunes = []rune("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ") + +func randStringRunes(n int) string { + b := make([]rune, n) + for i := range b { + b[i] = letterRunes[rand.Intn(len(letterRunes))] + } + return string(b) +} diff --git a/pkg/loki/loki.go b/pkg/loki/loki.go index c811333d66caf..5ce12b9e9ea44 100644 --- a/pkg/loki/loki.go +++ b/pkg/loki/loki.go @@ -228,6 +228,7 @@ type Loki struct { ModuleManager *modules.Manager serviceMap map[string]services.Service deps map[string][]string + SignalHandler *signals.Handler Server *server.Server ring *ring.Ring @@ -395,9 +396,9 @@ func (t *Loki) Run(opts RunOpts) error { sm.AddListener(services.NewManagerListener(healthy, stopped, serviceFailed)) // Setup signal handler. If signal arrives, we stop the manager, which stops all the services. - handler := signals.NewHandler(t.Server.Log) + t.SignalHandler = signals.NewHandler(t.Server.Log) go func() { - handler.Loop() + t.SignalHandler.Loop() sm.StopAsync() }() diff --git a/pkg/storage/chunk/fetcher/fetcher.go b/pkg/storage/chunk/fetcher/fetcher.go index bf1bab6364ba2..94e7e82b76869 100644 --- a/pkg/storage/chunk/fetcher/fetcher.go +++ b/pkg/storage/chunk/fetcher/fetcher.go @@ -57,6 +57,7 @@ type Fetcher struct { maxAsyncBufferSize int asyncQueue chan []chunk.Chunk + stopOnce sync.Once stop chan struct{} } @@ -126,10 +127,12 @@ func (c *Fetcher) asyncWriteBackCacheQueueProcessLoop() { // Stop the ChunkFetcher. func (c *Fetcher) Stop() { - close(c.decodeRequests) - c.wait.Wait() - c.cache.Stop() - close(c.stop) + c.stopOnce.Do(func() { + close(c.decodeRequests) + c.wait.Wait() + c.cache.Stop() + close(c.stop) + }) } func (c *Fetcher) worker() { From 6392ddb71bb89c089efcc3b099113db00ca797c5 Mon Sep 17 00:00:00 2001 From: Sashank Agarwal Date: Tue, 3 May 2022 13:45:04 +0530 Subject: [PATCH 058/223] Auto add labels on new pull request (#6065) * add auto labeler github action * add auto labeler github action --- .github/labeler.yml | 7 +++++++ .github/workflows/labeler.yml | 14 ++++++++++++++ 2 files changed, 21 insertions(+) create mode 100644 .github/labeler.yml create mode 100644 .github/workflows/labeler.yml diff --git a/.github/labeler.yml b/.github/labeler.yml new file mode 100644 index 0000000000000..936da888352c4 --- /dev/null +++ b/.github/labeler.yml @@ -0,0 +1,7 @@ +area/docs: +- 'docs/**/*' +- 'operator/docs/*' +sig/operator: +- 'operator/**/*' +kind/feature: +- 'operator/docs/enhancements/*' diff --git a/.github/workflows/labeler.yml b/.github/workflows/labeler.yml new file mode 100644 index 0000000000000..19066bc7dc75c --- /dev/null +++ b/.github/workflows/labeler.yml @@ -0,0 +1,14 @@ +name: "Pull Request Labeler" +on: +- pull_request_target + +jobs: +- triage: + permissions: + contents: read + pull-requests: write + runs-on: ubuntu-latest + steps: + - uses: actions/labeler@v4 + with: + repo-token: "${{ secrets.GITHUB_TOKEN }}" From 8b15632dd8e584c04facd48a1cbe26aa807516e3 Mon Sep 17 00:00:00 2001 From: Kaviraj Kanagaraj Date: Tue, 3 May 2022 11:17:58 +0200 Subject: [PATCH 059/223] Fix(query-frontend): LokiCodec encode/decode works correctly with label values endpoint (#6084) When encoding/decoding an `http.Request` into a `querybase.Request`, we re-create `request.Path`. Before this PR, both the `/labels/` and `/labels/{name}/values` endpoints were decoded into the same URL path, `/labels/`, before being forwarded to the querier. So a label-values request always returned label names as the response.
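The failure mode is easiest to see on the URL itself. A minimal, runnable illustration (`before`/`after` are illustrative names, not identifiers from the codec; the endpoint path matches the test added below):

```go
package main

import (
	"fmt"
	"net/url"
)

func main() {
	// Path as decoded from the incoming label-values request.
	requestPath := "/loki/api/v1/labels/__name__/values"

	// Before the fix: the encoder rebuilt the URL with a hardcoded path,
	// silently turning a label-values request into a label-names request.
	before := url.URL{Path: "/loki/api/v1/labels"}

	// After the fix: the decoded path is forwarded as-is.
	after := url.URL{Path: requestPath}

	fmt.Println(before.String()) // /loki/api/v1/labels (wrong endpoint)
	fmt.Println(after.String())  // /loki/api/v1/labels/__name__/values
}
```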
This bug became visible after enabling all the middleware for `/labels/{name}/values` endpoint as well via #6072 Signed-off-by: Kaviraj --- pkg/querier/queryrange/codec.go | 2 +- pkg/querier/queryrange/codec_test.go | 20 ++++++++++++++++++++ 2 files changed, 21 insertions(+), 1 deletion(-) diff --git a/pkg/querier/queryrange/codec.go b/pkg/querier/queryrange/codec.go index cb5c1acca26cd..4d140a871da42 100644 --- a/pkg/querier/queryrange/codec.go +++ b/pkg/querier/queryrange/codec.go @@ -331,7 +331,7 @@ func (Codec) EncodeRequest(ctx context.Context, r queryrangebase.Request) (*http } u := &url.URL{ - Path: "/loki/api/v1/labels", + Path: request.Path, // NOTE: this could be either /label or /label/{name}/values endpoint. So forward the original path as it is. RawQuery: params.Encode(), } req := &http.Request{ diff --git a/pkg/querier/queryrange/codec_test.go b/pkg/querier/queryrange/codec_test.go index f61abbafcb099..bcdfd120a072b 100644 --- a/pkg/querier/queryrange/codec_test.go +++ b/pkg/querier/queryrange/codec_test.go @@ -304,6 +304,26 @@ func Test_codec_labels_EncodeRequest(t *testing.T) { require.Equal(t, toEncode.StartTs, req.(*LokiLabelNamesRequest).StartTs) require.Equal(t, toEncode.EndTs, req.(*LokiLabelNamesRequest).EndTs) require.Equal(t, "/loki/api/v1/labels", req.(*LokiLabelNamesRequest).Path) + + // Test labels values endpoint + toEncode = &LokiLabelNamesRequest{ + Path: "/loki/api/v1/labels/__name__/values", + StartTs: start, + EndTs: end, + } + got, err = LokiCodec.EncodeRequest(ctx, toEncode) + require.NoError(t, err) + require.Equal(t, ctx, got.Context()) + require.Equal(t, "/loki/api/v1/labels/__name__/values", got.URL.Path) + require.Equal(t, fmt.Sprintf("%d", start.UnixNano()), got.URL.Query().Get("start")) + require.Equal(t, fmt.Sprintf("%d", end.UnixNano()), got.URL.Query().Get("end")) + + // testing a full roundtrip + req, err = LokiCodec.DecodeRequest(context.TODO(), got, nil) + require.NoError(t, err) + require.Equal(t, toEncode.StartTs, req.(*LokiLabelNamesRequest).StartTs) + require.Equal(t, toEncode.EndTs, req.(*LokiLabelNamesRequest).EndTs) + require.Equal(t, "/loki/api/v1/labels/__name__/values", req.(*LokiLabelNamesRequest).Path) } func Test_codec_EncodeResponse(t *testing.T) { From 93de7a7061f4ada4213874247668ae66480e6695 Mon Sep 17 00:00:00 2001 From: Owen Diehl Date: Tue, 3 May 2022 07:15:39 -0400 Subject: [PATCH 060/223] renames skip -> ok in ProcessString (#6064) * renames skip -> ok in ProcessString Signed-off-by: Owen Diehl * streampipeline matches convention Signed-off-by: Owen Diehl --- pkg/chunkenc/memchunk.go | 9 ++--- pkg/chunkenc/unordered.go | 5 +-- pkg/logcli/client/file.go | 5 +-- pkg/logql/log/metrics_extraction.go | 8 ++--- pkg/logql/log/pipeline.go | 12 +++---- pkg/logql/log/pipeline_test.go | 54 ++++++++++++++--------------- pkg/logql/syntax/ast_test.go | 8 ++--- pkg/logql/syntax/parser_test.go | 4 +-- pkg/logql/test_utils.go | 2 +- 9 files changed, 55 insertions(+), 52 deletions(-) diff --git a/pkg/chunkenc/memchunk.go b/pkg/chunkenc/memchunk.go index ffb4f179a0de1..4dec0ca13301a 100644 --- a/pkg/chunkenc/memchunk.go +++ b/pkg/chunkenc/memchunk.go @@ -1010,12 +1010,13 @@ func (hb *headBlock) Iterator(ctx context.Context, direction logproto.Direction, return } stats.AddHeadChunkBytes(int64(len(e.s))) - newLine, parsedLbs, ok := pipeline.ProcessString(e.t, e.s) - if !ok { + newLine, parsedLbs, matches := pipeline.ProcessString(e.t, e.s) + if !matches { return } var stream *logproto.Stream labels := parsedLbs.Labels().String() + var ok bool 
if stream, ok = streams[labels]; !ok { stream = &logproto.Stream{ Labels: labels, @@ -1267,8 +1268,8 @@ func (e *entryBufferedIterator) StreamHash() uint64 { return e.pipeline.BaseLabe func (e *entryBufferedIterator) Next() bool { for e.bufferedIterator.Next() { - newLine, lbs, ok := e.pipeline.Process(e.currTs, e.currLine) - if !ok { + newLine, lbs, matches := e.pipeline.Process(e.currTs, e.currLine) + if !matches { continue } e.cur.Timestamp = time.Unix(0, e.currTs) diff --git a/pkg/chunkenc/unordered.go b/pkg/chunkenc/unordered.go index 0a520007ffe67..ed7dc608b4117 100644 --- a/pkg/chunkenc/unordered.go +++ b/pkg/chunkenc/unordered.go @@ -227,13 +227,14 @@ func (hb *unorderedHeadBlock) Iterator( mint, maxt, func(ts int64, line string) error { - newLine, parsedLbs, ok := pipeline.ProcessString(ts, line) - if !ok { + newLine, parsedLbs, matches := pipeline.ProcessString(ts, line) + if !matches { return nil } var stream *logproto.Stream labels := parsedLbs.String() + var ok bool if stream, ok = streams[labels]; !ok { stream = &logproto.Stream{ Labels: labels, diff --git a/pkg/logcli/client/file.go b/pkg/logcli/client/file.go index 94621f6aefba5..6024aebdb0a5c 100644 --- a/pkg/logcli/client/file.go +++ b/pkg/logcli/client/file.go @@ -234,13 +234,14 @@ func newFileIterator( processLine := func(line string) { ts := time.Now() - parsedLine, parsedLabels, ok := pipeline.ProcessString(ts.UnixNano(), line) - if !ok { + parsedLine, parsedLabels, matches := pipeline.ProcessString(ts.UnixNano(), line) + if !matches { return } var stream *logproto.Stream lhash := parsedLabels.Hash() + var ok bool if stream, ok = streams[lhash]; !ok { stream = &logproto.Stream{ Labels: parsedLabels.String(), diff --git a/pkg/logql/log/metrics_extraction.go b/pkg/logql/log/metrics_extraction.go index f1f61ca72bbc3..cba4a9b84ea89 100644 --- a/pkg/logql/log/metrics_extraction.go +++ b/pkg/logql/log/metrics_extraction.go @@ -249,8 +249,8 @@ func (sp *filteringStreamExtractor) Process(ts int64, line []byte) (float64, Lab continue } - _, _, skip := filter.pipeline.Process(ts, line) - if skip { //When the filter matches, don't run the next step + _, _, matches := filter.pipeline.Process(ts, line) + if matches { //When the filter matches, don't run the next step return 0, nil, false } } @@ -264,8 +264,8 @@ func (sp *filteringStreamExtractor) ProcessString(ts int64, line string) (float6 continue } - _, _, skip := filter.pipeline.ProcessString(ts, line) - if skip { //When the filter matches, don't run the next step + _, _, matches := filter.pipeline.ProcessString(ts, line) + if matches { //When the filter matches, don't run the next step return 0, nil, false } } diff --git a/pkg/logql/log/pipeline.go b/pkg/logql/log/pipeline.go index a7f8e6453dc4c..fa6f4180ff23e 100644 --- a/pkg/logql/log/pipeline.go +++ b/pkg/logql/log/pipeline.go @@ -19,8 +19,8 @@ type Pipeline interface { // A StreamPipeline never mutate the received line. type StreamPipeline interface { BaseLabels() LabelsResult - Process(ts int64, line []byte) (resultLine []byte, resultLabels LabelsResult, skip bool) - ProcessString(ts int64, line string) (resultLine string, resultLabels LabelsResult, skip bool) + Process(ts int64, line []byte) (resultLine []byte, resultLabels LabelsResult, matches bool) + ProcessString(ts int64, line string) (resultLine string, resultLabels LabelsResult, matches bool) } // Stage is a single step of a Pipeline. 
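At call sites, the renamed return value reads as a match flag rather than a skip flag. A minimal sketch using the exported noop pipeline (import paths are assumed to match this branch):

```go
package main

import (
	"fmt"

	"github.com/grafana/loki/pkg/logql/log"
	"github.com/prometheus/prometheus/model/labels"
)

func main() {
	lbs := labels.Labels{{Name: "job", Value: "fake"}}
	sp := log.NewNoopPipeline().ForStream(lbs)

	// The third return value is now "matches": true means the entry passed
	// every pipeline stage; false means it was filtered out.
	line, res, matches := sp.Process(0, []byte("hello"))
	fmt.Println(string(line), res.String(), matches)
}
```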
@@ -232,8 +232,8 @@ func (sp *filteringStreamPipeline) Process(ts int64, line []byte) ([]byte, Label continue } - _, _, skip := filter.pipeline.Process(ts, line) - if skip { // When the filter matches, don't run the next step + _, _, matches := filter.pipeline.Process(ts, line) + if matches { // When the filter matches, don't run the next step return nil, nil, false } } @@ -247,8 +247,8 @@ func (sp *filteringStreamPipeline) ProcessString(ts int64, line string) (string, continue } - _, _, skip := filter.pipeline.ProcessString(ts, line) - if skip { // When the filter matches, don't run the next step + _, _, matches := filter.pipeline.ProcessString(ts, line) + if matches { // When the filter matches, don't run the next step return "", nil, false } } diff --git a/pkg/logql/log/pipeline_test.go b/pkg/logql/log/pipeline_test.go index 912e03ad50819..24d2c05da6db0 100644 --- a/pkg/logql/log/pipeline_test.go +++ b/pkg/logql/log/pipeline_test.go @@ -12,15 +12,15 @@ import ( func TestNoopPipeline(t *testing.T) { lbs := labels.Labels{{Name: "foo", Value: "bar"}} - l, lbr, ok := NewNoopPipeline().ForStream(lbs).Process(0, []byte("")) + l, lbr, matches := NewNoopPipeline().ForStream(lbs).Process(0, []byte("")) require.Equal(t, []byte(""), l) require.Equal(t, NewLabelsResult(lbs, lbs.Hash()), lbr) - require.Equal(t, true, ok) + require.Equal(t, true, matches) - ls, lbr, ok := NewNoopPipeline().ForStream(lbs).ProcessString(0, "") + ls, lbr, matches := NewNoopPipeline().ForStream(lbs).ProcessString(0, "") require.Equal(t, "", ls) require.Equal(t, NewLabelsResult(lbs, lbs.Hash()), lbr) - require.Equal(t, true, ok) + require.Equal(t, true, matches) } func TestPipeline(t *testing.T) { @@ -29,25 +29,25 @@ func TestPipeline(t *testing.T) { NewStringLabelFilter(labels.MustNewMatcher(labels.MatchEqual, "foo", "bar")), newMustLineFormatter("lbs {{.foo}}"), }) - l, lbr, ok := p.ForStream(lbs).Process(0, []byte("line")) + l, lbr, matches := p.ForStream(lbs).Process(0, []byte("line")) require.Equal(t, []byte("lbs bar"), l) require.Equal(t, NewLabelsResult(lbs, lbs.Hash()), lbr) - require.Equal(t, true, ok) + require.Equal(t, true, matches) - ls, lbr, ok := p.ForStream(lbs).ProcessString(0, "line") + ls, lbr, matches := p.ForStream(lbs).ProcessString(0, "line") require.Equal(t, "lbs bar", ls) require.Equal(t, NewLabelsResult(lbs, lbs.Hash()), lbr) - require.Equal(t, true, ok) + require.Equal(t, true, matches) - l, lbr, ok = p.ForStream(labels.Labels{}).Process(0, []byte("line")) + l, lbr, matches = p.ForStream(labels.Labels{}).Process(0, []byte("line")) require.Equal(t, []byte(nil), l) require.Equal(t, nil, lbr) - require.Equal(t, false, ok) + require.Equal(t, false, matches) - ls, lbr, ok = p.ForStream(labels.Labels{}).ProcessString(0, "line") + ls, lbr, matches = p.ForStream(labels.Labels{}).ProcessString(0, "line") require.Equal(t, "", ls) require.Equal(t, nil, lbr) - require.Equal(t, false, ok) + require.Equal(t, false, matches) } func TestFilteringPipeline(t *testing.T) { @@ -74,11 +74,11 @@ func TestFilteringPipeline(t *testing.T) { for _, test := range tt { t.Run(test.name, func(t *testing.T) { - _, _, ok := p.ForStream(test.inputStreamLabels).Process(test.ts, []byte(test.line)) - require.Equal(t, test.ok, ok) + _, _, matches := p.ForStream(test.inputStreamLabels).Process(test.ts, []byte(test.line)) + require.Equal(t, test.ok, matches) - _, _, ok = p.ForStream(test.inputStreamLabels).ProcessString(test.ts, test.line) - require.Equal(t, test.ok, ok) + _, _, matches = 
p.ForStream(test.inputStreamLabels).ProcessString(test.ts, test.line) + require.Equal(t, test.ok, matches) }) } } @@ -127,7 +127,7 @@ func (p *stubStreamPipeline) ProcessString(ts int64, line string) (string, Label } var ( - resOK bool + resMatches bool resLine []byte resLineString string resLbs LabelsResult @@ -168,13 +168,13 @@ func Benchmark_Pipeline(b *testing.B) { b.Run("pipeline bytes", func(b *testing.B) { b.ResetTimer() for n := 0; n < b.N; n++ { - resLine, resLbs, resOK = sp.Process(0, line) + resLine, resLbs, resMatches = sp.Process(0, line) } }) b.Run("pipeline string", func(b *testing.B) { b.ResetTimer() for n := 0; n < b.N; n++ { - resLineString, resLbs, resOK = sp.ProcessString(0, lineString) + resLineString, resLbs, resMatches = sp.ProcessString(0, lineString) } }) @@ -184,13 +184,13 @@ func Benchmark_Pipeline(b *testing.B) { b.Run("line extractor bytes", func(b *testing.B) { b.ResetTimer() for n := 0; n < b.N; n++ { - resSample, resLbs, resOK = ex.Process(0, line) + resSample, resLbs, resMatches = ex.Process(0, line) } }) b.Run("line extractor string", func(b *testing.B) { b.ResetTimer() for n := 0; n < b.N; n++ { - resSample, resLbs, resOK = ex.ProcessString(0, lineString) + resSample, resLbs, resMatches = ex.ProcessString(0, lineString) } }) @@ -201,13 +201,13 @@ func Benchmark_Pipeline(b *testing.B) { b.Run("label extractor bytes", func(b *testing.B) { b.ResetTimer() for n := 0; n < b.N; n++ { - resSample, resLbs, resOK = ex.Process(0, line) + resSample, resLbs, resMatches = ex.Process(0, line) } }) b.Run("label extractor string", func(b *testing.B) { b.ResetTimer() for n := 0; n < b.N; n++ { - resSample, resLbs, resOK = ex.ProcessString(0, lineString) + resSample, resLbs, resMatches = ex.ProcessString(0, lineString) } }) } @@ -240,9 +240,9 @@ func jsonBenchmark(b *testing.B, parser Stage) { b.ResetTimer() sp := p.ForStream(lbs) for n := 0; n < b.N; n++ { - resLine, resLbs, resOK = sp.Process(0, line) + resLine, resLbs, resMatches = sp.Process(0, line) - if !resOK { + if !resMatches { b.Fatalf("resulting line not ok: %s\n", line) } @@ -263,9 +263,9 @@ func invalidJSONBenchmark(b *testing.B, parser Stage) { b.ResetTimer() sp := p.ForStream(labels.Labels{}) for n := 0; n < b.N; n++ { - resLine, resLbs, resOK = sp.Process(0, line) + resLine, resLbs, resMatches = sp.Process(0, line) - if !resOK { + if !resMatches { b.Fatalf("resulting line not ok: %s\n", line) } diff --git a/pkg/logql/syntax/ast_test.go b/pkg/logql/syntax/ast_test.go index 9fc9a33c288f6..6dd1532a659be 100644 --- a/pkg/logql/syntax/ast_test.go +++ b/pkg/logql/syntax/ast_test.go @@ -193,9 +193,9 @@ func Test_NilFilterDoesntPanic(t *testing.T) { p, err := expr.Pipeline() require.Nil(t, err) - _, _, ok := p.ForStream(labelBar).Process(0, []byte("bleepbloop")) + _, _, matches := p.ForStream(labelBar).Process(0, []byte("bleepbloop")) - require.True(t, ok) + require.True(t, matches) }) } } @@ -287,8 +287,8 @@ func Test_FilterMatcher(t *testing.T) { } else { sp := p.ForStream(labelBar) for _, lc := range tt.lines { - _, _, ok := sp.Process(0, []byte(lc.l)) - assert.Equalf(t, lc.e, ok, "query for line '%s' was %v and not %v", lc.l, ok, lc.e) + _, _, matches := sp.Process(0, []byte(lc.l)) + assert.Equalf(t, lc.e, matches, "query for line '%s' was %v and not %v", lc.l, matches, lc.e) } } }) diff --git a/pkg/logql/syntax/parser_test.go b/pkg/logql/syntax/parser_test.go index 8747c0e165574..56d09b4e41b04 100644 --- a/pkg/logql/syntax/parser_test.go +++ b/pkg/logql/syntax/parser_test.go @@ -3040,8 +3040,8 @@ func 
Test_PipelineCombined(t *testing.T) { p, err := expr.Pipeline() require.Nil(t, err) sp := p.ForStream(labels.Labels{}) - line, lbs, ok := sp.Process(0, []byte(`level=debug ts=2020-10-02T10:10:42.092268913Z caller=logging.go:66 traceID=a9d4d8a928d8db1 msg="POST /api/prom/api/v1/query_range (200) 1.5s"`)) - require.True(t, ok) + line, lbs, matches := sp.Process(0, []byte(`level=debug ts=2020-10-02T10:10:42.092268913Z caller=logging.go:66 traceID=a9d4d8a928d8db1 msg="POST /api/prom/api/v1/query_range (200) 1.5s"`)) + require.True(t, matches) require.Equal( t, labels.Labels{labels.Label{Name: "caller", Value: "logging.go:66"}, labels.Label{Name: "duration", Value: "1.5s"}, labels.Label{Name: "level", Value: "debug"}, labels.Label{Name: "method", Value: "POST"}, labels.Label{Name: "msg", Value: "POST /api/prom/api/v1/query_range (200) 1.5s"}, labels.Label{Name: "path", Value: "/api/prom/api/v1/query_range"}, labels.Label{Name: "status", Value: "200"}, labels.Label{Name: "traceID", Value: "a9d4d8a928d8db1"}, labels.Label{Name: "ts", Value: "2020-10-02T10:10:42.092268913Z"}}, diff --git a/pkg/logql/test_utils.go b/pkg/logql/test_utils.go index e40f12ed0cdc4..225ad0d5a2498 100644 --- a/pkg/logql/test_utils.go +++ b/pkg/logql/test_utils.go @@ -101,7 +101,7 @@ func processStream(in []logproto.Stream, pipeline log.Pipeline) []logproto.Strea for _, stream := range in { for _, e := range stream.Entries { sp := pipeline.ForStream(mustParseLabels(stream.Labels)) - if l, out, ok := sp.Process(e.Timestamp.UnixNano(), []byte(e.Line)); ok { + if l, out, matches := sp.Process(e.Timestamp.UnixNano(), []byte(e.Line)); matches { var s *logproto.Stream var found bool s, found = resByStream[out.String()] From c76565af53fa694ff128061d52a1cd1ae24668bd Mon Sep 17 00:00:00 2001 From: Sashank Agarwal Date: Tue, 3 May 2022 19:11:50 +0530 Subject: [PATCH 061/223] fix labeler (#6087) --- .github/workflows/labeler.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/labeler.yml b/.github/workflows/labeler.yml index 19066bc7dc75c..057208eda328d 100644 --- a/.github/workflows/labeler.yml +++ b/.github/workflows/labeler.yml @@ -3,7 +3,7 @@ on: - pull_request_target jobs: -- triage: + triage: permissions: contents: read pull-requests: write From 2264b7331198a29c1022b0cba6f413309b734d5e Mon Sep 17 00:00:00 2001 From: Karen Miller <84039272+KMiller-Grafana@users.noreply.github.com> Date: Tue, 3 May 2022 10:01:47 -0700 Subject: [PATCH 062/223] Docs: remove Cortex references (#6079) * Docs: remove Cortex references * Bug fix: (after PR 6015) Cortex to Loki update --- docs/sources/configuration/_index.md | 4 ++-- docs/sources/configuration/query-frontend.md | 2 +- docs/sources/logql/log_queries.md | 10 +++++----- docs/sources/logql/template_functions.md | 6 +++--- docs/sources/operations/recording-rules.md | 6 ++---- docs/sources/operations/scalability.md | 5 +++-- docs/sources/rules/_index.md | 4 ++-- docs/sources/tools/logcli.md | 10 +++++----- pkg/storage/chunk/client/azure/blob_storage_client.go | 2 +- 9 files changed, 24 insertions(+), 25 deletions(-) diff --git a/docs/sources/configuration/_index.md b/docs/sources/configuration/_index.md index 9a1c3c0dd6520..5e0f19275ce34 100644 --- a/docs/sources/configuration/_index.md +++ b/docs/sources/configuration/_index.md @@ -1317,7 +1317,7 @@ those components specific configuration sections. # Other cluster members to join. Can be specified multiple times. 
It can be an # IP, hostname or an entry specified in the DNS Service Discovery format (see -# https://cortexmetrics.io/docs/configuration/arguments/#dns-service-discovery +# https://grafana.com/docs/mimir/latest/operators-guide/configuring/about-dns-service-discovery/ # for more details). # CLI flag: -memberlist.join [join_members: | default = ] @@ -1802,7 +1802,7 @@ memcached_client: [service: | default = "memcached"] # (Experimental) Comma-separated addresses list in DNS Service Discovery format: - # https://cortexmetrics.io/docs/configuration/arguments/#dns-service-discovery + # https://grafana.com/docs/mimir/latest/operators-guide/configuring/about-dns-service-discovery/ # CLI flag: -.memcached.addresses [addresses: | default = ""] diff --git a/docs/sources/configuration/query-frontend.md b/docs/sources/configuration/query-frontend.md index 29df1b94f52af..477822bd426d8 100644 --- a/docs/sources/configuration/query-frontend.md +++ b/docs/sources/configuration/query-frontend.md @@ -29,7 +29,7 @@ metadata: namespace: data: config.yaml: | - # Disable the requirement that every request to Cortex has a + # Disable the requirement that every request to Loki has a # X-Scope-OrgID header. `fake` will be substituted in instead. auth_enabled: false diff --git a/docs/sources/logql/log_queries.md b/docs/sources/logql/log_queries.md index 894cd4dd38fcf..d70993be4d574 100644 --- a/docs/sources/logql/log_queries.md +++ b/docs/sources/logql/log_queries.md @@ -543,7 +543,7 @@ level=debug ts=2020-10-02T10:10:42.092268913Z caller=logging.go:66 traceID=a9d4d You can use multiple parsers (logfmt and regexp) like this. ```logql -{job="cortex-ops/query-frontend"} | logfmt | line_format "{{.msg}}" | regexp "(?P\\w+) (?P[\\w|/]+) \\((?P\\d+?)\\) (?P.*)" +{job="loki-ops/query-frontend"} | logfmt | line_format "{{.msg}}" | regexp "(?P\\w+) (?P[\\w|/]+) \\((?P\\d+?)\\) (?P.*)" ``` This is possible because the `| line_format` reformats the log line to become `POST /api/prom/api/v1/query_range (200) 1.5s` which can then be parsed with the `| regexp ...` parser. 
@@ -566,13 +566,13 @@ Label formatting is used to sanitize the query while the line format reduce the For these given log lines: ```log -level=info ts=2020-10-23T20:32:18.094668233Z caller=metrics.go:81 org_id=29 traceID=1980d41501b57b68 latency=fast query="{cluster=\"ops-tools1\", job=\"cortex-ops/query-frontend\"} |= \"query_range\"" query_type=filter range_type=range length=15m0s step=7s duration=650.22401ms status=200 throughput_mb=1.529717 total_bytes_mb=0.994659 -level=info ts=2020-10-23T20:32:18.068866235Z caller=metrics.go:81 org_id=29 traceID=1980d41501b57b68 latency=fast query="{cluster=\"ops-tools1\", job=\"cortex-ops/query-frontend\"} |= \"query_range\"" query_type=filter range_type=range length=15m0s step=7s duration=624.008132ms status=200 throughput_mb=0.693449 total_bytes_mb=0.432718 +level=info ts=2020-10-23T20:32:18.094668233Z caller=metrics.go:81 org_id=29 traceID=1980d41501b57b68 latency=fast query="{cluster=\"ops-tools1\", job=\"loki-ops/query-frontend\"} |= \"query_range\"" query_type=filter range_type=range length=15m0s step=7s duration=650.22401ms status=200 throughput_mb=1.529717 total_bytes_mb=0.994659 +level=info ts=2020-10-23T20:32:18.068866235Z caller=metrics.go:81 org_id=29 traceID=1980d41501b57b68 latency=fast query="{cluster=\"ops-tools1\", job=\"loki-ops/query-frontend\"} |= \"query_range\"" query_type=filter range_type=range length=15m0s step=7s duration=624.008132ms status=200 throughput_mb=0.693449 total_bytes_mb=0.432718 ``` The result would be: ```log -2020-10-23T20:32:18.094668233Z 650.22401ms traceID = 1980d41501b57b68 {cluster="ops-tools1", job="cortex-ops/query-frontend"} |= "query_range" -2020-10-23T20:32:18.068866235Z 624.008132ms traceID = 1980d41501b57b68 {cluster="ops-tools1", job="cortex-ops/query-frontend"} |= "query_range" +2020-10-23T20:32:18.094668233Z 650.22401ms traceID = 1980d41501b57b68 {cluster="ops-tools1", job="loki-ops/query-frontend"} |= "query_range" +2020-10-23T20:32:18.068866235Z 624.008132ms traceID = 1980d41501b57b68 {cluster="ops-tools1", job="loki-ops/query-frontend"} |= "query_range" ``` diff --git a/docs/sources/logql/template_functions.md b/docs/sources/logql/template_functions.md index 33b4e52d1e6d4..aad284691f813 100644 --- a/docs/sources/logql/template_functions.md +++ b/docs/sources/logql/template_functions.md @@ -627,7 +627,7 @@ fromJson "{\"foo\": 55}" Example of a query to print a newline per queries stored as a json array in the log line: ```logql -{job="cortex/querier"} |= "finish in prometheus" | logfmt | line_format "{{ range $q := fromJson .queries }} {{ $q.query }} {{ end }}" +{job="loki/querier"} |= "finish in prometheus" | logfmt | line_format "{{ range $q := fromJson .queries }} {{ $q.query }} {{ end }}" ``` ## now @@ -662,7 +662,7 @@ Example of a query to print a newline per queries stored as a json array in the { unixEpoch now }} ``` -Example of a query to filter cortex querier jobs which create time is 1 day before: +Example of a query to filter Loki querier jobs which create time is 1 day before: ```logql -{job="cortex/querier"} | label_format nowEpoch=`{{(unixEpoch now)}}`,createDateEpoch=`{{unixEpoch (toDate "2006-01-02" .createDate)}}` | label_format dateTimeDiff="{{sub .nowEpoch .createDateEpoch}}" | dateTimeDiff > 86400 +{job="loki/querier"} | label_format nowEpoch=`{{(unixEpoch now)}}`,createDateEpoch=`{{unixEpoch (toDate "2006-01-02" .createDate)}}` | label_format dateTimeDiff="{{sub .nowEpoch .createDateEpoch}}" | dateTimeDiff > 86400 ``` diff --git 
a/docs/sources/operations/recording-rules.md b/docs/sources/operations/recording-rules.md index 2128f882553b7..03a9d0553b264 100644 --- a/docs/sources/operations/recording-rules.md +++ b/docs/sources/operations/recording-rules.md @@ -27,7 +27,7 @@ is that Prometheus will, for example, reject a remote-write request with 100 sam When the `ruler` starts up, it will load the WALs for the tenants who have recording rules. These WAL files are stored on disk and are loaded into memory. -Note: WALs are loaded one at a time upon start-up. This is a current limitation of the Cortex Ruler which Loki inherits. +Note: WALs are loaded one at a time upon start-up. This is a current limitation of the Loki ruler. For this reason, it is adviseable that the number of rule groups serviced by a ruler be kept to a reasonable size, since _no rule evaluation occurs while WAL replay is in progress (this includes alerting rules)_. @@ -48,9 +48,7 @@ excessively large due to truncation. ## Scaling -Loki's `ruler` component is based on Cortex's `ruler`. - -See Cortex's guide for [horizontally scaling the `ruler`](https://cortexmetrics.io/docs/guides/ruler-sharding/) using the ring. +See Mimir's guide for [configuring Grafana Mimir hash rings](https://grafana.com/docs/mimir/latest/operators-guide/configuring/configuring-hash-rings/) for scaling the ruler using a ring. Note: the `ruler` shards by rule _group_, not by individual rules. This is an artifact of the fact that Prometheus recording rules need to run in order since one recording rule can reuse another - but this is not possible in Loki. diff --git a/docs/sources/operations/scalability.md b/docs/sources/operations/scalability.md index bdebcc2dd4a54..934ae47361e6d 100644 --- a/docs/sources/operations/scalability.md +++ b/docs/sources/operations/scalability.md @@ -15,13 +15,14 @@ and scaling for resource usage. ## Separate Query Scheduler -The Query frontend has an in-memory queue that can be moved out into a separate process similar to the [Cortex Query Scheduler](https://cortexmetrics.io/docs/operations/scaling-query-frontend/#query-scheduler). This allows running multiple query frontends. +The Query frontend has an in-memory queue that can be moved out into a separate process similar to the +[Grafana Mimir query-scheduler](https://grafana.com/docs/mimir/latest/operators-guide/architecture/components/query-scheduler/). This allows running multiple query frontends. In order to run with the Query Scheduler, the frontend needs to be passed the scheduler's address via `-frontend.scheduler-address` and the querier processes needs to be started with `-querier.scheduler-address` set to the same address. Both options can also be defined via the [configuration file](../configuration). It is not valid to start the querier with both a configured frontend and a scheduler address. -The query scheduler process itself can be started via the `-target=query-scheduler` option of the Loki Docker image. For instance, `docker run grafana/loki:latest -config.file=/cortex/config/cortex.yaml -target=query-scheduler -server.http-listen-port=8009 -server.grpc-listen-port=9009` starts the query scheduler listening on ports `8009` and `9009`. +The query scheduler process itself can be started via the `-target=query-scheduler` option of the Loki Docker image. For instance, `docker run grafana/loki:latest -config.file=/mimir/config/mimir.yaml -target=query-scheduler -server.http-listen-port=8009 -server.grpc-listen-port=9009` starts the query scheduler listening on ports `8009` and `9009`. 
## Memory ballast diff --git a/docs/sources/rules/_index.md b/docs/sources/rules/_index.md index 9dd214861212e..99339d22398f7 100644 --- a/docs/sources/rules/_index.md +++ b/docs/sources/rules/_index.md @@ -7,7 +7,7 @@ weight: 700 # Rules and the Ruler -Grafana Loki includes a component called the Ruler, adapted from our upstream project, Cortex. The Ruler is responsible for continually evaluating a set of configurable queries and performing an action based on the result. +Grafana Loki includes a component called the ruler. The ruler is responsible for continually evaluating a set of configurable queries and performing an action based on the result. This example configuration sources rules from a local disk. @@ -107,7 +107,7 @@ At the time of writing, these are the compatible backends that support this: - [Prometheus](https://prometheus.io/docs/prometheus/latest/disabled_features/#remote-write-receiver) (`>=v2.25.0`): Prometheus is generally a pull-based system, but since `v2.25.0` has allowed for metrics to be written directly to it as well. -- [Cortex](https://cortexmetrics.io/docs/api/#remote-write) +- [Grafana Mimir](https://grafana.com/docs/mimir/latest/operators-guide/reference-http-api/#remote-write) - [Thanos (`Receiver`)](https://thanos.io/tip/components/receive.md/) Here is an example remote-write configuration for sending to a local Prometheus instance: diff --git a/docs/sources/tools/logcli.md b/docs/sources/tools/logcli.md index 2b8cef4691890..91d81502dd2e0 100644 --- a/docs/sources/tools/logcli.md +++ b/docs/sources/tools/logcli.md @@ -59,13 +59,13 @@ export LOKI_ADDR=http://localhost:3100 ```bash $ logcli labels job https://logs-dev-ops-tools1.grafana.net/api/prom/label/job/values -cortex-ops/consul -cortex-ops/cortex-gw +loki-ops/consul +loki-ops/loki-gw ... -$ logcli query '{job="cortex-ops/consul"}' -https://logs-dev-ops-tools1.grafana.net/api/prom/query?query=%7Bjob%3D%22cortex-ops%2Fconsul%22%7D&limit=30&start=1529928228&end=1529931828&direction=backward&regexp= -Common labels: {job="cortex-ops/consul", namespace="cortex-ops"} +$ logcli query '{job="loki-ops/consul"}' +https://logs-dev-ops-tools1.grafana.net/api/prom/query?query=%7Bjob%3D%22loki-ops%2Fconsul%22%7D&limit=30&start=1529928228&end=1529931828&direction=backward&regexp= +Common labels: {job="loki-ops/consul", namespace="loki-ops"} 2018-06-25T12:52:09Z {instance="consul-8576459955-pl75w"} 2018/06/25 12:52:09 [INFO] raft: Snapshot to 475409 complete 2018-06-25T12:52:09Z {instance="consul-8576459955-pl75w"} 2018/06/25 12:52:09 [INFO] raft: Compacting logs from 456973 to 465169 ... diff --git a/pkg/storage/chunk/client/azure/blob_storage_client.go b/pkg/storage/chunk/client/azure/blob_storage_client.go index f74ade5f6811f..e4fef88bcb635 100644 --- a/pkg/storage/chunk/client/azure/blob_storage_client.go +++ b/pkg/storage/chunk/client/azure/blob_storage_client.go @@ -105,7 +105,7 @@ func (c *BlobStorageConfig) RegisterFlags(f *flag.FlagSet) { // RegisterFlagsWithPrefix adds the flags required to config this to the given FlagSet func (c *BlobStorageConfig) RegisterFlagsWithPrefix(prefix string, f *flag.FlagSet) { f.StringVar(&c.Environment, prefix+"azure.environment", azureGlobal, fmt.Sprintf("Azure Cloud environment. Supported values are: %s.", strings.Join(supportedEnvironments, ", "))) - f.StringVar(&c.ContainerName, prefix+"azure.container-name", "loki", "Name of the blob container used to store chunks. 
This container must be created before running cortex.") + f.StringVar(&c.ContainerName, prefix+"azure.container-name", "loki", "Name of the blob container used to store chunks. This container must be created before running Loki.") f.StringVar(&c.AccountName, prefix+"azure.account-name", "", "The Microsoft Azure account name to be used") f.StringVar(&c.ChunkDelimiter, prefix+"azure.chunk-delimiter", "-", "Chunk delimiter for blob ID to be used") f.Var(&c.AccountKey, prefix+"azure.account-key", "The Microsoft Azure account key to use.") From 0a4bfbe4f76788d7bfa7ccd779771b3a7f71f8dd Mon Sep 17 00:00:00 2001 From: Kaviraj Kanagaraj Date: Wed, 4 May 2022 09:59:01 +0200 Subject: [PATCH 063/223] Remove noisy "inflight" request log entries (#6074) We have enough metrics now (queue time, queue length and metrics.go statistics) that make these log entries obsolete. Signed-off-by: Kaviraj --- pkg/querier/worker/scheduler_processor.go | 6 ------ 1 file changed, 6 deletions(-) diff --git a/pkg/querier/worker/scheduler_processor.go b/pkg/querier/worker/scheduler_processor.go index 5a09da177d5c9..ed18897ec44d1 100644 --- a/pkg/querier/worker/scheduler_processor.go +++ b/pkg/querier/worker/scheduler_processor.go @@ -22,8 +22,6 @@ import ( "google.golang.org/grpc" "google.golang.org/grpc/health/grpc_health_v1" - "github.com/grafana/dskit/tenant" - "github.com/grafana/loki/pkg/lokifrontend/frontend/v2/frontendv2pb" querier_stats "github.com/grafana/loki/pkg/querier/stats" "github.com/grafana/loki/pkg/scheduler/schedulerpb" @@ -129,15 +127,11 @@ func (sp *schedulerProcessor) querierLoop(c schedulerpb.SchedulerForQuerier_Quer logger = util_log.WithContext(ctx, sp.log) ) - start := time.Now() - tenant, _ := tenant.TenantID(ctx) sp.metrics.inflightRequests.Inc() - level.Debug(logger).Log("msg", "tracking inflight request", "tenant", tenant, "op", "enqueue") sp.runRequest(ctx, logger, request.QueryID, request.FrontendAddress, request.StatsEnabled, request.HttpRequest) sp.metrics.inflightRequests.Dec() - level.Debug(logger).Log("msg", "tracking inflight request", "tenant", tenant, "op", "dequeue", "duration", time.Since(start)) // Report back to scheduler that processing of the query has finished. if err := c.Send(&schedulerpb.QuerierToScheduler{}); err != nil { From 1d25e17b321a13c451db3e4c5f58fd1a20629c9e Mon Sep 17 00:00:00 2001 From: n4mine Date: Wed, 4 May 2022 15:59:35 +0800 Subject: [PATCH 064/223] Docs: fix pic location in `sources/fundamentals/architecture/deployment-modes.md` (#6089) --- docs/sources/fundamentals/architecture/deployment-modes.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/docs/sources/fundamentals/architecture/deployment-modes.md b/docs/sources/fundamentals/architecture/deployment-modes.md index 151d31a7cd330..9055fc24b07bc 100644 --- a/docs/sources/fundamentals/architecture/deployment-modes.md +++ b/docs/sources/fundamentals/architecture/deployment-modes.md @@ -28,7 +28,7 @@ This is monolithic mode; it runs all of Loki’s microservice components inside a single process as a single binary or Docker image. -![monolithic mode diagram](../monolithic-mode.png) +![monolithic mode diagram](./monolithic-mode.png) Monolithic mode is useful for getting started quickly to experiment with Loki, as well as for small read/write volumes of up to approximately 100GB per day. @@ -54,7 +54,7 @@ Loki provides the simple scalable deployment mode. This deployment mode can scale to several TBs of logs per day and more. 
Consider the microservices mode approach for very large Loki installations. -![simple scalable deployment mode diagram](../simple-scalable.png) +![simple scalable deployment mode diagram](./simple-scalable.png) In this mode the component microservices of Loki are bundled into two targets: `-target=read` and `-target=write`. @@ -89,7 +89,7 @@ Each process is invoked specifying its `target`: * ruler * compactor -![microservices mode diagram](../microservices-mode.png) +![microservices mode diagram](./microservices-mode.png) Running components as individual microservices allows scaling up by increasing the quantity of microservices. From b3f6f6b415b266d604524638e3d7a5eb8acfe6d0 Mon Sep 17 00:00:00 2001 From: Patrick Mansfield <42042889+patman-cp@users.noreply.github.com> Date: Wed, 4 May 2022 01:02:48 -0700 Subject: [PATCH 065/223] table_manager: Change some level.Info() logging to level.Debug() (#6076) There is excessive logging of "creating table" and "synching tables" messages; change them to use level.Debug() instead of level.Info() to limit the output. Fixes #1786 --- pkg/storage/stores/series/index/table_manager.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pkg/storage/stores/series/index/table_manager.go b/pkg/storage/stores/series/index/table_manager.go index 9950eb5982403..4db98b851f279 100644 --- a/pkg/storage/stores/series/index/table_manager.go +++ b/pkg/storage/stores/series/index/table_manager.go @@ -325,7 +325,7 @@ func (m *TableManager) SyncTables(ctx context.Context) error { } expected := m.calculateExpectedTables() - level.Info(util_log.Logger).Log("msg", "synching tables", "expected_tables", len(expected)) + level.Debug(util_log.Logger).Log("msg", "synching tables", "expected_tables", len(expected)) toCreate, toCheckThroughput, toDelete, err := m.partitionTables(ctx, expected) if err != nil { @@ -476,7 +476,7 @@ func (m *TableManager) createTables(ctx context.Context, descriptions []config.T merr := tsdb_errors.NewMulti() for _, desc := range descriptions { - level.Info(util_log.Logger).Log("msg", "creating table", "table", desc.Name) + level.Debug(util_log.Logger).Log("msg", "creating table", "table", desc.Name) err := m.client.CreateTable(ctx, desc) if err != nil { numFailures++ From 1389857846eac10f0b2faf55526406003af976cd Mon Sep 17 00:00:00 2001 From: Kaviraj Kanagaraj Date: Wed, 4 May 2022 14:46:05 +0200 Subject: [PATCH 066/223] `metrics.go` support for metadata queries (labels and series) (#5971) * Add different functions to record labels and series queries * Add tests for logging series and labels queries * Add statistics to the Series and Labels proto response types * Plug in the statistics recorder on the query handler * Remove duplicate imports and clean up comments * Remove the need to add usage statistics for metadata queries. 
* Add status based on the error returned from the API * Fix lint error by fixing the return value order * Use explicit logger * Remove unused recorders * Add `total_entries` representing the total number of result entries returned * Inject statscollector middleware for metadata queries * Fix query frontend stats middleware Signed-off-by: Kaviraj * Fix tests affected by the extra `totalEntriesReturned` field Signed-off-by: Kaviraj * `make check-generated-files` Signed-off-by: Kaviraj * Fix typo Signed-off-by: Kaviraj * Fix tripperware middleware for label values endpoint Signed-off-by: Kaviraj * Fix bug with label extraction Signed-off-by: Kaviraj * Add changelog entry Signed-off-by: Kaviraj * Fix `total_entries` value for range query Signed-off-by: Kaviraj --- CHANGELOG.md | 5 +- pkg/chunkenc/memchunk_test.go | 4 +- pkg/iter/entry_iterator_test.go | 2 +- pkg/logql/engine.go | 18 +- pkg/logql/metrics.go | 102 +++++++- pkg/logql/metrics_test.go | 57 ++++- pkg/logqlmodel/stats/context.go | 12 +- pkg/logqlmodel/stats/context_test.go | 10 +- pkg/logqlmodel/stats/stats.pb.go | 137 +++++++---- pkg/logqlmodel/stats/stats.proto | 2 + pkg/querier/http.go | 40 ++++ pkg/querier/queryrange/codec.go | 66 +++++- pkg/querier/queryrange/codec_test.go | 2 + pkg/querier/queryrange/prometheus_test.go | 1 + pkg/querier/queryrange/queryrange.pb.go | 272 +++++++++++++++------- pkg/querier/queryrange/queryrange.proto | 8 + pkg/querier/queryrange/roundtrip.go | 2 + pkg/querier/queryrange/stats.go | 85 ++++++- pkg/storage/batch_test.go | 2 +- pkg/util/marshal/legacy/marshal_test.go | 1 + pkg/util/marshal/marshal_test.go | 3 + pkg/util/server/error.go | 28 ++- 22 files changed, 693 insertions(+), 166 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index ed4d4208fffb3..07d36e3493ebf 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,9 +1,10 @@ ## Main +* [5971](https://github.com/grafana/loki/pull/5971) **kavirajk**: Record statistics about metadata queries such as labels and series queries in `metrics.go` as well * [5790](https://github.com/grafana/loki/pull/5790) **chaudum**: Add UDP support for Promtail's syslog target. * [5984](https://github.com/grafana/loki/pull/5984) **dannykopping** and **salvacorts**: Querier: prevent unnecessary calls to ingesters. * [5943](https://github.com/grafana/loki/pull/5943) **tpaschalis**: Add support for exclusion patterns in Promtail's static_config -* [5879](https://github.com/grafana/loki/pull/5879) **MichelHollands**: Remove lines matching delete request expression when using "filter-and-delete" deletion mode. +* [5879](https://github.com/grafana/loki/pull/5879) **MichelHollands**: Remove lines matching delete request expression when using "filter-and-delete" deletion mode. * [5899](https://github.com/grafana/loki/pull/5899) **simonswine**: Update go image to 1.17.9. * [5888](https://github.com/grafana/loki/pull/5888) **Papawy** Fix common config net interface name overwritten by ring common config * [5799](https://github.com/grafana/loki/pull/5799) **cyriltovena** Fix deduping issues when multiple entries with the same timestamp exist. @@ -107,7 +108,7 @@ Release notes for 2.5.0 can be found on the [release notes page](https://grafana ### All Changes -Here is a list of all significant changes, in the past we have included all changes +Here is a list of all significant changes, in the past we have included all changes but with over 500 PR's merged since the last release we decided to curate the list to include only the most relevant. 
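Taken together, the flow the diffs below implement is: open a stats context before the metadata query runs, count the returned entries into the new `totalEntriesReturned` summary field, and hand the result to a query-type-specific recorder. The following is a minimal sketch under stated assumptions, not code from this patch: `labelWithStats` and the `querier` interface are hypothetical stand-ins for the real handler and querier, queue time is zeroed, and error handling is trimmed.

```go
package sketch

import (
	"context"
	"strconv"
	"time"

	"github.com/grafana/loki/pkg/logproto"
	"github.com/grafana/loki/pkg/logql"
	"github.com/grafana/loki/pkg/logqlmodel/stats"
	"github.com/grafana/loki/pkg/util/server"
	"github.com/grafana/loki/pkg/util/spanlogger"
)

// querier is a hypothetical stand-in for the Label method the handler calls.
type querier interface {
	Label(ctx context.Context, req *logproto.LabelRequest) (*logproto.LabelResponse, error)
}

func labelWithStats(ctx context.Context, q querier, req *logproto.LabelRequest) (*logproto.LabelResponse, error) {
	log, ctx := spanlogger.New(ctx, "query.Label")
	// Open a stats context so work done while the query runs is collected.
	statsCtx, ctx := stats.NewContext(ctx)

	start := time.Now()
	resp, err := q.Label(ctx, req)

	// The third argument feeds the new Summary.totalEntriesReturned field;
	// queue time is zeroed here for simplicity.
	statResult := statsCtx.Result(time.Since(start), 0, len(resp.GetValues()))

	status := 200
	if err != nil {
		status, _ = server.ClientHTTPStatusAndError(err)
	}
	// Emit the metrics.go log line and Prometheus metrics for this label query.
	logql.RecordLabelQueryMetrics(ctx, log, *req.Start, *req.End, req.Name, strconv.Itoa(status), statResult)
	return resp, err
}
```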
diff --git a/pkg/chunkenc/memchunk_test.go b/pkg/chunkenc/memchunk_test.go index 0b6d7e20d443e..11d02384f3868 100644 --- a/pkg/chunkenc/memchunk_test.go +++ b/pkg/chunkenc/memchunk_test.go @@ -566,7 +566,7 @@ func TestChunkStats(t *testing.T) { t.Fatal(err) } // test on a chunk filling up - s := statsCtx.Result(time.Since(first), 0) + s := statsCtx.Result(time.Since(first), 0, 0) require.Equal(t, int64(expectedSize), s.Summary.TotalBytesProcessed) require.Equal(t, int64(inserted), s.Summary.TotalLinesProcessed) @@ -593,7 +593,7 @@ func TestChunkStats(t *testing.T) { if err := it.Close(); err != nil { t.Fatal(err) } - s = statsCtx.Result(time.Since(first), 0) + s = statsCtx.Result(time.Since(first), 0, 0) require.Equal(t, int64(expectedSize), s.Summary.TotalBytesProcessed) require.Equal(t, int64(inserted), s.Summary.TotalLinesProcessed) diff --git a/pkg/iter/entry_iterator_test.go b/pkg/iter/entry_iterator_test.go index 284b0ca2b1c3c..16d7ad9a69fd5 100644 --- a/pkg/iter/entry_iterator_test.go +++ b/pkg/iter/entry_iterator_test.go @@ -608,7 +608,7 @@ func Test_DuplicateCount(t *testing.T) { defer it.Close() for it.Next() { } - require.Equal(t, test.expectedDuplicates, stats.FromContext(ctx).Result(0, 0).TotalDuplicates()) + require.Equal(t, test.expectedDuplicates, stats.FromContext(ctx).Result(0, 0, 0).TotalDuplicates()) }) } } diff --git a/pkg/logql/engine.go b/pkg/logql/engine.go index c28eb75aa0423..781580eacab05 100644 --- a/pkg/logql/engine.go +++ b/pkg/logql/engine.go @@ -170,6 +170,20 @@ type query struct { record bool } +func (q *query) resultLength(res promql_parser.Value) int { + switch r := res.(type) { + case promql.Vector: + return len(r) + case promql.Matrix: + return r.TotalSamples() + case logqlmodel.Streams: + return int(r.Lines()) + default: + // for `scalar` or `string` or any other return type, we just return `0` as result length. + return 0 + } +} + // Exec Implements `Query`. It handles instrumentation & defers to Eval. 
func (q *query) Exec(ctx context.Context) (logqlmodel.Result, error) { log, ctx := spanlogger.New(ctx, "query.Exec") @@ -187,7 +201,7 @@ func (q *query) Exec(ctx context.Context) (logqlmodel.Result, error) { queueTime, _ := ctx.Value(httpreq.QueryQueueTimeHTTPHeader).(time.Duration) - statResult := statsCtx.Result(time.Since(start), queueTime) + statResult := statsCtx.Result(time.Since(start), queueTime, q.resultLength(data)) statResult.Log(level.Debug(log)) status := "200" @@ -202,7 +216,7 @@ func (q *query) Exec(ctx context.Context) (logqlmodel.Result, error) { } if q.record { - RecordMetrics(ctx, q.logger, q.params, status, statResult, data) + RecordRangeAndInstantQueryMetrics(ctx, q.logger, q.params, status, statResult, data) } return logqlmodel.Result{ diff --git a/pkg/logql/metrics.go b/pkg/logql/metrics.go index 11491cc9cd3a9..d918c65d0f5eb 100644 --- a/pkg/logql/metrics.go +++ b/pkg/logql/metrics.go @@ -3,6 +3,7 @@ package logql import ( "context" "strings" + "time" "github.com/dustin/go-humanize" "github.com/go-kit/log" @@ -23,6 +24,8 @@ const ( QueryTypeMetric = "metric" QueryTypeFilter = "filter" QueryTypeLimited = "limited" + QueryTypeLabels = "labels" + QueryTypeSeries = "series" latencyTypeSlow = "slow" latencyTypeFast = "fast" @@ -74,7 +77,14 @@ var ( linePerSecondLogUsage = usagestats.NewStatistics("query_log_lines_per_second") ) -func RecordMetrics(ctx context.Context, log log.Logger, p Params, status string, stats logql_stats.Result, result promql_parser.Value) { +func RecordRangeAndInstantQueryMetrics( + ctx context.Context, + log log.Logger, + p Params, + status string, + stats logql_stats.Result, + result promql_parser.Value, +) { var ( logger = util_log.WithContext(ctx, log) rt = string(GetRangeType(p)) @@ -113,13 +123,13 @@ func RecordMetrics(ctx context.Context, log log.Logger, p Params, status string, "returned_lines", returnedLines, "throughput", strings.Replace(humanize.Bytes(uint64(stats.Summary.BytesProcessedPerSecond)), " ", "", 1), "total_bytes", strings.Replace(humanize.Bytes(uint64(stats.Summary.TotalBytesProcessed)), " ", "", 1), + "total_entries", stats.Summary.TotalEntriesReturned, "queue_time", logql_stats.ConvertSecondsToNanoseconds(stats.Summary.QueueTime), "subqueries", stats.Summary.Subqueries, }...) logValues = append(logValues, tagsToKeyValues(queryTags)...) - // we also log queries, useful for troubleshooting slow queries. level.Info(logger).Log( logValues..., ) @@ -138,6 +148,94 @@ func RecordMetrics(ctx context.Context, log log.Logger, p Params, status string, recordUsageStats(queryType, stats) } +func RecordLabelQueryMetrics( + ctx context.Context, + log log.Logger, + start, end time.Time, + label, status string, + stats logql_stats.Result, +) { + var ( + logger = util_log.WithContext(ctx, log) + latencyType = latencyTypeFast + queryType = QueryTypeLabels + ) + + // Tag throughput metric by latency type based on a threshold. + // Latency below the threshold is fast, above is slow. 
+ if stats.Summary.ExecTime > slowQueryThresholdSecond { + latencyType = latencyTypeSlow + } + + level.Info(logger).Log( + "latency", latencyType, + "query_type", queryType, + "length", end.Sub(start), + "duration", time.Duration(int64(stats.Summary.ExecTime*float64(time.Second))), + "status", status, + "label", label, + "throughput", strings.Replace(humanize.Bytes(uint64(stats.Summary.BytesProcessedPerSecond)), " ", "", 1), + "total_bytes", strings.Replace(humanize.Bytes(uint64(stats.Summary.TotalBytesProcessed)), " ", "", 1), + "total_entries", stats.Summary.TotalEntriesReturned, + ) + + bytesPerSecond.WithLabelValues(status, queryType, "", latencyType). + Observe(float64(stats.Summary.BytesProcessedPerSecond)) + execLatency.WithLabelValues(status, queryType, ""). + Observe(stats.Summary.ExecTime) + chunkDownloadLatency.WithLabelValues(status, queryType, ""). + Observe(stats.ChunksDownloadTime().Seconds()) + duplicatesTotal.Add(float64(stats.TotalDuplicates())) + chunkDownloadedTotal.WithLabelValues(status, queryType, ""). + Add(float64(stats.TotalChunksDownloaded())) + ingesterLineTotal.Add(float64(stats.Ingester.TotalLinesSent)) +} + +func RecordSeriesQueryMetrics( + ctx context.Context, + log log.Logger, + start, end time.Time, + match []string, + status string, + stats logql_stats.Result, +) { + var ( + logger = util_log.WithContext(ctx, log) + latencyType = latencyTypeFast + queryType = QueryTypeSeries + ) + + // Tag throughput metric by latency type based on a threshold. + // Latency below the threshold is fast, above is slow. + if stats.Summary.ExecTime > slowQueryThresholdSecond { + latencyType = latencyTypeSlow + } + + // we also log queries, useful for troubleshooting slow queries. + level.Info(logger).Log( + "latency", latencyType, + "query_type", queryType, + "length", end.Sub(start), + "duration", time.Duration(int64(stats.Summary.ExecTime*float64(time.Second))), + "status", status, + "match", strings.Join(match, ":"), // not using comma (,) as separator as matcher may already have comma (e.g: `{a="b", c="d"}`) + "throughput", strings.Replace(humanize.Bytes(uint64(stats.Summary.BytesProcessedPerSecond)), " ", "", 1), + "total_bytes", strings.Replace(humanize.Bytes(uint64(stats.Summary.TotalBytesProcessed)), " ", "", 1), + "total_entries", stats.Summary.TotalEntriesReturned, + ) + + bytesPerSecond.WithLabelValues(status, queryType, "", latencyType). + Observe(float64(stats.Summary.BytesProcessedPerSecond)) + execLatency.WithLabelValues(status, queryType, ""). + Observe(stats.Summary.ExecTime) + chunkDownloadLatency.WithLabelValues(status, queryType, ""). + Observe(stats.ChunksDownloadTime().Seconds()) + duplicatesTotal.Add(float64(stats.TotalDuplicates())) + chunkDownloadedTotal.WithLabelValues(status, queryType, ""). 
+ Add(float64(stats.TotalChunksDownloaded())) + ingesterLineTotal.Add(float64(stats.Ingester.TotalLinesSent)) +} + func recordUsageStats(queryType string, stats logql_stats.Result) { if queryType == QueryTypeMetric { bytePerSecondMetricUsage.Record(float64(stats.Summary.BytesProcessedPerSecond)) diff --git a/pkg/logql/metrics_test.go b/pkg/logql/metrics_test.go index 219a83dedbe48..46db6463a20bc 100644 --- a/pkg/logql/metrics_test.go +++ b/pkg/logql/metrics_test.go @@ -66,7 +66,7 @@ func TestLogSlowQuery(t *testing.T) { ctx = context.WithValue(ctx, httpreq.QueryTagsHTTPHeader, "Source=logvolhist,Feature=Beta") - RecordMetrics(ctx, util_log.Logger, LiteralParams{ + RecordRangeAndInstantQueryMetrics(ctx, util_log.Logger, LiteralParams{ qs: `{foo="bar"} |= "buzz"`, direction: logproto.BACKWARD, end: now, @@ -79,11 +79,64 @@ func TestLogSlowQuery(t *testing.T) { QueueTime: 0.000000002, ExecTime: 25.25, TotalBytesProcessed: 100000, + TotalEntriesReturned: 10, }, }, logqlmodel.Streams{logproto.Stream{Entries: make([]logproto.Entry, 10)}}) require.Equal(t, fmt.Sprintf( - "level=info org_id=foo traceID=%s latency=slow query=\"{foo=\\\"bar\\\"} |= \\\"buzz\\\"\" query_type=filter range_type=range length=1h0m0s step=1m0s duration=25.25s status=200 limit=1000 returned_lines=10 throughput=100kB total_bytes=100kB queue_time=2ns subqueries=0 source=logvolhist feature=beta\n", + "level=info org_id=foo traceID=%s latency=slow query=\"{foo=\\\"bar\\\"} |= \\\"buzz\\\"\" query_type=filter range_type=range length=1h0m0s step=1m0s duration=25.25s status=200 limit=1000 returned_lines=10 throughput=100kB total_bytes=100kB total_entries=10 queue_time=2ns subqueries=0 source=logvolhist feature=beta\n", + sp.Context().(jaeger.SpanContext).SpanID().String(), + ), + buf.String()) + util_log.Logger = log.NewNopLogger() +} + +func TestLogLabelsQuery(t *testing.T) { + buf := bytes.NewBufferString("") + logger := log.NewLogfmtLogger(buf) + tr, c := jaeger.NewTracer("foo", jaeger.NewConstSampler(true), jaeger.NewInMemoryReporter()) + defer c.Close() + opentracing.SetGlobalTracer(tr) + sp := opentracing.StartSpan("") + ctx := opentracing.ContextWithSpan(user.InjectOrgID(context.Background(), "foo"), sp) + now := time.Now() + RecordLabelQueryMetrics(ctx, logger, now.Add(-1*time.Hour), now, "foo", "200", stats.Result{ + Summary: stats.Summary{ + BytesProcessedPerSecond: 100000, + ExecTime: 25.25, + TotalBytesProcessed: 100000, + TotalEntriesReturned: 12, + }, + }) + require.Equal(t, + fmt.Sprintf( + "level=info org_id=foo traceID=%s latency=slow query_type=labels length=1h0m0s duration=25.25s status=200 label=foo throughput=100kB total_bytes=100kB total_entries=12\n", + sp.Context().(jaeger.SpanContext).SpanID().String(), + ), + buf.String()) + util_log.Logger = log.NewNopLogger() +} + +func TestLogSeriesQuery(t *testing.T) { + buf := bytes.NewBufferString("") + logger := log.NewLogfmtLogger(buf) + tr, c := jaeger.NewTracer("foo", jaeger.NewConstSampler(true), jaeger.NewInMemoryReporter()) + defer c.Close() + opentracing.SetGlobalTracer(tr) + sp := opentracing.StartSpan("") + ctx := opentracing.ContextWithSpan(user.InjectOrgID(context.Background(), "foo"), sp) + now := time.Now() + RecordSeriesQueryMetrics(ctx, logger, now.Add(-1*time.Hour), now, []string{`{container_name=~"prometheus.*", component="server"}`, `{app="loki"}`}, "200", stats.Result{ + Summary: stats.Summary{ + BytesProcessedPerSecond: 100000, + ExecTime: 25.25, + TotalBytesProcessed: 100000, + TotalEntriesReturned: 10, + }, + }) + require.Equal(t, + 
fmt.Sprintf( + "level=info org_id=foo traceID=%s latency=slow query_type=series length=1h0m0s duration=25.25s status=200 match=\"{container_name=~\\\"prometheus.*\\\", component=\\\"server\\\"}:{app=\\\"loki\\\"}\" throughput=100kB total_bytes=100kB total_entries=10\n", sp.Context().(jaeger.SpanContext).SpanID().String(), ), buf.String()) diff --git a/pkg/logqlmodel/stats/context.go b/pkg/logqlmodel/stats/context.go index 6a842153622e4..1d23f76ef3216 100644 --- a/pkg/logqlmodel/stats/context.go +++ b/pkg/logqlmodel/stats/context.go @@ -15,7 +15,7 @@ To get the statistic from the current context you can use: Finally to get a snapshot of the current query statistic use - statsCtx.Result(time.Since(start)) + statsCtx.Result(time.Since(start), queueTime, totalEntriesReturned) */ package stats @@ -91,7 +91,7 @@ func (c *Context) Reset() { } // Result calculates the summary based on store and ingester data. -func (c *Context) Result(execTime time.Duration, queueTime time.Duration) Result { +func (c *Context) Result(execTime time.Duration, queueTime time.Duration, totalEntriesReturned int) Result { r := c.result r.Merge(Result{ @@ -101,7 +101,7 @@ func (c *Context) Result(execTime time.Duration, queueTime time.Duration) Result Ingester: c.ingester, }) - r.ComputeSummary(execTime, queueTime) + r.ComputeSummary(execTime, queueTime, totalEntriesReturned) return r } @@ -125,7 +125,7 @@ func JoinIngesters(ctx context.Context, inc Ingester) { } // ComputeSummary compute the summary of the statistics. -func (r *Result) ComputeSummary(execTime time.Duration, queueTime time.Duration) { +func (r *Result) ComputeSummary(execTime time.Duration, queueTime time.Duration, totalEntriesReturned int) { r.Summary.TotalBytesProcessed = r.Querier.Store.Chunk.DecompressedBytes + r.Querier.Store.Chunk.HeadChunkBytes + r.Ingester.Store.Chunk.DecompressedBytes + r.Ingester.Store.Chunk.HeadChunkBytes r.Summary.TotalLinesProcessed = r.Querier.Store.Chunk.DecompressedLines + r.Querier.Store.Chunk.HeadChunkLines + @@ -140,6 +140,8 @@ func (r *Result) ComputeSummary(execTime time.Duration, queueTime time.Duration) if queueTime != 0 { r.Summary.QueueTime = queueTime.Seconds() } + + r.Summary.TotalEntriesReturned = int64(totalEntriesReturned) } func (s *Store) Merge(m Store) { @@ -173,7 +175,7 @@ func (r *Result) Merge(m Result) { r.Querier.Merge(m.Querier) r.Ingester.Merge(m.Ingester) r.ComputeSummary(ConvertSecondsToNanoseconds(r.Summary.ExecTime+m.Summary.ExecTime), - ConvertSecondsToNanoseconds(r.Summary.QueueTime+m.Summary.QueueTime)) + ConvertSecondsToNanoseconds(r.Summary.QueueTime+m.Summary.QueueTime), int(r.Summary.TotalEntriesReturned)) } // ConvertSecondsToNanoseconds converts time.Duration representation of seconds (float64) diff --git a/pkg/logqlmodel/stats/context_test.go b/pkg/logqlmodel/stats/context_test.go index 1f4af90e92517..0b97024c9b88f 100644 --- a/pkg/logqlmodel/stats/context_test.go +++ b/pkg/logqlmodel/stats/context_test.go @@ -26,7 +26,7 @@ func TestResult(t *testing.T) { fakeIngesterQuery(ctx) fakeIngesterQuery(ctx) - res := stats.Result(2*time.Second, 2*time.Nanosecond) + res := stats.Result(2*time.Second, 2*time.Nanosecond, 10) res.Log(util_log.Logger) expected := Result{ Ingester: Ingester{ @@ -67,6 +67,7 @@ func TestResult(t *testing.T) { LinesProcessedPerSecond: int64(50), TotalBytesProcessed: int64(84), TotalLinesProcessed: int64(100), + TotalEntriesReturned: int64(10), Subqueries: 1, }, } @@ -114,12 +115,13 @@ func TestSnapshot_JoinResults(t *testing.T) { LinesProcessedPerSecond: int64(50), 
TotalBytesProcessed: int64(84), TotalLinesProcessed: int64(100), + TotalEntriesReturned: int64(10), Subqueries: 2, }, } JoinResults(ctx, expected) - res := statsCtx.Result(2*time.Second, 2*time.Nanosecond) + res := statsCtx.Result(2*time.Second, 2*time.Nanosecond, 10) require.Equal(t, expected, res) } @@ -243,10 +245,10 @@ func TestResult_Merge(t *testing.T) { func TestReset(t *testing.T) { statsCtx, ctx := NewContext(context.Background()) fakeIngesterQuery(ctx) - res := statsCtx.Result(2*time.Second, 2*time.Millisecond) + res := statsCtx.Result(2*time.Second, 2*time.Millisecond, 10) require.NotEmpty(t, res) statsCtx.Reset() - res = statsCtx.Result(0, 0) + res = statsCtx.Result(0, 0, 0) res.Summary.Subqueries = 0 require.Empty(t, res) } diff --git a/pkg/logqlmodel/stats/stats.pb.go b/pkg/logqlmodel/stats/stats.pb.go index 7a8f0fdf44b04..a8748ebe9ef52 100644 --- a/pkg/logqlmodel/stats/stats.pb.go +++ b/pkg/logqlmodel/stats/stats.pb.go @@ -106,6 +106,8 @@ type Summary struct { QueueTime float64 `protobuf:"fixed64,6,opt,name=queueTime,proto3" json:"queueTime"` // Total of subqueries created to fulfill this query. Subqueries int64 `protobuf:"varint,7,opt,name=subqueries,proto3" json:"subqueries"` + // Total number of result entries returned + TotalEntriesReturned int64 `protobuf:"varint,8,opt,name=totalEntriesReturned,proto3" json:"totalEntriesReturned"` } func (m *Summary) Reset() { *m = Summary{} } @@ -189,6 +191,13 @@ func (m *Summary) GetSubqueries() int64 { return 0 } +func (m *Summary) GetTotalEntriesReturned() int64 { + if m != nil { + return m.TotalEntriesReturned + } + return 0 +} + type Querier struct { Store Store `protobuf:"bytes,1,opt,name=store,proto3" json:"store"` } @@ -482,53 +491,55 @@ func init() { func init() { proto.RegisterFile("pkg/logqlmodel/stats/stats.proto", fileDescriptor_6cdfe5d2aea33ebb) } var fileDescriptor_6cdfe5d2aea33ebb = []byte{ - // 734 bytes of a gzipped FileDescriptorProto + // 759 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x55, 0x4d, 0x6f, 0xd3, 0x4c, - 0x10, 0xb6, 0x93, 0xd7, 0x49, 0xba, 0x6f, 0xbf, 0xd8, 0xaa, 0x34, 0x80, 0x64, 0x57, 0x39, 0x55, - 0x02, 0x12, 0xf1, 0x71, 0x01, 0xd1, 0x8b, 0x5b, 0x21, 0x55, 0x02, 0x51, 0xa6, 0x70, 0xe1, 0xe6, - 0x38, 0xdb, 0xc4, 0xaa, 0xe3, 0x4d, 0xfd, 0x21, 0xe8, 0x8d, 0x1b, 0x47, 0xf8, 0x19, 0x5c, 0xf8, - 0x09, 0xdc, 0x7b, 0xec, 0x81, 0x43, 0x4f, 0x16, 0x75, 0x2f, 0xc8, 0xa7, 0xfe, 0x04, 0xe4, 0x59, - 0xc7, 0x8e, 0x1d, 0x47, 0xe2, 0x92, 0xcc, 0x3c, 0xcf, 0x3c, 0x33, 0xeb, 0x99, 0x59, 0x2d, 0xd9, - 0x9e, 0x9c, 0x0c, 0x7b, 0x36, 0x1f, 0x9e, 0xda, 0x63, 0x3e, 0x60, 0x76, 0xcf, 0xf3, 0x0d, 0xdf, - 0x13, 0xbf, 0xdd, 0x89, 0xcb, 0x7d, 0x4e, 0x15, 0x74, 0xee, 0x3e, 0x1c, 0x5a, 0xfe, 0x28, 0xe8, - 0x77, 0x4d, 0x3e, 0xee, 0x0d, 0xf9, 0x90, 0xf7, 0x90, 0xed, 0x07, 0xc7, 0xe8, 0xa1, 0x83, 0x96, - 0x50, 0x75, 0x7e, 0xca, 0xa4, 0x01, 0xcc, 0x0b, 0x6c, 0x9f, 0x3e, 0x23, 0x4d, 0x2f, 0x18, 0x8f, - 0x0d, 0xf7, 0xac, 0x2d, 0x6f, 0xcb, 0x3b, 0xff, 0x3f, 0x5e, 0xed, 0x8a, 0xfc, 0x47, 0x02, 0xd5, - 0xd7, 0xce, 0x43, 0x4d, 0x8a, 0x43, 0x6d, 0x1a, 0x06, 0x53, 0x23, 0x91, 0x9e, 0x06, 0xcc, 0xb5, - 0x98, 0xdb, 0xae, 0x15, 0xa4, 0x6f, 0x05, 0x9a, 0x4b, 0xd3, 0x30, 0x98, 0x1a, 0x74, 0x97, 0xb4, - 0x2c, 0x67, 0xc8, 0x3c, 0x9f, 0xb9, 0xed, 0x3a, 0x6a, 0xd7, 0x52, 0xed, 0x41, 0x0a, 0xeb, 0xeb, - 0xa9, 0x38, 0x0b, 0x84, 0xcc, 0xea, 0xfc, 0xaa, 0x93, 0x66, 0x7a, 0x3e, 0xfa, 0x9e, 0x6c, 0xf5, - 0xcf, 0x7c, 0xe6, 0x1d, 0xba, 0xdc, 0x64, 0x9e, 0xc7, 0x06, 0x87, 0xcc, 0x3d, 0x62, 0x26, 0x77, 
- 0x06, 0xf8, 0x41, 0x75, 0xfd, 0x5e, 0x1c, 0x6a, 0x8b, 0x42, 0x60, 0x11, 0x91, 0xa4, 0xb5, 0x2d, - 0xa7, 0x32, 0x6d, 0x2d, 0x4f, 0xbb, 0x20, 0x04, 0x16, 0x11, 0xf4, 0x80, 0x6c, 0xf8, 0xdc, 0x37, - 0x6c, 0xbd, 0x50, 0x16, 0x7b, 0x50, 0xd7, 0xb7, 0xe2, 0x50, 0xab, 0xa2, 0xa1, 0x0a, 0xcc, 0x52, - 0xbd, 0x2a, 0x94, 0x6a, 0xff, 0x57, 0x4a, 0x55, 0xa4, 0xa1, 0x0a, 0xa4, 0x3b, 0xa4, 0xc5, 0x3e, - 0x31, 0xf3, 0x9d, 0x35, 0x66, 0x6d, 0x65, 0x5b, 0xde, 0x91, 0xf5, 0xe5, 0xa4, 0xf3, 0x53, 0x0c, - 0x32, 0x8b, 0xde, 0x27, 0x4b, 0xa7, 0x01, 0x0b, 0x18, 0x86, 0x36, 0x30, 0x74, 0x25, 0x0e, 0xb5, - 0x1c, 0x84, 0xdc, 0xa4, 0x5d, 0x42, 0xbc, 0xa0, 0x2f, 0x66, 0xee, 0xb5, 0x9b, 0x78, 0xb0, 0xd5, - 0x38, 0xd4, 0x66, 0x50, 0x98, 0xb1, 0x3b, 0x2f, 0x48, 0x33, 0x5d, 0x1d, 0xfa, 0x88, 0x28, 0x9e, - 0xcf, 0x5d, 0x96, 0x2e, 0xe5, 0xf2, 0x74, 0x29, 0x13, 0x4c, 0x5f, 0x49, 0x57, 0x43, 0x84, 0x80, - 0xf8, 0xeb, 0xfc, 0xa8, 0x91, 0xd6, 0x74, 0x7b, 0xe8, 0x53, 0xb2, 0x8c, 0x1f, 0x0a, 0xcc, 0x30, - 0x47, 0x4c, 0xac, 0x82, 0xa2, 0xaf, 0xc7, 0xa1, 0x56, 0xc0, 0xa1, 0xe0, 0xd1, 0x97, 0x84, 0xa2, - 0xbf, 0x37, 0x0a, 0x9c, 0x13, 0xef, 0xb5, 0xe1, 0xa3, 0x56, 0xcc, 0xfb, 0x76, 0x1c, 0x6a, 0x15, - 0x2c, 0x54, 0x60, 0x59, 0x75, 0x1d, 0x7d, 0x2f, 0x1d, 0x6f, 0x5e, 0x3d, 0xc5, 0xa1, 0xe0, 0xd1, - 0xe7, 0x64, 0x35, 0x1f, 0xce, 0x11, 0x73, 0xfc, 0x74, 0x96, 0x34, 0x0e, 0xb5, 0x12, 0x03, 0x25, - 0x3f, 0xef, 0x97, 0xf2, 0xcf, 0xfd, 0xfa, 0x5a, 0x23, 0x0a, 0xf2, 0x59, 0x61, 0xf1, 0x11, 0xc0, - 0x8e, 0xd3, 0x9b, 0x93, 0x17, 0xce, 0x18, 0x28, 0xf9, 0xf4, 0x0d, 0xd9, 0x9c, 0x41, 0xf6, 0xf9, - 0x47, 0xc7, 0xe6, 0xc6, 0x20, 0xeb, 0xda, 0x9d, 0x38, 0xd4, 0xaa, 0x03, 0xa0, 0x1a, 0x4e, 0x66, - 0x60, 0x16, 0x30, 0x5c, 0xb5, 0x7a, 0x3e, 0x83, 0x79, 0x16, 0x2a, 0xb0, 0xa4, 0x23, 0x88, 0x62, - 0x13, 0xf3, 0x8e, 0x60, 0xbd, 0xbc, 0x23, 0x18, 0x02, 0xe2, 0xaf, 0xf3, 0xa5, 0x4e, 0x14, 0xe4, - 0x93, 0x8e, 0x8c, 0x98, 0x31, 0x10, 0xc1, 0xc9, 0xb5, 0x9b, 0x1d, 0x45, 0x91, 0x81, 0x92, 0x5f, - 0xd0, 0xe2, 0x80, 0x70, 0x26, 0x65, 0x2d, 0x32, 0x50, 0xf2, 0xe9, 0x1e, 0xb9, 0x35, 0x60, 0x26, - 0x1f, 0x4f, 0x5c, 0xbc, 0x98, 0xa2, 0x74, 0x03, 0xe5, 0x9b, 0x71, 0xa8, 0xcd, 0x93, 0x30, 0x0f, - 0x95, 0x93, 0x88, 0x33, 0x34, 0xab, 0x93, 0x88, 0x63, 0xcc, 0x43, 0x74, 0x97, 0xac, 0x95, 0xcf, - 0xd1, 0xc2, 0x14, 0x1b, 0x71, 0xa8, 0x95, 0x29, 0x28, 0x03, 0x89, 0x1c, 0xc7, 0xbb, 0x1f, 0x4c, - 0x6c, 0xcb, 0x34, 0x12, 0xf9, 0x52, 0x2e, 0x2f, 0x51, 0x50, 0x06, 0xf4, 0xfe, 0xc5, 0x95, 0x2a, - 0x5d, 0x5e, 0xa9, 0xd2, 0xcd, 0x95, 0x2a, 0x7f, 0x8e, 0x54, 0xf9, 0x7b, 0xa4, 0xca, 0xe7, 0x91, - 0x2a, 0x5f, 0x44, 0xaa, 0xfc, 0x3b, 0x52, 0xe5, 0x3f, 0x91, 0x2a, 0xdd, 0x44, 0xaa, 0xfc, 0xed, - 0x5a, 0x95, 0x2e, 0xae, 0x55, 0xe9, 0xf2, 0x5a, 0x95, 0x3e, 0x3c, 0x98, 0x7d, 0x05, 0x5d, 0xe3, - 0xd8, 0x70, 0x8c, 0x9e, 0xcd, 0x4f, 0xac, 0x5e, 0xd5, 0x33, 0xda, 0x6f, 0xe0, 0x5b, 0xf8, 0xe4, - 0x6f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x4e, 0x99, 0x9a, 0xa3, 0x65, 0x07, 0x00, 0x00, + 0x10, 0x8e, 0x93, 0xe6, 0xa3, 0xfb, 0xf6, 0xeb, 0xdd, 0x52, 0x6a, 0x40, 0xb2, 0xab, 0x9c, 0x2a, + 0x01, 0x89, 0xf8, 0xb8, 0x80, 0xe8, 0xc5, 0x2d, 0x48, 0x95, 0x8a, 0x28, 0x53, 0xb8, 0x70, 0x73, + 0x9c, 0x6d, 0x12, 0xd5, 0xf1, 0xa6, 0xf6, 0x5a, 0xd0, 0x1b, 0x37, 0x8e, 0xf0, 0x1b, 0x38, 0x71, + 0xe1, 0x27, 0x70, 0xef, 0xb1, 0xc7, 0x9e, 0x2c, 0x9a, 0x5e, 0x90, 0x4f, 0xfd, 0x09, 0xc8, 0xb3, + 0x8e, 0x1d, 0x3b, 0x8e, 0xc4, 0x25, 0x99, 0x79, 0x9e, 0x79, 0x66, 0xd6, 0x33, 0xb3, 0x5a, 0xb2, + 0x35, 0x3a, 0xe9, 0xb5, 0x6d, 0xde, 0x3b, 0xb5, 0x87, 0xbc, 0xcb, 0xec, 0xb6, 0x27, 0x4c, 0xe1, + 0xc9, 0xdf, 0xd6, 0xc8, 0xe5, 0x82, 
0xd3, 0x2a, 0x3a, 0x77, 0x1f, 0xf6, 0x06, 0xa2, 0xef, 0x77, + 0x5a, 0x16, 0x1f, 0xb6, 0x7b, 0xbc, 0xc7, 0xdb, 0xc8, 0x76, 0xfc, 0x63, 0xf4, 0xd0, 0x41, 0x4b, + 0xaa, 0x9a, 0xbf, 0x14, 0x52, 0x03, 0xe6, 0xf9, 0xb6, 0xa0, 0xcf, 0x48, 0xdd, 0xf3, 0x87, 0x43, + 0xd3, 0x3d, 0x53, 0x95, 0x2d, 0x65, 0xfb, 0xbf, 0xc7, 0x2b, 0x2d, 0x99, 0xff, 0x48, 0xa2, 0xc6, + 0xea, 0x79, 0xa0, 0x97, 0xc2, 0x40, 0x9f, 0x84, 0xc1, 0xc4, 0x88, 0xa4, 0xa7, 0x3e, 0x73, 0x07, + 0xcc, 0x55, 0xcb, 0x19, 0xe9, 0x5b, 0x89, 0xa6, 0xd2, 0x38, 0x0c, 0x26, 0x06, 0xdd, 0x21, 0x8d, + 0x81, 0xd3, 0x63, 0x9e, 0x60, 0xae, 0x5a, 0x41, 0xed, 0x6a, 0xac, 0xdd, 0x8f, 0x61, 0x63, 0x2d, + 0x16, 0x27, 0x81, 0x90, 0x58, 0xcd, 0xef, 0x0b, 0xa4, 0x1e, 0x9f, 0x8f, 0xbe, 0x27, 0x9b, 0x9d, + 0x33, 0xc1, 0xbc, 0x43, 0x97, 0x5b, 0xcc, 0xf3, 0x58, 0xf7, 0x90, 0xb9, 0x47, 0xcc, 0xe2, 0x4e, + 0x17, 0x3f, 0xa8, 0x62, 0xdc, 0x0b, 0x03, 0x7d, 0x5e, 0x08, 0xcc, 0x23, 0xa2, 0xb4, 0xf6, 0xc0, + 0x29, 0x4c, 0x5b, 0x4e, 0xd3, 0xce, 0x09, 0x81, 0x79, 0x04, 0xdd, 0x27, 0xeb, 0x82, 0x0b, 0xd3, + 0x36, 0x32, 0x65, 0xb1, 0x07, 0x15, 0x63, 0x33, 0x0c, 0xf4, 0x22, 0x1a, 0x8a, 0xc0, 0x24, 0xd5, + 0x41, 0xa6, 0x94, 0xba, 0x90, 0x4b, 0x95, 0xa5, 0xa1, 0x08, 0xa4, 0xdb, 0xa4, 0xc1, 0x3e, 0x31, + 0xeb, 0xdd, 0x60, 0xc8, 0xd4, 0xea, 0x96, 0xb2, 0xad, 0x18, 0x4b, 0x51, 0xe7, 0x27, 0x18, 0x24, + 0x16, 0xbd, 0x4f, 0x16, 0x4f, 0x7d, 0xe6, 0x33, 0x0c, 0xad, 0x61, 0xe8, 0x72, 0x18, 0xe8, 0x29, + 0x08, 0xa9, 0x49, 0x5b, 0x84, 0x78, 0x7e, 0x47, 0xce, 0xdc, 0x53, 0xeb, 0x78, 0xb0, 0x95, 0x30, + 0xd0, 0xa7, 0x50, 0x98, 0xb2, 0xe9, 0x01, 0xb9, 0x85, 0xa7, 0x7b, 0xe9, 0x08, 0xe4, 0x98, 0xf0, + 0x5d, 0x87, 0x75, 0xd5, 0x06, 0x2a, 0xd5, 0x30, 0xd0, 0x0b, 0x79, 0x28, 0x44, 0x9b, 0x2f, 0x48, + 0x3d, 0x5e, 0x44, 0xfa, 0x88, 0x54, 0x3d, 0xc1, 0x5d, 0x16, 0xaf, 0xf8, 0xd2, 0x64, 0xc5, 0x23, + 0xcc, 0x58, 0x8e, 0x17, 0x4d, 0x86, 0x80, 0xfc, 0x6b, 0xfe, 0x2c, 0x93, 0xc6, 0x64, 0x17, 0xe9, + 0x53, 0xb2, 0x84, 0x25, 0x80, 0x99, 0x56, 0x9f, 0xc9, 0xc5, 0xaa, 0x1a, 0x6b, 0x61, 0xa0, 0x67, + 0x70, 0xc8, 0x78, 0xf4, 0x15, 0xa1, 0xe8, 0xef, 0xf6, 0x7d, 0xe7, 0xc4, 0x7b, 0x6d, 0x0a, 0xd4, + 0xca, 0xed, 0xb9, 0x1d, 0x06, 0x7a, 0x01, 0x0b, 0x05, 0x58, 0x52, 0xdd, 0x40, 0xdf, 0x8b, 0x97, + 0x25, 0xad, 0x1e, 0xe3, 0x90, 0xf1, 0xe8, 0x73, 0xb2, 0x92, 0x8e, 0xfa, 0x88, 0x39, 0x22, 0xde, + 0x0c, 0x1a, 0x06, 0x7a, 0x8e, 0x81, 0x9c, 0x9f, 0xf6, 0xab, 0xfa, 0xcf, 0xfd, 0xfa, 0x5a, 0x26, + 0x55, 0xe4, 0x93, 0xc2, 0xf2, 0x23, 0x80, 0x1d, 0xc7, 0xf7, 0x30, 0x2d, 0x9c, 0x30, 0x90, 0xf3, + 0xe9, 0x1b, 0xb2, 0x31, 0x85, 0xec, 0xf1, 0x8f, 0x8e, 0xcd, 0xcd, 0x6e, 0xd2, 0xb5, 0x3b, 0x61, + 0xa0, 0x17, 0x07, 0x40, 0x31, 0x1c, 0xcd, 0xc0, 0xca, 0x60, 0xb8, 0xb8, 0x95, 0x74, 0x06, 0xb3, + 0x2c, 0x14, 0x60, 0x51, 0x47, 0x10, 0xc5, 0x26, 0xa6, 0x1d, 0xc1, 0x7a, 0x69, 0x47, 0x30, 0x04, + 0xe4, 0x5f, 0xf3, 0x4b, 0x85, 0x54, 0x91, 0x8f, 0x3a, 0xd2, 0x67, 0x66, 0x57, 0x06, 0x47, 0x97, + 0x78, 0x7a, 0x14, 0x59, 0x06, 0x72, 0x7e, 0x46, 0x8b, 0x03, 0xc2, 0x99, 0xe4, 0xb5, 0xc8, 0x40, + 0xce, 0xa7, 0xbb, 0xe4, 0xff, 0x2e, 0xb3, 0xf8, 0x70, 0xe4, 0xe2, 0x35, 0x97, 0xa5, 0x6b, 0x28, + 0xdf, 0x08, 0x03, 0x7d, 0x96, 0x84, 0x59, 0x28, 0x9f, 0x44, 0x9e, 0xa1, 0x5e, 0x9c, 0x44, 0x1e, + 0x63, 0x16, 0xa2, 0x3b, 0x64, 0x35, 0x7f, 0x0e, 0x79, 0xa9, 0xd7, 0xc3, 0x40, 0xcf, 0x53, 0x90, + 0x07, 0x22, 0x39, 0x8e, 0x77, 0xcf, 0x1f, 0xd9, 0x03, 0xcb, 0x8c, 0xe4, 0x8b, 0xa9, 0x3c, 0x47, + 0x41, 0x1e, 0x30, 0x3a, 0x17, 0x57, 0x5a, 0xe9, 0xf2, 0x4a, 0x2b, 0xdd, 0x5c, 0x69, 0xca, 0xe7, + 0xb1, 0xa6, 0xfc, 0x18, 0x6b, 0xca, 0xf9, 0x58, 0x53, 0x2e, 
0xc6, 0x9a, 0xf2, 0x7b, 0xac, 0x29, + 0x7f, 0xc6, 0x5a, 0xe9, 0x66, 0xac, 0x29, 0xdf, 0xae, 0xb5, 0xd2, 0xc5, 0xb5, 0x56, 0xba, 0xbc, + 0xd6, 0x4a, 0x1f, 0x1e, 0x4c, 0xbf, 0xa9, 0xae, 0x79, 0x6c, 0x3a, 0x66, 0xdb, 0xe6, 0x27, 0x83, + 0x76, 0xd1, 0xa3, 0xdc, 0xa9, 0xe1, 0xcb, 0xfa, 0xe4, 0x6f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x9a, + 0x84, 0xb3, 0x79, 0xb3, 0x07, 0x00, 0x00, } func (this *Result) Equal(that interface{}) bool { @@ -601,6 +612,9 @@ func (this *Summary) Equal(that interface{}) bool { if this.Subqueries != that1.Subqueries { return false } + if this.TotalEntriesReturned != that1.TotalEntriesReturned { + return false + } return true } func (this *Querier) Equal(that interface{}) bool { @@ -751,7 +765,7 @@ func (this *Summary) GoString() string { if this == nil { return "nil" } - s := make([]string, 0, 11) + s := make([]string, 0, 12) s = append(s, "&stats.Summary{") s = append(s, "BytesProcessedPerSecond: "+fmt.Sprintf("%#v", this.BytesProcessedPerSecond)+",\n") s = append(s, "LinesProcessedPerSecond: "+fmt.Sprintf("%#v", this.LinesProcessedPerSecond)+",\n") @@ -760,6 +774,7 @@ func (this *Summary) GoString() string { s = append(s, "ExecTime: "+fmt.Sprintf("%#v", this.ExecTime)+",\n") s = append(s, "QueueTime: "+fmt.Sprintf("%#v", this.QueueTime)+",\n") s = append(s, "Subqueries: "+fmt.Sprintf("%#v", this.Subqueries)+",\n") + s = append(s, "TotalEntriesReturned: "+fmt.Sprintf("%#v", this.TotalEntriesReturned)+",\n") s = append(s, "}") return strings.Join(s, "") } @@ -896,6 +911,11 @@ func (m *Summary) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.TotalEntriesReturned != 0 { + i = encodeVarintStats(dAtA, i, uint64(m.TotalEntriesReturned)) + i-- + dAtA[i] = 0x40 + } if m.Subqueries != 0 { i = encodeVarintStats(dAtA, i, uint64(m.Subqueries)) i-- @@ -1176,6 +1196,9 @@ func (m *Summary) Size() (n int) { if m.Subqueries != 0 { n += 1 + sovStats(uint64(m.Subqueries)) } + if m.TotalEntriesReturned != 0 { + n += 1 + sovStats(uint64(m.TotalEntriesReturned)) + } return n } @@ -1290,6 +1313,7 @@ func (this *Summary) String() string { `ExecTime:` + fmt.Sprintf("%v", this.ExecTime) + `,`, `QueueTime:` + fmt.Sprintf("%v", this.QueueTime) + `,`, `Subqueries:` + fmt.Sprintf("%v", this.Subqueries) + `,`, + `TotalEntriesReturned:` + fmt.Sprintf("%v", this.TotalEntriesReturned) + `,`, `}`, }, "") return s @@ -1652,6 +1676,25 @@ func (m *Summary) Unmarshal(dAtA []byte) error { break } } + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TotalEntriesReturned", wireType) + } + m.TotalEntriesReturned = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStats + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TotalEntriesReturned |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } default: iNdEx = preIndex skippy, err := skipStats(dAtA[iNdEx:]) diff --git a/pkg/logqlmodel/stats/stats.proto b/pkg/logqlmodel/stats/stats.proto index 00fc806b2d54e..657c8e2545d1a 100644 --- a/pkg/logqlmodel/stats/stats.proto +++ b/pkg/logqlmodel/stats/stats.proto @@ -44,6 +44,8 @@ message Summary { double queueTime = 6 [(gogoproto.jsontag) = "queueTime"]; // Total of subqueries created to fulfill this query. 
int64 subqueries = 7 [(gogoproto.jsontag) = "subqueries"]; + // Total number of result entries returned + int64 totalEntriesReturned = 8 [(gogoproto.jsontag) = "totalEntriesReturned"]; } message Querier { diff --git a/pkg/querier/http.go b/pkg/querier/http.go index a7e8d153b30f1..986cb1ea2bde5 100644 --- a/pkg/querier/http.go +++ b/pkg/querier/http.go @@ -3,6 +3,7 @@ package querier import ( "context" "net/http" + "strconv" "time" "github.com/go-kit/log" @@ -19,10 +20,14 @@ import ( "github.com/grafana/loki/pkg/logql" "github.com/grafana/loki/pkg/logql/syntax" "github.com/grafana/loki/pkg/logqlmodel" + "github.com/grafana/loki/pkg/logqlmodel/stats" + "github.com/grafana/loki/pkg/util/httpreq" util_log "github.com/grafana/loki/pkg/util/log" "github.com/grafana/loki/pkg/util/marshal" marshal_legacy "github.com/grafana/loki/pkg/util/marshal/legacy" + "github.com/grafana/loki/pkg/util/server" serverutil "github.com/grafana/loki/pkg/util/server" + "github.com/grafana/loki/pkg/util/spanlogger" util_validation "github.com/grafana/loki/pkg/util/validation" "github.com/grafana/loki/pkg/validation" ) @@ -200,7 +205,25 @@ func (q *QuerierAPI) LabelHandler(w http.ResponseWriter, r *http.Request) { return } + log, ctx := spanlogger.New(r.Context(), "query.Label") + + start := time.Now() + statsCtx, ctx := stats.NewContext(ctx) + resp, err := q.querier.Label(r.Context(), req) + queueTime, _ := ctx.Value(httpreq.QueryQueueTimeHTTPHeader).(time.Duration) + + // record stats about the label query + statResult := statsCtx.Result(time.Since(start), queueTime, len(resp.Values)) + statResult.Log(level.Debug(log)) + + status := 200 + if err != nil { + status, _ = server.ClientHTTPStatusAndError(err) + } + + logql.RecordLabelQueryMetrics(ctx, log, *req.Start, *req.End, req.Name, strconv.Itoa(status), statResult) + if err != nil { serverutil.WriteError(err, w) return @@ -350,7 +373,24 @@ func (q *QuerierAPI) SeriesHandler(w http.ResponseWriter, r *http.Request) { return } + log, ctx := spanlogger.New(r.Context(), "query.Series") + + start := time.Now() + statsCtx, ctx := stats.NewContext(ctx) + resp, err := q.querier.Series(r.Context(), req) + queueTime, _ := ctx.Value(httpreq.QueryQueueTimeHTTPHeader).(time.Duration) + + // record stats about the series query + statResult := statsCtx.Result(time.Since(start), queueTime, len(resp.Series)) + statResult.Log(level.Debug(log)) + + status := 200 + if err != nil { + status, _ = server.ClientHTTPStatusAndError(err) + } + + logql.RecordSeriesQueryMetrics(ctx, log, req.Start, req.End, req.Groups, strconv.Itoa(status), statResult) if err != nil { serverutil.WriteError(err, w) return diff --git a/pkg/querier/queryrange/codec.go b/pkg/querier/queryrange/codec.go index 4d140a871da42..36f231e136587 100644 --- a/pkg/querier/queryrange/codec.go +++ b/pkg/querier/queryrange/codec.go @@ -779,8 +779,16 @@ func paramsFromRequest(req queryrangebase.Request) (logql.Params, error) { return &paramsInstantWrapper{ LokiInstantRequest: r, }, nil + case *LokiSeriesRequest: + return &paramsSeriesWrapper{ + LokiSeriesRequest: r, + }, nil + case *LokiLabelNamesRequest: + return &paramsLabelNamesWrapper{ + LokiLabelNamesRequest: r, + }, nil default: - return nil, fmt.Errorf("expected *LokiRequest or *LokiInstantRequest, got (%T)", r) + return nil, fmt.Errorf("expected one of the *LokiRequest, *LokiInstantRequest, *LokiSeriesRequest, *LokiLabelNamesRequest, got (%T)", r) } } @@ -842,6 +850,62 @@ func (p paramsInstantWrapper) Shards() []string { return p.GetShards() } +type paramsSeriesWrapper struct { + 
*LokiSeriesRequest +} + +func (p paramsSeriesWrapper) Query() string { + return p.GetQuery() +} + +func (p paramsSeriesWrapper) Start() time.Time { + return p.LokiSeriesRequest.GetStartTs() +} + +func (p paramsSeriesWrapper) End() time.Time { + return p.LokiSeriesRequest.GetEndTs() +} + +func (p paramsSeriesWrapper) Step() time.Duration { + return time.Duration(p.GetStep() * 1e6) +} +func (p paramsSeriesWrapper) Interval() time.Duration { return 0 } +func (p paramsSeriesWrapper) Direction() logproto.Direction { + return logproto.FORWARD +} +func (p paramsSeriesWrapper) Limit() uint32 { return 0 } +func (p paramsSeriesWrapper) Shards() []string { + return p.GetShards() +} + +type paramsLabelNamesWrapper struct { + *LokiLabelNamesRequest +} + +func (p paramsLabelNamesWrapper) Query() string { + return p.GetQuery() +} + +func (p paramsLabelNamesWrapper) Start() time.Time { + return p.LokiLabelNamesRequest.GetStartTs() +} + +func (p paramsLabelNamesWrapper) End() time.Time { + return p.LokiLabelNamesRequest.GetEndTs() +} + +func (p paramsLabelNamesWrapper) Step() time.Duration { + return time.Duration(p.GetStep() * 1e6) +} +func (p paramsLabelNamesWrapper) Interval() time.Duration { return 0 } +func (p paramsLabelNamesWrapper) Direction() logproto.Direction { + return logproto.FORWARD +} +func (p paramsLabelNamesWrapper) Limit() uint32 { return 0 } +func (p paramsLabelNamesWrapper) Shards() []string { + return make([]string, 0) +} + func httpResponseHeadersToPromResponseHeaders(httpHeaders http.Header) []queryrangebase.PrometheusResponseHeader { var promHeaders []queryrangebase.PrometheusResponseHeader for h, hv := range httpHeaders { diff --git a/pkg/querier/queryrange/codec_test.go b/pkg/querier/queryrange/codec_test.go index bcdfd120a072b..8520e92b56952 100644 --- a/pkg/querier/queryrange/codec_test.go +++ b/pkg/querier/queryrange/codec_test.go @@ -934,6 +934,7 @@ var ( "queueTime": 21, "subqueries": 1, "totalBytesProcessed": 24, + "totalEntriesReturned": 10, "totalLinesProcessed": 25 } },` @@ -1086,6 +1087,7 @@ var ( TotalBytesProcessed: 24, Subqueries: 1, TotalLinesProcessed: 25, + TotalEntriesReturned: 10, }, Querier: stats.Querier{ Store: stats.Store{ diff --git a/pkg/querier/queryrange/prometheus_test.go b/pkg/querier/queryrange/prometheus_test.go index 809353d130dc9..aff5172f634d8 100644 --- a/pkg/querier/queryrange/prometheus_test.go +++ b/pkg/querier/queryrange/prometheus_test.go @@ -54,6 +54,7 @@ var emptyStats = `"stats": { "queueTime": 0, "subqueries": 0, "totalBytesProcessed":0, + "totalEntriesReturned":0, "totalLinesProcessed":0 } }` diff --git a/pkg/querier/queryrange/queryrange.pb.go b/pkg/querier/queryrange/queryrange.pb.go index 3765740897345..9fa1efe5af7d8 100644 --- a/pkg/querier/queryrange/queryrange.pb.go +++ b/pkg/querier/queryrange/queryrange.pb.go @@ -400,10 +400,11 @@ func (m *LokiSeriesRequest) GetShards() []string { } type LokiSeriesResponse struct { - Status string `protobuf:"bytes,1,opt,name=Status,proto3" json:"status"` - Data []logproto.SeriesIdentifier `protobuf:"bytes,2,rep,name=Data,proto3" json:"data,omitempty"` - Version uint32 `protobuf:"varint,3,opt,name=version,proto3" json:"version,omitempty"` - Headers []github_com_grafana_loki_pkg_querier_queryrange_queryrangebase.PrometheusResponseHeader `protobuf:"bytes,4,rep,name=Headers,proto3,customtype=github.com/grafana/loki/pkg/querier/queryrange/queryrangebase.PrometheusResponseHeader" json:"-"` + Status string `protobuf:"bytes,1,opt,name=Status,proto3" json:"status"` + Data []logproto.SeriesIdentifier 
`protobuf:"bytes,2,rep,name=Data,proto3" json:"data,omitempty"` + Version uint32 `protobuf:"varint,3,opt,name=version,proto3" json:"version,omitempty"` + Headers []github_com_grafana_loki_pkg_querier_queryrange_queryrangebase.PrometheusResponseHeader `protobuf:"bytes,4,rep,name=Headers,proto3,customtype=github.com/grafana/loki/pkg/querier/queryrange/queryrangebase.PrometheusResponseHeader" json:"-"` + Statistics stats.Result `protobuf:"bytes,5,opt,name=statistics,proto3" json:"statistics"` } func (m *LokiSeriesResponse) Reset() { *m = LokiSeriesResponse{} } @@ -459,6 +460,13 @@ func (m *LokiSeriesResponse) GetVersion() uint32 { return 0 } +func (m *LokiSeriesResponse) GetStatistics() stats.Result { + if m != nil { + return m.Statistics + } + return stats.Result{} +} + type LokiLabelNamesRequest struct { StartTs time.Time `protobuf:"bytes,1,opt,name=startTs,proto3,stdtime" json:"startTs"` EndTs time.Time `protobuf:"bytes,2,opt,name=endTs,proto3,stdtime" json:"endTs"` @@ -519,10 +527,11 @@ func (m *LokiLabelNamesRequest) GetPath() string { } type LokiLabelNamesResponse struct { - Status string `protobuf:"bytes,1,opt,name=Status,proto3" json:"status"` - Data []string `protobuf:"bytes,2,rep,name=Data,proto3" json:"data,omitempty"` - Version uint32 `protobuf:"varint,3,opt,name=version,proto3" json:"version,omitempty"` - Headers []github_com_grafana_loki_pkg_querier_queryrange_queryrangebase.PrometheusResponseHeader `protobuf:"bytes,4,rep,name=Headers,proto3,customtype=github.com/grafana/loki/pkg/querier/queryrange/queryrangebase.PrometheusResponseHeader" json:"-"` + Status string `protobuf:"bytes,1,opt,name=Status,proto3" json:"status"` + Data []string `protobuf:"bytes,2,rep,name=Data,proto3" json:"data,omitempty"` + Version uint32 `protobuf:"varint,3,opt,name=version,proto3" json:"version,omitempty"` + Headers []github_com_grafana_loki_pkg_querier_queryrange_queryrangebase.PrometheusResponseHeader `protobuf:"bytes,4,rep,name=Headers,proto3,customtype=github.com/grafana/loki/pkg/querier/queryrange/queryrangebase.PrometheusResponseHeader" json:"-"` + Statistics stats.Result `protobuf:"bytes,5,opt,name=statistics,proto3" json:"statistics"` } func (m *LokiLabelNamesResponse) Reset() { *m = LokiLabelNamesResponse{} } @@ -578,6 +587,13 @@ func (m *LokiLabelNamesResponse) GetVersion() uint32 { return 0 } +func (m *LokiLabelNamesResponse) GetStatistics() stats.Result { + if m != nil { + return m.Statistics + } + return stats.Result{} +} + type LokiData struct { ResultType string `protobuf:"bytes,1,opt,name=ResultType,proto3" json:"resultType"` Result []github_com_grafana_loki_pkg_logproto.Stream `protobuf:"bytes,2,rep,name=Result,proto3,customtype=github.com/grafana/loki/pkg/logproto.Stream" json:"result"` @@ -691,65 +707,65 @@ func init() { } var fileDescriptor_51b9d53b40d11902 = []byte{ - // 915 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xdc, 0x54, 0x4f, 0x6f, 0x23, 0x35, - 0x14, 0x8f, 0xf3, 0x7f, 0x5c, 0xb6, 0x80, 0xbb, 0xec, 0x8e, 0x8a, 0x34, 0x13, 0xe5, 0x00, 0x41, - 0xb0, 0x13, 0xd1, 0x05, 0x0e, 0x08, 0x10, 0x3b, 0x2a, 0x88, 0x95, 0x56, 0x08, 0x79, 0x23, 0xae, - 0xc8, 0x69, 0xdc, 0x64, 0xd4, 0x99, 0xf1, 0xd4, 0x76, 0x56, 0xea, 0x8d, 0x2f, 0x80, 0xb4, 0x9f, - 0x01, 0x90, 0x40, 0x7c, 0x0a, 0x8e, 0x3d, 0xf6, 0xb8, 0x5a, 0x89, 0x81, 0xa6, 0x17, 0xc8, 0x69, - 0x3f, 0x02, 0xb2, 0x3d, 0x93, 0x38, 0x4b, 0x4b, 0x9b, 0xed, 0x05, 0x71, 0x49, 0xfc, 0xec, 0xf7, - 0xf3, 0xf8, 0xf7, 0x7b, 0xbf, 0xf7, 0xe0, 0x9b, 0xd9, 0xc1, 0xb8, 0x7f, 0x38, 0xa5, 
0x3c, 0xa2, - 0x5c, 0xff, 0x1f, 0x71, 0x92, 0x8e, 0xa9, 0xb5, 0x0c, 0x32, 0xce, 0x24, 0x43, 0x70, 0xb9, 0xb3, - 0x7d, 0x67, 0x1c, 0xc9, 0xc9, 0x74, 0x18, 0xec, 0xb1, 0xa4, 0x3f, 0x66, 0x63, 0xd6, 0xd7, 0x29, - 0xc3, 0xe9, 0xbe, 0x8e, 0x74, 0xa0, 0x57, 0x06, 0xba, 0xed, 0x8f, 0x19, 0x1b, 0xc7, 0x74, 0x99, - 0x25, 0xa3, 0x84, 0x0a, 0x49, 0x92, 0xac, 0x48, 0x78, 0x5d, 0x3d, 0x22, 0x66, 0x63, 0x83, 0x2c, - 0x17, 0xc5, 0x61, 0xa7, 0x38, 0x3c, 0x8c, 0x13, 0x36, 0xa2, 0x71, 0x5f, 0x48, 0x22, 0x85, 0xf9, - 0x2d, 0x32, 0x3e, 0xb8, 0x94, 0xc3, 0x90, 0x88, 0x7f, 0x52, 0xea, 0x9e, 0x54, 0xe1, 0xc6, 0x03, - 0x76, 0x10, 0x61, 0x7a, 0x38, 0xa5, 0x42, 0xa2, 0x9b, 0xb0, 0xa1, 0x73, 0x5c, 0xd0, 0x01, 0x3d, - 0x07, 0x9b, 0x40, 0xed, 0xc6, 0x51, 0x12, 0x49, 0xb7, 0xda, 0x01, 0xbd, 0x1b, 0xd8, 0x04, 0x08, - 0xc1, 0xba, 0x90, 0x34, 0x73, 0x6b, 0x1d, 0xd0, 0xab, 0x61, 0xbd, 0x46, 0xdb, 0xb0, 0x1d, 0xa5, - 0x92, 0xf2, 0x47, 0x24, 0x76, 0x1d, 0xbd, 0xbf, 0x88, 0xd1, 0x27, 0xb0, 0x25, 0x24, 0xe1, 0x72, - 0x20, 0xdc, 0x7a, 0x07, 0xf4, 0x36, 0x76, 0xb6, 0x03, 0xa3, 0x4a, 0x50, 0xaa, 0x12, 0x0c, 0x4a, - 0x55, 0xc2, 0xf6, 0x71, 0xee, 0x57, 0x1e, 0xff, 0xee, 0x03, 0x5c, 0x82, 0xd0, 0x87, 0xb0, 0x41, - 0xd3, 0xd1, 0x40, 0xb8, 0x8d, 0x35, 0xd0, 0x06, 0x82, 0xde, 0x85, 0xce, 0x28, 0xe2, 0x74, 0x4f, - 0x46, 0x2c, 0x75, 0x9b, 0x1d, 0xd0, 0xdb, 0xdc, 0xd9, 0x0a, 0x16, 0x2a, 0xef, 0x96, 0x47, 0x78, - 0x99, 0xa5, 0xe8, 0x65, 0x44, 0x4e, 0xdc, 0x96, 0x56, 0x42, 0xaf, 0x51, 0x17, 0x36, 0xc5, 0x84, - 0xf0, 0x91, 0x70, 0xdb, 0x9d, 0x5a, 0xcf, 0x09, 0xe1, 0x3c, 0xf7, 0x8b, 0x1d, 0x5c, 0xfc, 0x77, - 0xff, 0x02, 0x10, 0x29, 0x49, 0xef, 0xa7, 0x42, 0x92, 0x54, 0xbe, 0x88, 0xb2, 0x1f, 0xc1, 0xa6, - 0xf2, 0xc7, 0x40, 0x68, 0x6d, 0xaf, 0x4a, 0xb5, 0xc0, 0xac, 0x72, 0xad, 0xaf, 0xc5, 0xb5, 0x71, - 0x2e, 0xd7, 0xe6, 0x85, 0x5c, 0xbf, 0xaf, 0xc3, 0x97, 0x8c, 0x7d, 0x44, 0xc6, 0x52, 0x41, 0x15, - 0xe8, 0xa1, 0x24, 0x72, 0x2a, 0x0c, 0xcd, 0x02, 0xa4, 0x77, 0x70, 0x71, 0x82, 0x3e, 0x85, 0xf5, - 0x5d, 0x22, 0x89, 0xa6, 0xbc, 0xb1, 0x73, 0x33, 0xb0, 0x4c, 0xa9, 0xee, 0x52, 0x67, 0xe1, 0x2d, - 0xc5, 0x6a, 0x9e, 0xfb, 0x9b, 0x23, 0x22, 0xc9, 0x3b, 0x2c, 0x89, 0x24, 0x4d, 0x32, 0x79, 0x84, - 0x35, 0x12, 0xbd, 0x0f, 0x9d, 0xcf, 0x38, 0x67, 0x7c, 0x70, 0x94, 0x51, 0x2d, 0x91, 0x13, 0xde, - 0x9e, 0xe7, 0xfe, 0x16, 0x2d, 0x37, 0x2d, 0xc4, 0x32, 0x13, 0xbd, 0x05, 0x1b, 0x3a, 0xd0, 0xa2, - 0x38, 0xe1, 0xd6, 0x3c, 0xf7, 0x5f, 0xd6, 0x10, 0x2b, 0xdd, 0x64, 0xac, 0x6a, 0xd8, 0xb8, 0x92, - 0x86, 0x8b, 0x52, 0x36, 0xed, 0x52, 0xba, 0xb0, 0xf5, 0x88, 0x72, 0xa1, 0xae, 0x69, 0xe9, 0xfd, - 0x32, 0x44, 0xf7, 0x20, 0x54, 0xc2, 0x44, 0x42, 0x46, 0x7b, 0xca, 0x4f, 0x4a, 0x8c, 0x1b, 0x81, - 0x69, 0x6a, 0x4c, 0xc5, 0x34, 0x96, 0x21, 0x2a, 0x54, 0xb0, 0x12, 0xb1, 0xb5, 0x46, 0x3f, 0x00, - 0xd8, 0xfa, 0x82, 0x92, 0x11, 0xe5, 0xc2, 0x75, 0x3a, 0xb5, 0xde, 0xc6, 0x4e, 0x2f, 0x58, 0xed, - 0xf8, 0xe0, 0x2b, 0xce, 0x12, 0x2a, 0x27, 0x74, 0x2a, 0xca, 0x1a, 0x19, 0x40, 0xf8, 0xcd, 0xd3, - 0xdc, 0xff, 0xda, 0x1e, 0x62, 0x9c, 0xec, 0x93, 0x94, 0xf4, 0x63, 0x76, 0x10, 0xf5, 0xaf, 0x34, - 0x4d, 0x2e, 0xbc, 0x7b, 0x9e, 0xfb, 0xe0, 0x0e, 0x2e, 0x5f, 0xd6, 0xfd, 0x0d, 0xc0, 0x57, 0x55, - 0x61, 0x1f, 0xaa, 0xfb, 0x84, 0xd5, 0x0f, 0x09, 0x91, 0x7b, 0x13, 0x17, 0x28, 0x77, 0x61, 0x13, - 0xd8, 0x33, 0xa2, 0x7a, 0xad, 0x19, 0x51, 0x5b, 0x7f, 0x46, 0x94, 0x4d, 0x50, 0x3f, 0xb7, 0x09, - 0x1a, 0x17, 0x36, 0xc1, 0xaf, 0x55, 0xd3, 0xf0, 0x25, 0xbf, 0x35, 0x5a, 0xe1, 0xf3, 0x45, 0x2b, - 0xd4, 0xf4, 0x6b, 0x17, 0x0e, 0x33, 0x77, 0xdd, 0x1f, 0xd1, 0x54, 0x46, 0xfb, 0x11, 0xe5, 0x97, - 0x34, 0x84, 
0xe5, 0xb2, 0xda, 0xaa, 0xcb, 0x6c, 0x8b, 0xd4, 0xff, 0xb3, 0x16, 0xf9, 0x09, 0xc0, - 0xd7, 0x94, 0x84, 0x0f, 0xc8, 0x90, 0xc6, 0x5f, 0x92, 0x64, 0x69, 0x13, 0xcb, 0x10, 0xe0, 0x5a, - 0x86, 0xa8, 0xbe, 0xb8, 0x21, 0x6a, 0x4b, 0x43, 0x74, 0x7f, 0xac, 0xc2, 0x5b, 0xcf, 0xbf, 0x74, - 0x8d, 0x82, 0xbf, 0x61, 0x15, 0xdc, 0x09, 0xd1, 0xff, 0xb6, 0xa0, 0xbf, 0x00, 0xd8, 0x2e, 0x87, - 0x39, 0x0a, 0x20, 0x34, 0x03, 0x4d, 0xcf, 0x6b, 0x23, 0xce, 0xa6, 0x1a, 0x6b, 0x7c, 0xb1, 0x8b, - 0xad, 0x0c, 0x94, 0xc2, 0xa6, 0x89, 0x8a, 0xbe, 0xb8, 0x6d, 0xf5, 0x85, 0xe4, 0x94, 0x24, 0xf7, - 0x46, 0x24, 0x93, 0x94, 0x87, 0x1f, 0xab, 0x8a, 0x3d, 0xcd, 0xfd, 0xb7, 0xff, 0x8d, 0xd3, 0x73, - 0x58, 0x55, 0x14, 0xf3, 0x5d, 0x5c, 0x7c, 0xa5, 0xfb, 0x1d, 0x80, 0xaf, 0xa8, 0xc7, 0x2a, 0x6e, - 0x8b, 0x6a, 0xee, 0xc2, 0x36, 0x2f, 0xd6, 0x85, 0xf3, 0xba, 0x97, 0xeb, 0x1c, 0xd6, 0x8f, 0x73, - 0x1f, 0xe0, 0x05, 0x12, 0xdd, 0x5d, 0x19, 0xf2, 0xd5, 0xf3, 0x86, 0xbc, 0x82, 0x54, 0xec, 0xb1, - 0x1e, 0xbe, 0x77, 0x72, 0xea, 0x55, 0x9e, 0x9c, 0x7a, 0x95, 0x67, 0xa7, 0x1e, 0xf8, 0x76, 0xe6, - 0x81, 0x9f, 0x67, 0x1e, 0x38, 0x9e, 0x79, 0xe0, 0x64, 0xe6, 0x81, 0x3f, 0x66, 0x1e, 0xf8, 0x73, - 0xe6, 0x55, 0x9e, 0xcd, 0x3c, 0xf0, 0xf8, 0xcc, 0xab, 0x9c, 0x9c, 0x79, 0x95, 0x27, 0x67, 0x5e, - 0x65, 0xd8, 0xd4, 0x2c, 0xef, 0xfe, 0x1d, 0x00, 0x00, 0xff, 0xff, 0xa6, 0x19, 0xca, 0xfc, 0xcf, - 0x0a, 0x00, 0x00, + // 923 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x55, 0xcf, 0x6f, 0x23, 0x35, + 0x14, 0x8e, 0x33, 0xf9, 0x35, 0x2e, 0x5b, 0xc0, 0x5d, 0x76, 0x47, 0x45, 0x9a, 0x89, 0x72, 0x80, + 0x20, 0xd8, 0x89, 0xe8, 0x02, 0x07, 0x04, 0x88, 0x1d, 0x15, 0xc4, 0x4a, 0x2b, 0x84, 0x66, 0x23, + 0xae, 0xc8, 0x69, 0xdc, 0x64, 0xd4, 0x99, 0xf1, 0xd4, 0x76, 0x56, 0xea, 0x0d, 0x89, 0x33, 0xd2, + 0xfe, 0x0d, 0x70, 0x00, 0xf1, 0x97, 0xf4, 0xd8, 0xe3, 0x6a, 0x25, 0x06, 0x9a, 0x1e, 0x80, 0x9c, + 0xf6, 0x4f, 0x40, 0xb6, 0x67, 0x12, 0xa7, 0x34, 0xb4, 0x69, 0x2f, 0x1c, 0xb8, 0x24, 0x7e, 0xcf, + 0xef, 0x73, 0xfc, 0xbe, 0xf7, 0x7d, 0x0e, 0x7c, 0x33, 0x3b, 0x18, 0xf5, 0x0e, 0x27, 0x84, 0x45, + 0x84, 0xa9, 0xef, 0x23, 0x86, 0xd3, 0x11, 0x31, 0x96, 0x7e, 0xc6, 0xa8, 0xa0, 0x08, 0x2e, 0x32, + 0xdb, 0xf7, 0x46, 0x91, 0x18, 0x4f, 0x06, 0xfe, 0x1e, 0x4d, 0x7a, 0x23, 0x3a, 0xa2, 0x3d, 0x55, + 0x32, 0x98, 0xec, 0xab, 0x48, 0x05, 0x6a, 0xa5, 0xa1, 0xdb, 0xde, 0x88, 0xd2, 0x51, 0x4c, 0x16, + 0x55, 0x22, 0x4a, 0x08, 0x17, 0x38, 0xc9, 0x8a, 0x82, 0xd7, 0xe5, 0x25, 0x62, 0x3a, 0xd2, 0xc8, + 0x72, 0x51, 0x6c, 0xb6, 0x8b, 0xcd, 0xc3, 0x38, 0xa1, 0x43, 0x12, 0xf7, 0xb8, 0xc0, 0x82, 0xeb, + 0xcf, 0xa2, 0xe2, 0x83, 0x4b, 0x7b, 0x18, 0x60, 0xfe, 0xcf, 0x96, 0x3a, 0x27, 0x55, 0xb8, 0xf1, + 0x88, 0x1e, 0x44, 0x21, 0x39, 0x9c, 0x10, 0x2e, 0xd0, 0x6d, 0x58, 0x57, 0x35, 0x0e, 0x68, 0x83, + 0xae, 0x1d, 0xea, 0x40, 0x66, 0xe3, 0x28, 0x89, 0x84, 0x53, 0x6d, 0x83, 0xee, 0xad, 0x50, 0x07, + 0x08, 0xc1, 0x1a, 0x17, 0x24, 0x73, 0xac, 0x36, 0xe8, 0x5a, 0xa1, 0x5a, 0xa3, 0x6d, 0xd8, 0x8a, + 0x52, 0x41, 0xd8, 0x13, 0x1c, 0x3b, 0xb6, 0xca, 0xcf, 0x63, 0xf4, 0x09, 0x6c, 0x72, 0x81, 0x99, + 0xe8, 0x73, 0xa7, 0xd6, 0x06, 0xdd, 0x8d, 0x9d, 0x6d, 0x5f, 0xb3, 0xe2, 0x97, 0xac, 0xf8, 0xfd, + 0x92, 0x95, 0xa0, 0x75, 0x9c, 0x7b, 0x95, 0xa7, 0xbf, 0x79, 0x20, 0x2c, 0x41, 0xe8, 0x43, 0x58, + 0x27, 0xe9, 0xb0, 0xcf, 0x9d, 0xfa, 0x1a, 0x68, 0x0d, 0x41, 0xef, 0x42, 0x7b, 0x18, 0x31, 0xb2, + 0x27, 0x22, 0x9a, 0x3a, 0x8d, 0x36, 0xe8, 0x6e, 0xee, 0x6c, 0xf9, 0x73, 0x96, 0x77, 0xcb, 0xad, + 0x70, 0x51, 0x25, 0xdb, 0xcb, 0xb0, 0x18, 0x3b, 0x4d, 0xc5, 0x84, 
0x5a, 0xa3, 0x0e, 0x6c, 0xf0, + 0x31, 0x66, 0x43, 0xee, 0xb4, 0xda, 0x56, 0xd7, 0x0e, 0xe0, 0x2c, 0xf7, 0x8a, 0x4c, 0x58, 0x7c, + 0x77, 0xfe, 0x02, 0x10, 0x49, 0x4a, 0x1f, 0xa6, 0x5c, 0xe0, 0x54, 0x5c, 0x87, 0xd9, 0x8f, 0x60, + 0x43, 0xea, 0xa3, 0xcf, 0x15, 0xb7, 0x57, 0x6d, 0xb5, 0xc0, 0x2c, 0xf7, 0x5a, 0x5b, 0xab, 0xd7, + 0xfa, 0x85, 0xbd, 0x36, 0x56, 0xf6, 0xfa, 0x43, 0x0d, 0xbe, 0xa4, 0xe5, 0xc3, 0x33, 0x9a, 0x72, + 0x22, 0x41, 0x8f, 0x05, 0x16, 0x13, 0xae, 0xdb, 0x2c, 0x40, 0x2a, 0x13, 0x16, 0x3b, 0xe8, 0x53, + 0x58, 0xdb, 0xc5, 0x02, 0xab, 0x96, 0x37, 0x76, 0x6e, 0xfb, 0x86, 0x28, 0xe5, 0x59, 0x72, 0x2f, + 0xb8, 0x23, 0xbb, 0x9a, 0xe5, 0xde, 0xe6, 0x10, 0x0b, 0xfc, 0x0e, 0x4d, 0x22, 0x41, 0x92, 0x4c, + 0x1c, 0x85, 0x0a, 0x89, 0xde, 0x87, 0xf6, 0x67, 0x8c, 0x51, 0xd6, 0x3f, 0xca, 0x88, 0xa2, 0xc8, + 0x0e, 0xee, 0xce, 0x72, 0x6f, 0x8b, 0x94, 0x49, 0x03, 0xb1, 0xa8, 0x44, 0x6f, 0xc1, 0xba, 0x0a, + 0x14, 0x29, 0x76, 0xb0, 0x35, 0xcb, 0xbd, 0x97, 0x15, 0xc4, 0x28, 0xd7, 0x15, 0xcb, 0x1c, 0xd6, + 0xaf, 0xc4, 0xe1, 0x7c, 0x94, 0x0d, 0x73, 0x94, 0x0e, 0x6c, 0x3e, 0x21, 0x8c, 0xcb, 0x63, 0x9a, + 0x2a, 0x5f, 0x86, 0xe8, 0x01, 0x84, 0x92, 0x98, 0x88, 0x8b, 0x68, 0x4f, 0xea, 0x49, 0x92, 0x71, + 0xcb, 0xd7, 0xa6, 0x0e, 0x09, 0x9f, 0xc4, 0x22, 0x40, 0x05, 0x0b, 0x46, 0x61, 0x68, 0xac, 0xd1, + 0x8f, 0x00, 0x36, 0xbf, 0x20, 0x78, 0x48, 0x18, 0x77, 0xec, 0xb6, 0xd5, 0xdd, 0xd8, 0xe9, 0xfa, + 0xcb, 0x8e, 0xf7, 0xbf, 0x62, 0x34, 0x21, 0x62, 0x4c, 0x26, 0xbc, 0x9c, 0x91, 0x06, 0x04, 0xdf, + 0x3c, 0xcf, 0xbd, 0xaf, 0xcd, 0x47, 0x8c, 0xe1, 0x7d, 0x9c, 0xe2, 0x5e, 0x4c, 0x0f, 0xa2, 0xde, + 0x95, 0x5e, 0x93, 0x95, 0x67, 0xcf, 0x72, 0x0f, 0xdc, 0x0b, 0xcb, 0x9b, 0x75, 0x7e, 0x05, 0xf0, + 0x55, 0x39, 0xd8, 0xc7, 0xf2, 0x3c, 0x6e, 0xf8, 0x21, 0xc1, 0x62, 0x6f, 0xec, 0x00, 0xa9, 0xae, + 0x50, 0x07, 0xe6, 0x1b, 0x51, 0xbd, 0xd1, 0x1b, 0x61, 0xad, 0xff, 0x46, 0x94, 0x26, 0xa8, 0x5d, + 0x68, 0x82, 0xfa, 0x4a, 0x13, 0x7c, 0x67, 0x69, 0xc3, 0x97, 0xfd, 0xad, 0x61, 0x85, 0xcf, 0xe7, + 0x56, 0xb0, 0xd4, 0x6d, 0xe7, 0x0a, 0xd3, 0x67, 0x3d, 0x1c, 0x92, 0x54, 0x44, 0xfb, 0x11, 0x61, + 0x97, 0x18, 0xc2, 0x50, 0x99, 0xb5, 0xac, 0x32, 0x53, 0x22, 0xb5, 0xff, 0xaa, 0x44, 0xce, 0x79, + 0xa1, 0x7e, 0x0d, 0x2f, 0x74, 0x7e, 0x02, 0xf0, 0x35, 0x39, 0x85, 0x47, 0x78, 0x40, 0xe2, 0x2f, + 0x71, 0xb2, 0x50, 0x9a, 0xa1, 0x29, 0x70, 0x23, 0x4d, 0x55, 0xaf, 0xaf, 0x29, 0x6b, 0xa1, 0xa9, + 0xce, 0x1f, 0x55, 0x78, 0xe7, 0xfc, 0x4d, 0xd7, 0xd0, 0xcc, 0x1b, 0x86, 0x66, 0xec, 0x00, 0xfd, + 0xaf, 0x89, 0xd5, 0x9a, 0xf8, 0x05, 0xc0, 0x56, 0xf9, 0x97, 0x82, 0x7c, 0x08, 0x35, 0x4c, 0xfd, + 0x6b, 0x68, 0x7e, 0x37, 0x25, 0x98, 0xcd, 0xb3, 0xa1, 0x51, 0x81, 0x52, 0xd8, 0xd0, 0x51, 0xe1, + 0xce, 0xbb, 0x86, 0x3b, 0x05, 0x23, 0x38, 0x79, 0x30, 0xc4, 0x99, 0x20, 0x2c, 0xf8, 0x58, 0xde, + 0xe2, 0x79, 0xee, 0xbd, 0xfd, 0x6f, 0xb4, 0x9c, 0xc3, 0xca, 0xb9, 0xea, 0xdf, 0x0d, 0x8b, 0x5f, + 0xe9, 0x7c, 0x0f, 0xe0, 0x2b, 0xf2, 0xb2, 0x92, 0x9e, 0xb9, 0x20, 0x76, 0x61, 0x8b, 0x15, 0xeb, + 0x42, 0xbc, 0x9d, 0xcb, 0x47, 0x15, 0xd4, 0x8e, 0x73, 0x0f, 0x84, 0x73, 0x24, 0xba, 0xbf, 0x44, + 0x65, 0xf5, 0x22, 0x2a, 0x25, 0xa4, 0x62, 0x92, 0x17, 0xbc, 0x77, 0x72, 0xea, 0x56, 0x9e, 0x9d, + 0xba, 0x95, 0x17, 0xa7, 0x2e, 0xf8, 0x76, 0xea, 0x82, 0x9f, 0xa7, 0x2e, 0x38, 0x9e, 0xba, 0xe0, + 0x64, 0xea, 0x82, 0xdf, 0xa7, 0x2e, 0xf8, 0x73, 0xea, 0x56, 0x5e, 0x4c, 0x5d, 0xf0, 0xf4, 0xcc, + 0xad, 0x9c, 0x9c, 0xb9, 0x95, 0x67, 0x67, 0x6e, 0x65, 0xd0, 0x50, 0x5d, 0xde, 0xff, 0x3b, 0x00, + 0x00, 0xff, 0xff, 0x13, 0x18, 0x8f, 0x0c, 0x55, 0x0b, 0x00, 0x00, } func (this *LokiRequest) 
Equal(that interface{}) bool { @@ -989,6 +1005,9 @@ func (this *LokiSeriesResponse) Equal(that interface{}) bool { return false } } + if !this.Statistics.Equal(&that1.Statistics) { + return false + } return true } func (this *LokiLabelNamesRequest) Equal(that interface{}) bool { @@ -1062,6 +1081,9 @@ func (this *LokiLabelNamesResponse) Equal(that interface{}) bool { return false } } + if !this.Statistics.Equal(&that1.Statistics) { + return false + } return true } func (this *LokiData) Equal(that interface{}) bool { @@ -1192,7 +1214,7 @@ func (this *LokiSeriesResponse) GoString() string { if this == nil { return "nil" } - s := make([]string, 0, 8) + s := make([]string, 0, 9) s = append(s, "&queryrange.LokiSeriesResponse{") s = append(s, "Status: "+fmt.Sprintf("%#v", this.Status)+",\n") if this.Data != nil { @@ -1204,6 +1226,7 @@ func (this *LokiSeriesResponse) GoString() string { } s = append(s, "Version: "+fmt.Sprintf("%#v", this.Version)+",\n") s = append(s, "Headers: "+fmt.Sprintf("%#v", this.Headers)+",\n") + s = append(s, "Statistics: "+strings.Replace(this.Statistics.GoString(), `&`, ``, 1)+",\n") s = append(s, "}") return strings.Join(s, "") } @@ -1223,12 +1246,13 @@ func (this *LokiLabelNamesResponse) GoString() string { if this == nil { return "nil" } - s := make([]string, 0, 8) + s := make([]string, 0, 9) s = append(s, "&queryrange.LokiLabelNamesResponse{") s = append(s, "Status: "+fmt.Sprintf("%#v", this.Status)+",\n") s = append(s, "Data: "+fmt.Sprintf("%#v", this.Data)+",\n") s = append(s, "Version: "+fmt.Sprintf("%#v", this.Version)+",\n") s = append(s, "Headers: "+fmt.Sprintf("%#v", this.Headers)+",\n") + s = append(s, "Statistics: "+strings.Replace(this.Statistics.GoString(), `&`, ``, 1)+",\n") s = append(s, "}") return strings.Join(s, "") } @@ -1587,6 +1611,16 @@ func (m *LokiSeriesResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + { + size, err := m.Statistics.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQueryrange(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a if len(m.Headers) > 0 { for iNdEx := len(m.Headers) - 1; iNdEx >= 0; iNdEx-- { { @@ -1657,21 +1691,21 @@ func (m *LokiLabelNamesRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { i-- dAtA[i] = 0x1a } - n8, err8 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.EndTs, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.EndTs):]) - if err8 != nil { - return 0, err8 - } - i -= n8 - i = encodeVarintQueryrange(dAtA, i, uint64(n8)) - i-- - dAtA[i] = 0x12 - n9, err9 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.StartTs, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.StartTs):]) + n9, err9 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.EndTs, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.EndTs):]) if err9 != nil { return 0, err9 } i -= n9 i = encodeVarintQueryrange(dAtA, i, uint64(n9)) i-- + dAtA[i] = 0x12 + n10, err10 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.StartTs, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.StartTs):]) + if err10 != nil { + return 0, err10 + } + i -= n10 + i = encodeVarintQueryrange(dAtA, i, uint64(n10)) + i-- dAtA[i] = 0xa return len(dAtA) - i, nil } @@ -1696,6 +1730,16 @@ func (m *LokiLabelNamesResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) _ = i var l int _ = l + { + size, err := m.Statistics.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQueryrange(dAtA, i, uint64(size)) + } + i-- + dAtA[i] 
= 0x2a if len(m.Headers) > 0 { for iNdEx := len(m.Headers) - 1; iNdEx >= 0; iNdEx-- { { @@ -1998,6 +2042,8 @@ func (m *LokiSeriesResponse) Size() (n int) { n += 1 + l + sovQueryrange(uint64(l)) } } + l = m.Statistics.Size() + n += 1 + l + sovQueryrange(uint64(l)) return n } @@ -2043,6 +2089,8 @@ func (m *LokiLabelNamesResponse) Size() (n int) { n += 1 + l + sovQueryrange(uint64(l)) } } + l = m.Statistics.Size() + n += 1 + l + sovQueryrange(uint64(l)) return n } @@ -2165,6 +2213,7 @@ func (this *LokiSeriesResponse) String() string { `Data:` + repeatedStringForData + `,`, `Version:` + fmt.Sprintf("%v", this.Version) + `,`, `Headers:` + fmt.Sprintf("%v", this.Headers) + `,`, + `Statistics:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Statistics), "Result", "stats.Result", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -2190,6 +2239,7 @@ func (this *LokiLabelNamesResponse) String() string { `Data:` + fmt.Sprintf("%v", this.Data) + `,`, `Version:` + fmt.Sprintf("%v", this.Version) + `,`, `Headers:` + fmt.Sprintf("%v", this.Headers) + `,`, + `Statistics:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Statistics), "Result", "stats.Result", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -3404,6 +3454,39 @@ func (m *LokiSeriesResponse) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Statistics", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQueryrange + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQueryrange + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQueryrange + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Statistics.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipQueryrange(dAtA[iNdEx:]) @@ -3725,6 +3808,39 @@ func (m *LokiLabelNamesResponse) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Statistics", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQueryrange + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQueryrange + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQueryrange + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Statistics.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipQueryrange(dAtA[iNdEx:]) diff --git a/pkg/querier/queryrange/queryrange.proto b/pkg/querier/queryrange/queryrange.proto index 48593f659dad4..007fe94a2988f 100644 --- a/pkg/querier/queryrange/queryrange.proto +++ b/pkg/querier/queryrange/queryrange.proto @@ -87,6 +87,10 @@ message LokiSeriesResponse { (gogoproto.jsontag) = "-", (gogoproto.customtype) = "github.com/grafana/loki/pkg/querier/queryrange/queryrangebase.PrometheusResponseHeader" ]; + stats.Result statistics = 5 [ + (gogoproto.nullable) = false, + (gogoproto.jsontag) = "statistics" + ]; } message LokiLabelNamesRequest { @@ -109,6 +113,10 @@ message 
LokiLabelNamesResponse {
   (gogoproto.jsontag) = "-",
   (gogoproto.customtype) = "github.com/grafana/loki/pkg/querier/queryrange/queryrangebase.PrometheusResponseHeader"
  ];
+  stats.Result statistics = 5 [
+    (gogoproto.nullable) = false,
+    (gogoproto.jsontag) = "statistics"
+  ];
 }
 
 message LokiData {
diff --git a/pkg/querier/queryrange/roundtrip.go b/pkg/querier/queryrange/roundtrip.go
index f85389099f6f2..23e042c819d8c 100644
--- a/pkg/querier/queryrange/roundtrip.go
+++ b/pkg/querier/queryrange/roundtrip.go
@@ -312,6 +312,7 @@ func NewSeriesTripperware(
 	schema config.SchemaConfig,
 ) (queryrangebase.Tripperware, error) {
 	queryRangeMiddleware := []queryrangebase.Middleware{
+		StatsCollectorMiddleware(),
 		NewLimitsMiddleware(limits),
 		queryrangebase.InstrumentMiddleware("split_by_interval", metrics.InstrumentMiddlewareMetrics),
 		// The Series API needs to pull one chunk per series to extract the label set, which is much cheaper than iterating through all matching chunks.
@@ -357,6 +358,7 @@ func NewLabelsTripperware(
 	metrics *Metrics,
 ) (queryrangebase.Tripperware, error) {
 	queryRangeMiddleware := []queryrangebase.Middleware{
+		StatsCollectorMiddleware(),
 		NewLimitsMiddleware(limits),
 		queryrangebase.InstrumentMiddleware("split_by_interval", metrics.InstrumentMiddlewareMetrics),
 		// Force a 24 hours split by for labels API, this will be more efficient with our static daily bucket storage.
diff --git a/pkg/querier/queryrange/stats.go b/pkg/querier/queryrange/stats.go
index 08c67cc9d755f..cc0c2b6b6e491 100644
--- a/pkg/querier/queryrange/stats.go
+++ b/pkg/querier/queryrange/stats.go
@@ -7,6 +7,7 @@ import (
 	"net"
 	"net/http"
 	"strconv"
+	"strings"
 	"time"
 
 	"github.com/go-kit/log"
@@ -26,14 +27,37 @@ type ctxKeyType string
 
 const ctxKey ctxKeyType = "stats"
 
+const (
+	queryTypeLog    = "log"
+	queryTypeMetric = "metric"
+	queryTypeSeries = "series"
+	queryTypeLabel  = "label"
+)
+
 var (
 	defaultMetricRecorder = metricRecorderFn(func(data *queryData) {
-		logql.RecordMetrics(data.ctx, log.With(util_log.Logger, "component", "frontend"), data.params, data.status, *data.statistics, data.result)
+		recordQueryMetrics(data)
 	})
-	// StatsHTTPMiddleware is an http middleware to record stats for query_range filter.
-	StatsHTTPMiddleware = statsHTTPMiddleware(defaultMetricRecorder)
+
+	StatsHTTPMiddleware middleware.Interface = statsHTTPMiddleware(defaultMetricRecorder)
 )
 
+// recordQueryMetrics is called from the Query Frontend middleware chain for any type of query.
+func recordQueryMetrics(data *queryData) {
+	logger := log.With(util_log.Logger, "component", "frontend")
+
+	switch data.queryType {
+	case queryTypeLog, queryTypeMetric:
+		logql.RecordRangeAndInstantQueryMetrics(data.ctx, logger, data.params, data.status, *data.statistics, data.result)
+	case queryTypeLabel:
+		logql.RecordLabelQueryMetrics(data.ctx, logger, data.params.Start(), data.params.End(), data.label, data.status, *data.statistics)
+	case queryTypeSeries:
+		logql.RecordSeriesQueryMetrics(data.ctx, logger, data.params.Start(), data.params.End(), data.match, data.status, *data.statistics)
+	default:
+		level.Error(logger).Log("msg", "failed to record query metrics", "err", fmt.Errorf("expected one of the *LokiRequest, *LokiInstantRequest, *LokiSeriesRequest, *LokiLabelNamesRequest, got %s", data.queryType))
+	}
+}
+
 type metricRecorder interface {
 	Record(data *queryData)
 }
@@ -50,6 +74,9 @@ type queryData struct {
 	statistics *stats.Result
 	result     promql_parser.Value
 	status     string
+	queryType  string
+	match      []string // used in `series` query.
+	label      string   // used in `labels` query.
 	recorded   bool
 }
 
@@ -64,8 +91,6 @@ func statsHTTPMiddleware(recorder metricRecorder) middleware.Interface {
 				interceptor,
 				r,
 			)
-			// http middlewares runs for every http request.
-			// but we want only to record query_range filters.
 			if data.recorded {
 				if data.statistics == nil {
 					data.statistics = &stats.Result{}
@@ -91,21 +116,38 @@ func StatsCollectorMiddleware() queryrangebase.Middleware {
 			// collect stats and status
 			var statistics *stats.Result
 			var res promql_parser.Value
+			var queryType string
+			var totalEntries int
+
 			if resp != nil {
 				switch r := resp.(type) {
 				case *LokiResponse:
 					statistics = &r.Statistics
-					res = logqlmodel.Streams(r.Data.Result)
+					totalEntries = int(logqlmodel.Streams(r.Data.Result).Lines())
+					queryType = queryTypeLog
 				case *LokiPromResponse:
 					statistics = &r.Statistics
+					if r.Response != nil {
+						totalEntries = len(r.Response.Data.Result)
+					}
+					queryType = queryTypeMetric
+				case *LokiSeriesResponse:
+					statistics = &r.Statistics
+					totalEntries = len(r.Data)
+					queryType = queryTypeSeries
+				case *LokiLabelNamesResponse:
+					statistics = &r.Statistics
+					totalEntries = len(r.Data)
+					queryType = queryTypeLabel
 				default:
 					level.Warn(logger).Log("msg", fmt.Sprintf("cannot compute stats, unexpected type: %T", resp))
 				}
 			}
+
 			if statistics != nil {
 				// Re-calculate the summary: the queueTime result is already merged so should not be updated
 				// Log and record metrics for the current query
-				statistics.ComputeSummary(time.Since(start), 0)
+				statistics.ComputeSummary(time.Since(start), 0, totalEntries)
 				statistics.Log(level.Debug(logger))
 			}
 			ctxValue := ctx.Value(ctxKey)
@@ -113,17 +155,46 @@
 				data.recorded = true
 				data.statistics = statistics
 				data.result = res
+				data.queryType = queryType
 				p, errReq := paramsFromRequest(req)
 				if errReq != nil {
 					return nil, errReq
 				}
 				data.params = p
+
+				// Record information for metadata queries.
+				switch r := req.(type) {
+				case *LokiLabelNamesRequest:
+					data.label = getLabelNameFromLabelsQuery(r.Path)
+				case *LokiSeriesRequest:
+					data.match = r.Match
+				}
+
 			}
 			return resp, err
 		})
 	})
 }
 
+func getLabelNameFromLabelsQuery(path string) string {
+	if strings.HasSuffix(path, "/values") {
+
+		toks := strings.FieldsFunc(path, func(r rune) bool {
+			return r == '/'
+		})
+
+		// Since the path ends in `/values`, the label name is the
+		// second-to-last path segment, provided one exists.
+		length := len(toks)
+		if length >= 2 {
+			return toks[length-2]
+		}
+
+	}
+
+	return ""
+}
+
 // interceptor implements WriteHeader to intercept status codes. WriteHeader
 // may not be called on success, so initialize statusCode with the status you
 // want to report on success, i.e. http.StatusOK.
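To make the label-name extraction concrete: for a values request, the label is the path segment just before the trailing `/values`. A minimal, self-contained sketch of the same logic (a standalone reimplementation for illustration; `labelNameFromPath` and the sample paths are not part of this patch):

```go
package main

import (
	"fmt"
	"strings"
)

// labelNameFromPath mirrors getLabelNameFromLabelsQuery: for a
// label-values request the label name is the path segment immediately
// before the trailing "/values"; any other path yields "".
func labelNameFromPath(path string) string {
	if !strings.HasSuffix(path, "/values") {
		return ""
	}
	toks := strings.FieldsFunc(path, func(r rune) bool { return r == '/' })
	if len(toks) >= 2 {
		return toks[len(toks)-2]
	}
	return ""
}

func main() {
	fmt.Println(labelNameFromPath("/loki/api/v1/label/job/values")) // "job"
	fmt.Println(labelNameFromPath("/loki/api/v1/labels"))           // "" (not a values query)
}
```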
diff --git a/pkg/storage/batch_test.go b/pkg/storage/batch_test.go
index fa68aaee3f1d8..cc9a7ba13cf74 100644
--- a/pkg/storage/batch_test.go
+++ b/pkg/storage/batch_test.go
@@ -1704,7 +1704,7 @@ func Benchmark_store_OverlappingChunks(b *testing.B) {
 			b.Fatal(err)
 		}
 	}
-	r := statsCtx.Result(time.Since(start), 0)
+	r := statsCtx.Result(time.Since(start), 0, 0)
 	b.Log("Total chunks:" + fmt.Sprintf("%d", r.TotalChunksRef()))
 	b.Log("Total bytes decompressed:" + fmt.Sprintf("%d", r.TotalDecompressedBytes()))
 }
diff --git a/pkg/util/marshal/legacy/marshal_test.go b/pkg/util/marshal/legacy/marshal_test.go
index 19e9cda4c137a..e565c40aabeab 100644
--- a/pkg/util/marshal/legacy/marshal_test.go
+++ b/pkg/util/marshal/legacy/marshal_test.go
@@ -85,6 +85,7 @@ var queryTests = []struct {
 					"queueTime": 0,
 					"subqueries": 0,
 					"totalBytesProcessed":0,
+					"totalEntriesReturned":0,
 					"totalLinesProcessed":0
 				}
 			}
diff --git a/pkg/util/marshal/marshal_test.go b/pkg/util/marshal/marshal_test.go
index 248f8e32428a8..d11db731b492f 100644
--- a/pkg/util/marshal/marshal_test.go
+++ b/pkg/util/marshal/marshal_test.go
@@ -91,6 +91,7 @@ var queryTests = []struct {
 					"queueTime": 0,
 					"subqueries": 0,
 					"totalBytesProcessed":0,
+					"totalEntriesReturned":0,
 					"totalLinesProcessed":0
 				}
 			}
@@ -200,6 +201,7 @@ var queryTests = []struct {
 					"queueTime": 0,
 					"subqueries": 0,
 					"totalBytesProcessed":0,
+					"totalEntriesReturned":0,
 					"totalLinesProcessed":0
 				}
 			}
@@ -326,6 +328,7 @@ var queryTests = []struct {
 					"queueTime": 0,
 					"subqueries": 0,
 					"totalBytesProcessed":0,
+					"totalEntriesReturned":0,
 					"totalLinesProcessed":0
 				}
 			}
diff --git a/pkg/util/server/error.go b/pkg/util/server/error.go
index 897463d0a85ab..8d7a82a374fc3 100644
--- a/pkg/util/server/error.go
+++ b/pkg/util/server/error.go
@@ -26,6 +26,13 @@ const (
 
 // WriteError writes a go error with the correct status code.
 func WriteError(err error, w http.ResponseWriter) {
+	status, cerr := ClientHTTPStatusAndError(err)
+	http.Error(w, cerr.Error(), status)
+}
+
+// ClientHTTPStatusAndError returns the error and HTTP status that are "safe" to return to the client without
+// exposing any implementation details.
+func ClientHTTPStatusAndError(err error) (int, error) { var ( queryErr storage_errors.QueryError promErr promql.ErrStorage @@ -33,33 +40,30 @@ func WriteError(err error, w http.ResponseWriter) { me, ok := err.(util.MultiError) if ok && me.Is(context.Canceled) { - http.Error(w, ErrClientCanceled, StatusClientClosedRequest) - return + return StatusClientClosedRequest, errors.New(ErrClientCanceled) } if ok && me.IsDeadlineExceeded() { - http.Error(w, ErrDeadlineExceeded, http.StatusGatewayTimeout) - return + return http.StatusGatewayTimeout, errors.New(ErrDeadlineExceeded) } s, isRPC := status.FromError(err) switch { case errors.Is(err, context.Canceled) || (errors.As(err, &promErr) && errors.Is(promErr.Err, context.Canceled)): - http.Error(w, ErrClientCanceled, StatusClientClosedRequest) + return StatusClientClosedRequest, errors.New(ErrClientCanceled) case errors.Is(err, context.DeadlineExceeded) || (isRPC && s.Code() == codes.DeadlineExceeded): - http.Error(w, ErrDeadlineExceeded, http.StatusGatewayTimeout) + return http.StatusGatewayTimeout, errors.New(ErrDeadlineExceeded) case errors.As(err, &queryErr): - http.Error(w, err.Error(), http.StatusBadRequest) + return http.StatusBadRequest, err case errors.Is(err, logqlmodel.ErrLimit) || errors.Is(err, logqlmodel.ErrParse) || errors.Is(err, logqlmodel.ErrPipeline): - http.Error(w, err.Error(), http.StatusBadRequest) + return http.StatusBadRequest, err case errors.Is(err, user.ErrNoOrgID): - http.Error(w, err.Error(), http.StatusBadRequest) + return http.StatusBadRequest, err default: if grpcErr, ok := httpgrpc.HTTPResponseFromError(err); ok { - http.Error(w, string(grpcErr.Body), int(grpcErr.Code)) - return + return int(grpcErr.Code), errors.New(string(grpcErr.Body)) } - http.Error(w, err.Error(), http.StatusInternalServerError) + return http.StatusInternalServerError, err } } From d97724e8dd12214092932f7e82cd46a3bb6bfd74 Mon Sep 17 00:00:00 2001 From: Travis Patterson Date: Wed, 4 May 2022 08:50:09 -0600 Subject: [PATCH 067/223] Introduce coverage to PR pipelines (#5357) * Add code coverage diff to CI * remove test package from coverage diff * review feedback * lint * Env variable test * faster experiment * drone vars * drone vars * how does the shell work? 
* add pr comment * escape json * report diff in comment * properly format json * quote post body * review feedback * add querier/queryrange to coverage --- .drone/drone.jsonnet | 30 ++++++++++++++++++---- .drone/drone.yml | 50 +++++++++++++++++++++++++++++++++++-- .gitignore | 3 ++- Makefile | 7 ++++-- loki-build-image/Dockerfile | 2 +- tools/diff_coverage.sh | 18 +++++++++++++ 6 files changed, 99 insertions(+), 11 deletions(-) create mode 100755 tools/diff_coverage.sh diff --git a/.drone/drone.jsonnet b/.drone/drone.jsonnet index 32cd9bf017b93..4c8e145b764f1 100644 --- a/.drone/drone.jsonnet +++ b/.drone/drone.jsonnet @@ -46,15 +46,19 @@ local github_secret = secret('github_token', 'infra/data/ci/github/grafanabot', // Injected in a secret because this is a public repository and having the config here would leak our environment names local deploy_configuration = secret('deploy_config', 'secret/data/common/loki_ci_autodeploy', 'config.json'); - -local run(name, commands) = { +local run(name, commands, env={}) = { name: name, image: 'grafana/loki-build-image:%s' % build_image_version, commands: commands, + environment: env, }; -local make(target, container=true) = run(target, [ - 'make ' + (if !container then 'BUILD_IN_CONTAINER=false ' else '') + target, +local make(target, container=true, args=[]) = run(target, [ + std.join(' ', [ + 'make', + 'BUILD_IN_CONTAINER=' + container, + target, + ] + args), ]); local docker(arch, app) = { @@ -369,7 +373,23 @@ local manifest(apps) = pipeline('manifest') { steps: [ make('check-drone-drift', container=false) { depends_on: ['clone'] }, make('check-generated-files', container=false) { depends_on: ['clone'] }, - make('test', container=false) { depends_on: ['clone', 'check-generated-files'] }, + make('test', container=false) { depends_on: ['clone'] }, + run('clone-main', commands=['cd ..', 'git clone $CI_REPO_REMOTE loki-main', 'cd -']), + run('test-main', commands=['cd ../loki-main', 'BUILD_IN_CONTAINER=false make test']) { depends_on: ['clone-main'] }, + make('compare-coverage', container=false, args=[ + 'old=../loki-main/test_results.txt', + 'new=test_results.txt', + 'packages=ingester,distributor,querier,querier/queryrange,iter,storage,chunkenc,logql,loki', + '> diff.txt', + ]) { depends_on: ['test', 'test-main'] }, + run('report-coverage', commands=[ + "pull=$(echo $CI_COMMIT_REF | awk -F '/' '{print $3}')", + "body=$(jq -Rs '{body: . }' diff.txt)", + 'curl -X POST -u $USER:$TOKEN -H "Accept: application/vnd.github.v3+json" https://api.github.com/repos/grafana/loki/issues/$pull/comments -d "$body" > /dev/null', + ], env={ + USER: 'grafanabot', + TOKEN: { from_secret: github_secret.name }, + }) { depends_on: ['compare-coverage'] }, make('lint', container=false) { depends_on: ['clone', 'check-generated-files'] }, make('check-mod', container=false) { depends_on: ['clone', 'test', 'lint'] }, { diff --git a/.drone/drone.yml b/.drone/drone.yml index 90a2691ee8388..a9a1b250eb5fc 100644 --- a/.drone/drone.yml +++ b/.drone/drone.yml @@ -40,26 +40,67 @@ steps: - make BUILD_IN_CONTAINER=false check-drone-drift depends_on: - clone + environment: {} image: grafana/loki-build-image:0.20.4 name: check-drone-drift - commands: - make BUILD_IN_CONTAINER=false check-generated-files depends_on: - clone + environment: {} image: grafana/loki-build-image:0.20.4 name: check-generated-files - commands: - make BUILD_IN_CONTAINER=false test depends_on: - clone - - check-generated-files + environment: {} image: grafana/loki-build-image:0.20.4 name: test +- commands: + - cd .. 
+ - git clone $CI_REPO_REMOTE loki-main + - cd - + environment: {} + image: grafana/loki-build-image:0.20.4 + name: clone-main +- commands: + - cd ../loki-main + - BUILD_IN_CONTAINER=false make test + depends_on: + - clone-main + environment: {} + image: grafana/loki-build-image:0.20.4 + name: test-main +- commands: + - make BUILD_IN_CONTAINER=false compare-coverage old=../loki-main/test_results.txt + new=test_results.txt packages=ingester,distributor,querier,querier/queryrange,iter,storage,chunkenc,logql,loki + > diff.txt + depends_on: + - test + - test-main + environment: {} + image: grafana/loki-build-image:0.20.4 + name: compare-coverage +- commands: + - pull=$(echo $CI_COMMIT_REF | awk -F '/' '{print $3}') + - 'body=$(jq -Rs ''{body: . }'' diff.txt)' + - 'curl -X POST -u $USER:$TOKEN -H "Accept: application/vnd.github.v3+json" https://api.github.com/repos/grafana/loki/issues/$pull/comments + -d "$body" > /dev/null' + depends_on: + - compare-coverage + environment: + TOKEN: + from_secret: github_token + USER: grafanabot + image: grafana/loki-build-image:0.20.4 + name: report-coverage - commands: - make BUILD_IN_CONTAINER=false lint depends_on: - clone - check-generated-files + environment: {} image: grafana/loki-build-image:0.20.4 name: lint - commands: @@ -68,6 +109,7 @@ steps: - clone - test - lint + environment: {} image: grafana/loki-build-image:0.20.4 name: check-mod - commands: @@ -79,18 +121,21 @@ steps: depends_on: - clone - check-generated-files + environment: {} image: grafana/loki-build-image:0.20.4 name: loki - commands: - make BUILD_IN_CONTAINER=false validate-example-configs depends_on: - loki + environment: {} image: grafana/loki-build-image:0.20.4 name: validate-example-configs - commands: - make BUILD_IN_CONTAINER=false check-example-config-doc depends_on: - clone + environment: {} image: grafana/loki-build-image:0.20.4 name: check-example-config-doc trigger: @@ -109,6 +154,7 @@ steps: - make BUILD_IN_CONTAINER=false lint-jsonnet depends_on: - clone + environment: {} image: grafana/jsonnet-build:c8b75df name: lint-jsonnet trigger: @@ -1118,6 +1164,6 @@ kind: secret name: deploy_config --- kind: signature -hmac: 96966b3eec7f8976408f2c2f2c36a29b87a694c8a32afb2fdb16209e1f9e7521 +hmac: 4596e741ac788d461b3bbb2429c1f61efabaf943aeec6b3cd59eeff8d769de5e ... diff --git a/.gitignore b/.gitignore index a017012c7ed4b..dc3b86a0317ce 100644 --- a/.gitignore +++ b/.gitignore @@ -27,6 +27,7 @@ dlv rootfs/ dist coverage.txt +test_results.txt .DS_Store .aws-sam .idea @@ -40,4 +41,4 @@ coverage.txt *.tfvars # vscode -.vscode \ No newline at end of file +.vscode diff --git a/Makefile b/Makefile index 278aec0cd3629..053ea578add7f 100644 --- a/Makefile +++ b/Makefile @@ -10,7 +10,7 @@ .PHONY: validate-example-configs generate-example-config-doc check-example-config-doc .PHONY: clean clean-protos -SHELL = /usr/bin/env bash +SHELL = /usr/bin/env bash -o pipefail GOTEST ?= go test @@ -260,7 +260,10 @@ lint: ######## test: all - $(GOTEST) -covermode=atomic -coverprofile=coverage.txt -p=4 ./... + $(GOTEST) -covermode=atomic -coverprofile=coverage.txt -p=4 ./... 
| tee test_results.txt
+
+compare-coverage:
+	./tools/diff_coverage.sh $(old) $(new) $(packages)
 
 #########
 # Clean #
diff --git a/loki-build-image/Dockerfile b/loki-build-image/Dockerfile
index 99a7d05ac4529..9579edb2e0e67 100644
--- a/loki-build-image/Dockerfile
+++ b/loki-build-image/Dockerfile
@@ -68,7 +68,7 @@ RUN apt-get update && \
 	musl gnupg ragel \
 	file zip unzip jq gettext\
 	protobuf-compiler libprotobuf-dev \
-	libsystemd-dev && \
+	libsystemd-dev jq && \
 	rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*
 
 COPY --from=docker /usr/bin/docker /usr/bin/docker
diff --git a/tools/diff_coverage.sh b/tools/diff_coverage.sh
new file mode 100755
index 0000000000000..188821e91248b
--- /dev/null
+++ b/tools/diff_coverage.sh
@@ -0,0 +1,18 @@
+#!/bin/bash
+
+if [[ ! -f "$1" ]] || [[ ! -f "$2" ]]; then
+    echo "unable to compare test coverage: both old and new files must exist"
+    exit 0
+fi
+
+echo '```diff'
+for pkg in ${3//,/ }; do
+    old=$(grep "pkg/${pkg}\s" "$1" | sed s/%// | awk '{print $5}')
+    new=$(grep "pkg/${pkg}\s" "$2" | sed s/%// | awk '{print $5}')
+    echo | awk -v pkg="${pkg}" -v old="${old:-0}" -v new="${new:-0}" \
+        '{
+            sign=new - old < 0 ? "-" : "+"
+            printf ("%s %11s\t%s\n", sign, pkg, new - old)
+        }'
+done
+echo '```'

From 0a68bd2d4e440ea1300bb3347e9a3e6c15069cb9 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?G=C3=A1bor=20Farkas?=
Date: Wed, 4 May 2022 17:18:31 +0200
Subject: [PATCH 068/223] docs: update the labels timespan info (#6019)

* docs: update the labels timespan info

* updated docs

Co-authored-by: Karen Miller <84039272+KMiller-Grafana@users.noreply.github.com>

* updated docs

Co-authored-by: Karen Miller <84039272+KMiller-Grafana@users.noreply.github.com>

Co-authored-by: Karen Miller <84039272+KMiller-Grafana@users.noreply.github.com>
---
 docs/sources/api/_index.md | 9 +++++----
 1 file changed, 5 insertions(+), 4 deletions(-)

diff --git a/docs/sources/api/_index.md b/docs/sources/api/_index.md
index ccd245c5f0c75..5804a167a5a42 100644
--- a/docs/sources/api/_index.md
+++ b/docs/sources/api/_index.md
@@ -387,8 +387,9 @@ $ curl -G -s "http://localhost:3100/loki/api/v1/query_range" --data-urlencode '
 
 ## `GET /loki/api/v1/labels`
 
-`/loki/api/v1/labels` retrieves the list of known labels within a given time span. It
-accepts the following query parameters in the URL:
+`/loki/api/v1/labels` retrieves the list of known labels within a given time span.
+Loki may use a larger time span than the one specified.
+It accepts the following query parameters in the URL:
 
 - `start`: The start time for the query as a nanosecond Unix epoch. Defaults to 6 hours ago.
 - `end`: The end time for the query as a nanosecond Unix epoch. Defaults to now.
@@ -424,8 +425,8 @@ $ curl -G -s "http://localhost:3100/loki/api/v1/labels" | jq
 ## `GET /loki/api/v1/label/<name>/values`
 
 `/loki/api/v1/label/<name>/values` retrieves the list of known values for a given
-label within a given time span. It accepts the following query parameters in
-the URL:
+label within a given time span. Loki may use a larger time span than the one specified.
+It accepts the following query parameters in the URL:
 
 - `start`: The start time for the query as a nanosecond Unix epoch. Defaults to 6 hours ago.
 - `end`: The end time for the query as a nanosecond Unix epoch. Defaults to now.
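Both endpoints take `start` and `end` as nanosecond Unix epochs, which is easy to get wrong when building requests by hand. A small illustrative sketch of constructing such a query string in Go (the host and port follow the curl examples above; none of this is part of the docs change itself):

```go
package main

import (
	"fmt"
	"net/url"
	"strconv"
	"time"
)

func main() {
	// Cover the default window: the last 6 hours, expressed as
	// nanosecond Unix epochs per the parameter descriptions above.
	end := time.Now()
	start := end.Add(-6 * time.Hour)

	params := url.Values{}
	params.Set("start", strconv.FormatInt(start.UnixNano(), 10))
	params.Set("end", strconv.FormatInt(end.UnixNano(), 10))

	fmt.Printf("http://localhost:3100/loki/api/v1/labels?%s\n", params.Encode())
}
```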
From 14b0873f2b0a87bedd9d3ab222e865a777116133 Mon Sep 17 00:00:00 2001 From: n4mine Date: Thu, 5 May 2022 02:33:48 +0800 Subject: [PATCH 069/223] Revert "Docs: fix pic location in `sources/fundamentals/architecture/deployment-modes.md` (#6089)" (#6093) This reverts commit 1d25e17b321a13c451db3e4c5f58fd1a20629c9e. --- docs/sources/fundamentals/architecture/deployment-modes.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/docs/sources/fundamentals/architecture/deployment-modes.md b/docs/sources/fundamentals/architecture/deployment-modes.md index 9055fc24b07bc..151d31a7cd330 100644 --- a/docs/sources/fundamentals/architecture/deployment-modes.md +++ b/docs/sources/fundamentals/architecture/deployment-modes.md @@ -28,7 +28,7 @@ This is monolithic mode; it runs all of Loki’s microservice components inside a single process as a single binary or Docker image. -![monolithic mode diagram](./monolithic-mode.png) +![monolithic mode diagram](../monolithic-mode.png) Monolithic mode is useful for getting started quickly to experiment with Loki, as well as for small read/write volumes of up to approximately 100GB per day. @@ -54,7 +54,7 @@ Loki provides the simple scalable deployment mode. This deployment mode can scale to several TBs of logs per day and more. Consider the microservices mode approach for very large Loki installations. -![simple scalable deployment mode diagram](./simple-scalable.png) +![simple scalable deployment mode diagram](../simple-scalable.png) In this mode the component microservices of Loki are bundled into two targets: `-target=read` and `-target=write`. @@ -89,7 +89,7 @@ Each process is invoked specifying its `target`: * ruler * compactor -![microservices mode diagram](./microservices-mode.png) +![microservices mode diagram](../microservices-mode.png) Running components as individual microservices allows scaling up by increasing the quantity of microservices. From 4f75939efbc7b76c67df88754492a819107f4d48 Mon Sep 17 00:00:00 2001 From: Ed Welch Date: Wed, 4 May 2022 15:52:55 -0400 Subject: [PATCH 070/223] Revert "Loki: Change live tailing to only allow mutating the log line not the number of streams. (#6063)" (#6097) This reverts commit 98fda9b0a063f5a4160802ccd8490f1aa18d3d54. --- CHANGELOG.md | 1 - docs/sources/upgrading/_index.md | 24 ------------- pkg/ingester/tailer.go | 59 ++++++++++++++++++-------------- 3 files changed, 34 insertions(+), 50 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 07d36e3493ebf..dbbb274439f93 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -73,7 +73,6 @@ ##### Fixes * [5685](https://github.com/grafana/loki/pull/5685) **chaudum**: Assert that push values tuples consist of string values ##### Changes -* [6063](https://github.com/grafana/loki/pull/6063) **slim-bean**: Changes tailing API responses to not split a stream when using parsers in the query * [6042](https://github.com/grafana/loki/pull/6042) **slim-bean**: Add a new configuration to allow fudging of ingested timestamps to guarantee sort order of duplicate timestamps at query time. 
* [5777](https://github.com/grafana/loki/pull/5777) **tatchiuleung**: storage: make Azure blobID chunk delimiter configurable * [5650](https://github.com/grafana/loki/pull/5650) **cyriltovena**: Remove more chunkstore and schema version below v9 diff --git a/docs/sources/upgrading/_index.md b/docs/sources/upgrading/_index.md index 5dc4f8dfabd8e..4b012c2b7bb74 100644 --- a/docs/sources/upgrading/_index.md +++ b/docs/sources/upgrading/_index.md @@ -31,30 +31,6 @@ The output is incredibly verbose as it shows the entire internal config struct u ## Main / Unreleased -### Loki - -#### Tail API no longer creates multiple streams when using parsers. - -We expect this change to be non-impactful however it is a breaking change to existing behavior. - -This change would likely only affect anyone who's doing machine to machine type work with Loki's tail API -and is expecting a parser in a query to alter the streams in a tail response. - -Prior to this change a tail request with a parser (e.g. json, logfmt, regexp, pattern) would split the -incoming log stream into multiple streams based on the extracted labels after running the parser. - -[PR 6063](https://github.com/grafana/loki/pull/6063) changes this behavior -to no longer split incoming streams when using a parser in the query, instead Loki will return exactly -the same streams with and without a parser in the query. - -We found a significant performance impact when using parsers on live tailing queries which would -result in turning a single stream with multiple entries into multiple streams with single entries. -Often leading to the case where the tailing client could not keep up with the number of streams -being pushed and tailing logs being dropped. - -This change will have no impact on viewing the tail output from Grafana or logcli. -Parsers can still be used to do filtering and reformatting of live tailed log lines. - ## 2.5.0 ### Loki diff --git a/pkg/ingester/tailer.go b/pkg/ingester/tailer.go index 4a16f155de47f..4d034d7acb37b 100644 --- a/pkg/ingester/tailer.go +++ b/pkg/ingester/tailer.go @@ -28,7 +28,8 @@ type tailer struct { id uint32 orgID string matchers []*labels.Matcher - expr syntax.LogSelectorExpr + pipeline syntax.Pipeline + expr syntax.Expr pipelineMtx sync.Mutex sendChan chan *logproto.Stream @@ -51,9 +52,7 @@ func newTailer(orgID, query string, conn TailServer, maxDroppedStreams int) (*ta if err != nil { return nil, err } - // Make sure we can build a pipeline. The stream processing code doesn't have a place to handle - // this error so make sure we handle it here. 
-	_, err = expr.Pipeline()
+	pipeline, err := expr.Pipeline()
 	if err != nil {
 		return nil, err
 	}
@@ -62,6 +61,7 @@ func newTailer(orgID, query string, conn TailServer, maxDroppedStreams int) (*ta
 	return &tailer{
 		orgID:          orgID,
 		matchers:       matchers,
+		pipeline:       pipeline,
 		sendChan:       make(chan *logproto.Stream, bufferSizeForTailResponse),
 		conn:           conn,
 		droppedStreams: make([]*logproto.DroppedStream, 0, maxDroppedStreams),
@@ -121,44 +121,53 @@ func (t *tailer) send(stream logproto.Stream, lbs labels.Labels) {
 		return
 	}
 
-	processed := t.processStream(stream, lbs)
-	select {
-	case t.sendChan <- processed:
-	default:
-		t.dropStream(stream)
+	streams := t.processStream(stream, lbs)
+	if len(streams) == 0 {
+		return
+	}
+	for _, s := range streams {
+		select {
+		case t.sendChan <- s:
+		default:
+			t.dropStream(*s)
+		}
 	}
 }
 
-func (t *tailer) processStream(stream logproto.Stream, lbs labels.Labels) *logproto.Stream {
-	// Build a new pipeline for each call because the pipeline builds a cache of labels
-	// and if we don't start with a new pipeline that cache will grow unbounded.
-	// The error is ignored because it would be handled in the constructor of the tailer.
-	pipeline, _ := t.expr.Pipeline()
-
+func (t *tailer) processStream(stream logproto.Stream, lbs labels.Labels) []*logproto.Stream {
 	// Optimization: skip filtering entirely if no filter is set
-	if log.IsNoopPipeline(pipeline) {
-		return &stream
+	if log.IsNoopPipeline(t.pipeline) {
+		return []*logproto.Stream{&stream}
 	}
 	// Pipelines are not thread safe and the tailer can process multiple streams at once.
 	t.pipelineMtx.Lock()
 	defer t.pipelineMtx.Unlock()
 
-	responseStream := &logproto.Stream{
-		Labels:  stream.Labels,
-		Entries: make([]logproto.Entry, 0, len(stream.Entries)),
-	}
-	sp := pipeline.ForStream(lbs)
+	streams := map[uint64]*logproto.Stream{}
+
+	sp := t.pipeline.ForStream(lbs)
 	for _, e := range stream.Entries {
-		newLine, _, ok := sp.ProcessString(e.Timestamp.UnixNano(), e.Line)
+		newLine, parsedLbs, ok := sp.ProcessString(e.Timestamp.UnixNano(), e.Line)
 		if !ok {
 			continue
 		}
-		responseStream.Entries = append(responseStream.Entries, logproto.Entry{
+		var stream *logproto.Stream
+		if stream, ok = streams[parsedLbs.Hash()]; !ok {
+			stream = &logproto.Stream{
+				Labels: parsedLbs.String(),
+			}
+			streams[parsedLbs.Hash()] = stream
+		}
+		stream.Entries = append(stream.Entries, logproto.Entry{
 			Timestamp: e.Timestamp,
 			Line:      newLine,
 		})
 	}
-	return responseStream
+	streamsResult := make([]*logproto.Stream, 0, len(streams))
+	for _, stream := range streams {
+		streamsResult = append(streamsResult, stream)
+	}
+	return streamsResult
 }
 
 // isMatching returns true if lbs matches all matchers.
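The restored `processStream` fans a tailed stream out into one response stream per distinct post-pipeline label set, keyed by the label hash, which is why a parser in a tail query can multiply the number of streams. A stripped-down sketch of that grouping pattern (plain strings stand in for the logproto/labels types; all names here are illustrative):

```go
package main

import "fmt"

type entry struct {
	labelSet string // stand-in for parsedLbs.String()/Hash()
	line     string
}

// groupByLabelSet mirrors the shape of processStream: one output
// bucket per distinct label set produced by the pipeline, so a parser
// that extracts varying labels multiplies the number of streams.
func groupByLabelSet(entries []entry) map[string][]string {
	out := make(map[string][]string)
	for _, e := range entries {
		out[e.labelSet] = append(out[e.labelSet], e.line)
	}
	return out
}

func main() {
	entries := []entry{
		{`{app="a", level="info"}`, "line 1"},
		{`{app="a", level="error"}`, "line 2"},
		{`{app="a", level="info"}`, "line 3"},
	}
	for ls, lines := range groupByLabelSet(entries) {
		fmt.Printf("%s -> %d entries\n", ls, len(lines))
	}
}
```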
From 03153e89bffafb6551667ce00e20046089b3a4cc Mon Sep 17 00:00:00 2001 From: Travis Patterson Date: Wed, 4 May 2022 15:38:58 -0600 Subject: [PATCH 071/223] clean up the pr diff message (#6098) * clean up the pr diff message * add dependency to clone-main to avoid possible race * don't start tests before everything is cloned * formatting change --- .drone/drone.jsonnet | 4 ++-- .drone/drone.yml | 17 ++++++++++------- tools/diff_coverage.sh | 7 ++++++- 3 files changed, 18 insertions(+), 10 deletions(-) diff --git a/.drone/drone.jsonnet b/.drone/drone.jsonnet index 4c8e145b764f1..66fd88ab28f68 100644 --- a/.drone/drone.jsonnet +++ b/.drone/drone.jsonnet @@ -373,8 +373,8 @@ local manifest(apps) = pipeline('manifest') { steps: [ make('check-drone-drift', container=false) { depends_on: ['clone'] }, make('check-generated-files', container=false) { depends_on: ['clone'] }, - make('test', container=false) { depends_on: ['clone'] }, - run('clone-main', commands=['cd ..', 'git clone $CI_REPO_REMOTE loki-main', 'cd -']), + run('clone-main', commands=['cd ..', 'git clone $CI_REPO_REMOTE loki-main', 'cd -']) { depends_on: ['clone'] }, + make('test', container=false) { depends_on: ['clone', 'clone-main'] }, run('test-main', commands=['cd ../loki-main', 'BUILD_IN_CONTAINER=false make test']) { depends_on: ['clone-main'] }, make('compare-coverage', container=false, args=[ 'old=../loki-main/test_results.txt', diff --git a/.drone/drone.yml b/.drone/drone.yml index a9a1b250eb5fc..a5c5b8366c29a 100644 --- a/.drone/drone.yml +++ b/.drone/drone.yml @@ -51,19 +51,22 @@ steps: image: grafana/loki-build-image:0.20.4 name: check-generated-files - commands: - - make BUILD_IN_CONTAINER=false test + - cd .. + - git clone $CI_REPO_REMOTE loki-main + - cd - depends_on: - clone environment: {} image: grafana/loki-build-image:0.20.4 - name: test + name: clone-main - commands: - - cd .. - - git clone $CI_REPO_REMOTE loki-main - - cd - + - make BUILD_IN_CONTAINER=false test + depends_on: + - clone + - clone-main environment: {} image: grafana/loki-build-image:0.20.4 - name: clone-main + name: test - commands: - cd ../loki-main - BUILD_IN_CONTAINER=false make test @@ -1164,6 +1167,6 @@ kind: secret name: deploy_config --- kind: signature -hmac: 4596e741ac788d461b3bbb2429c1f61efabaf943aeec6b3cd59eeff8d769de5e +hmac: e3f0cead040a655e51244d5d71377a4ba506d7f63fae440593f6e7d14018a1e3 ... diff --git a/tools/diff_coverage.sh b/tools/diff_coverage.sh index 188821e91248b..ffb21eadf21b9 100755 --- a/tools/diff_coverage.sh +++ b/tools/diff_coverage.sh @@ -5,6 +5,11 @@ if [[ ! -f "$1" ]] || [[ ! -f "$2" ]]; then exit 0 fi +echo +echo +echo '**Change in test coverage per package. Green indicates 0 or positive change, red indicates that test coverage for a package fell.**' +echo + echo '```diff' for pkg in ${3//,/ }; do old=$(grep "pkg/${pkg}\s" "$1" | sed s/%// | awk '{print $5}') @@ -12,7 +17,7 @@ for pkg in ${3//,/ }; do echo | awk -v pkg="${pkg}" -v old="${old:-0}" -v new="${new:-0}" \ '{ sign=new - old < 0 ? 
"-" : "+" - printf ("%s %11s\t%s\n", sign, pkg, new - old) + printf ("%s %18s\t%s%%\n", sign, pkg, new - old) }' done echo '```' From b45efd4c2df0d54ed984a11777a5c24fc076a58c Mon Sep 17 00:00:00 2001 From: Owen Diehl Date: Thu, 5 May 2022 07:03:33 -0400 Subject: [PATCH 072/223] TSDB shipper + WAL (#6049) * begins speccing out TSDB Head * auto incrementing series ref + mempostings * mintime/maxtime methods * tsdb head IndexReader impl * head correctly populates ref lookup * tsdb head tests * adds prometheus license to tsdb head * linting * [WIP] speccing out tsdb head wal * fix length check and adds tsdb wal encoding tests * exposes wal structs & removes closed semantics * logs start time in the tsdb wal * wal interface + testing * exports walrecord + returns ref when appending * specs out head manager * tsdb head manager wal initialization * tsdb wal rotation * wals dont use node name, but tsdb files do * cleans up fn signature * multi tsdb idx now just wraps Index interfaces * no longer sorts indices when creating multi-idx * tenantHeads & HeadManger index impls * head mgr tests * bugfixes & head manager tests * tsdb dir selection now helper fns * period utility * pulls out more code to helpers, fixes some var races * head recovery is more generic * tsdb manager builds from wals * pulls more helpers out of headmanager * lockedIdx, Close() on idx, tsdbManager update * removes mmap from index reader implementation * tsdb file * adds tsdb shipper config and refactors initStore * removes unused tsdbManager code * implements stores.Index and stores.ChunkWriter for tsdb * chunk.Data now supports an Entries() method * moves walreader to new util/wal pkg to avoid circular dep + tsdb storage alignment * tsdb store * passes indexWriter to chunkWriter * build a tsdb per index bucket in according with shipper conventions * dont open tsdb files until necessary for indexshipper * tsdbManager Index impl * tsdb defaults + initStore fix for invalid looping * fixes UsingTSDB helper * disables deleteRequestStore when using TSDB * pass limits to tsdb store * always start headmanager for tsdb Signed-off-by: Owen Diehl * fixes copy bug Signed-off-by: Owen Diehl * more logging Signed-off-by: Owen Diehl * fixes duplicate tenant label bug Signed-off-by: Owen Diehl * debug logs, uses label builder, removes __name__=logs for tsdb Signed-off-by: Owen Diehl * tsdb fixes labels at earlier pt Signed-off-by: Owen Diehl * account for setting tenant label in head manager test * changing tsdb dir names * identifier interface, builder to tsdb pkg * tsdb version path prefix * fixes buildfromwals identifier * fixes tsdb shipper paths * split buckets once per user set * refactors combining single and multi tenant tsdb indices on shipper reads * indexshipper ignores old gzip logic * method name refactor * remove unused record type * removes v1 prefix in tsdb paths and refactores indices method * ignores double optimization in tsdb looking for multitenant idx, shipper handles this * removes 5-ln requirement on shipper tablename regexp * groups identifiers, begins removing multitenant prefix in shipped files * passses open fn to indexshipper * exposes RealByteSlice * TSDBFile no longer needs a file descriptor, parses gzip extensions * method signature fixing * stop masquerading as compressed indices post-download in indexshipper * variable bucket regexp * removes accidental configs committed * label matcher handling for multitenancy and metricname in tsdb * explicitly require fingerprint when creating tsdb index * only add tenant 
label when creating multitenant tsdb write fingerprints without synthetic tenant label strip out tenant labels from queries * linting + unused removal * more linting :( * goimports * removes uploadername from indexshipper * maxuint32 for arm32 builds * tsdb chunk filterer support * always set ingester name when using object storage index Co-authored-by: Sandeep Sukhani --- pkg/chunkenc/facade.go | 11 +- pkg/ingester/ingester.go | 3 +- pkg/ingester/recovery.go | 34 - pkg/loki/config_wrapper.go | 29 + pkg/loki/modules.go | 111 ++- pkg/storage/chunk/bigchunk.go | 4 + pkg/storage/chunk/interface.go | 2 + pkg/storage/config/schema_config.go | 39 +- pkg/storage/factory.go | 9 +- pkg/storage/store.go | 60 +- .../indexshipper/downloads/index_set.go | 76 +- .../indexshipper/downloads/table_manager.go | 9 +- pkg/storage/stores/indexshipper/shipper.go | 7 +- .../stores/indexshipper/uploads/index_set.go | 12 +- .../stores/indexshipper/uploads/table.go | 4 +- .../indexshipper/uploads/table_manager.go | 3 +- .../uploads/table_manager_test.go | 1 - .../stores/indexshipper/uploads/table_test.go | 3 +- .../stores/series/series_store_write.go | 8 +- .../stores/shipper/shipper_index_client.go | 24 +- .../stores/tsdb/{index => }/builder.go | 76 +- pkg/storage/stores/tsdb/chunkwriter.go | 100 +++ pkg/storage/stores/tsdb/compact.go | 44 +- pkg/storage/stores/tsdb/compact_test.go | 7 +- pkg/storage/stores/tsdb/head.go | 59 +- pkg/storage/stores/tsdb/head_manager.go | 663 ++++++++++++++++++ pkg/storage/stores/tsdb/head_manager_test.go | 310 ++++++++ pkg/storage/stores/tsdb/head_read.go | 4 +- pkg/storage/stores/tsdb/head_wal.go | 213 ++++++ pkg/storage/stores/tsdb/head_wal_test.go | 102 +++ pkg/storage/stores/tsdb/identifier.go | 202 ++++++ pkg/storage/stores/tsdb/identifier_test.go | 57 ++ pkg/storage/stores/tsdb/index.go | 24 + pkg/storage/stores/tsdb/index/chunk.go | 4 +- pkg/storage/stores/tsdb/index/chunk_test.go | 2 +- pkg/storage/stores/tsdb/index/index.go | 53 +- pkg/storage/stores/tsdb/index/index_test.go | 19 +- pkg/storage/stores/tsdb/index_client.go | 118 ++++ pkg/storage/stores/tsdb/lazy_index.go | 66 ++ pkg/storage/stores/tsdb/manager.go | 284 ++++++++ pkg/storage/stores/tsdb/multi_file_index.go | 67 +- .../stores/tsdb/multi_file_index_test.go | 2 +- pkg/storage/stores/tsdb/multitenant.go | 74 ++ pkg/storage/stores/tsdb/querier_test.go | 17 +- pkg/storage/stores/tsdb/single_file_index.go | 122 +++- .../stores/tsdb/single_file_index_test.go | 13 +- pkg/storage/stores/tsdb/util_test.go | 20 +- pkg/util/wal/reader.go | 42 ++ tools/tsdb/tsdb-map/main.go | 10 +- 49 files changed, 2926 insertions(+), 297 deletions(-) rename pkg/storage/stores/tsdb/{index => }/builder.go (63%) create mode 100644 pkg/storage/stores/tsdb/chunkwriter.go create mode 100644 pkg/storage/stores/tsdb/head_manager.go create mode 100644 pkg/storage/stores/tsdb/head_manager_test.go create mode 100644 pkg/storage/stores/tsdb/head_wal.go create mode 100644 pkg/storage/stores/tsdb/head_wal_test.go create mode 100644 pkg/storage/stores/tsdb/identifier.go create mode 100644 pkg/storage/stores/tsdb/identifier_test.go create mode 100644 pkg/storage/stores/tsdb/index_client.go create mode 100644 pkg/storage/stores/tsdb/lazy_index.go create mode 100644 pkg/storage/stores/tsdb/manager.go create mode 100644 pkg/storage/stores/tsdb/multitenant.go create mode 100644 pkg/util/wal/reader.go diff --git a/pkg/chunkenc/facade.go b/pkg/chunkenc/facade.go index 1db71c189cd65..142e3d48d0ef8 100644 --- a/pkg/chunkenc/facade.go +++ b/pkg/chunkenc/facade.go 
@@ -73,7 +73,9 @@ func (f Facade) Utilization() float64 { return f.c.Utilization() } -// Size implements encoding.Chunk. +// Size implements encoding.Chunk, which unfortunately uses +// the Size method to refer to the byte size and not the entry count +// like chunkenc.Chunk does. func (f Facade) Size() int { if f.c == nil { return 0 @@ -82,6 +84,13 @@ func (f Facade) Size() int { return f.c.CompressedSize() } +func (f Facade) Entries() int { + if f.c == nil { + return 0 + } + return f.c.Size() +} + // LokiChunk returns the chunkenc.Chunk. func (f Facade) LokiChunk() Chunk { return f.c diff --git a/pkg/ingester/ingester.go b/pkg/ingester/ingester.go index e12b9548bffb9..ed05e966000ad 100644 --- a/pkg/ingester/ingester.go +++ b/pkg/ingester/ingester.go @@ -38,6 +38,7 @@ import ( "github.com/grafana/loki/pkg/util" errUtil "github.com/grafana/loki/pkg/util" util_log "github.com/grafana/loki/pkg/util/log" + "github.com/grafana/loki/pkg/util/wal" "github.com/grafana/loki/pkg/validation" ) @@ -420,7 +421,7 @@ func (i *Ingester) starting(ctx context.Context) error { ) level.Info(util_log.Logger).Log("msg", "recovering from WAL") - segmentReader, segmentCloser, err := newWalReader(i.cfg.WAL.Dir, -1) + segmentReader, segmentCloser, err := wal.NewWalReader(i.cfg.WAL.Dir, -1) if err != nil { return err } diff --git a/pkg/ingester/recovery.go b/pkg/ingester/recovery.go index cb27a14a27e90..86fc6305b13d4 100644 --- a/pkg/ingester/recovery.go +++ b/pkg/ingester/recovery.go @@ -30,40 +30,6 @@ func (NoopWALReader) Err() error { return nil } func (NoopWALReader) Record() []byte { return nil } func (NoopWALReader) Close() error { return nil } -// If startSegment is <0, it means all the segments. -func newWalReader(dir string, startSegment int) (*wal.Reader, io.Closer, error) { - var ( - segmentReader io.ReadCloser - err error - ) - if startSegment < 0 { - segmentReader, err = wal.NewSegmentsReader(dir) - if err != nil { - return nil, nil, err - } - } else { - first, last, err := wal.Segments(dir) - if err != nil { - return nil, nil, err - } - if startSegment > last { - return nil, nil, errors.New("start segment is beyond the last WAL segment") - } - if first > startSegment { - startSegment = first - } - segmentReader, err = wal.NewSegmentsRangeReader(wal.SegmentRange{ - Dir: dir, - First: startSegment, - Last: -1, // Till the end. 
- }) - if err != nil { - return nil, nil, err - } - } - return wal.NewReader(segmentReader), segmentReader, nil -} - func newCheckpointReader(dir string) (WALReader, io.Closer, error) { lastCheckpointDir, idx, err := lastCheckpoint(dir) if err != nil { diff --git a/pkg/loki/config_wrapper.go b/pkg/loki/config_wrapper.go index 0427d5bf486b0..2f1e19cf6fa98 100644 --- a/pkg/loki/config_wrapper.go +++ b/pkg/loki/config_wrapper.go @@ -103,6 +103,10 @@ func (c *ConfigWrapper) ApplyDynamicConfig() cfg.Source { betterBoltdbShipperDefaults(r, &defaults) } + if len(r.SchemaConfig.Configs) > 0 && config.UsingTSDB(r.SchemaConfig.Configs) { + betterTSDBShipperDefaults(r, &defaults) + } + applyFIFOCacheConfig(r) applyIngesterFinalSleep(r) applyIngesterReplicationFactor(r) @@ -497,6 +501,31 @@ func betterBoltdbShipperDefaults(cfg, defaults *ConfigWrapper) { } } +func betterTSDBShipperDefaults(cfg, defaults *ConfigWrapper) { + currentSchemaIdx := config.ActivePeriodConfig(cfg.SchemaConfig.Configs) + currentSchema := cfg.SchemaConfig.Configs[currentSchemaIdx] + + if cfg.StorageConfig.TSDBShipperConfig.SharedStoreType == defaults.StorageConfig.TSDBShipperConfig.SharedStoreType { + cfg.StorageConfig.TSDBShipperConfig.SharedStoreType = currentSchema.ObjectType + } + + if cfg.CompactorConfig.SharedStoreType == defaults.CompactorConfig.SharedStoreType { + cfg.CompactorConfig.SharedStoreType = currentSchema.ObjectType + } + + if cfg.Common.PathPrefix != "" { + prefix := strings.TrimSuffix(cfg.Common.PathPrefix, "/") + + if cfg.StorageConfig.TSDBShipperConfig.ActiveIndexDirectory == "" { + cfg.StorageConfig.TSDBShipperConfig.ActiveIndexDirectory = fmt.Sprintf("%s/tsdb-shipper-active", prefix) + } + + if cfg.StorageConfig.TSDBShipperConfig.CacheLocation == "" { + cfg.StorageConfig.TSDBShipperConfig.CacheLocation = fmt.Sprintf("%s/tsdb-shipper-cache", prefix) + } + } +} + // applyFIFOCacheConfig turns on FIFO cache for the chunk store and for the query range results, // but only if no other cache storage is configured (redis or memcache). // diff --git a/pkg/loki/modules.go b/pkg/loki/modules.go index d3660d1631b4c..62506a8e41fb4 100644 --- a/pkg/loki/modules.go +++ b/pkg/loki/modules.go @@ -48,6 +48,7 @@ import ( "github.com/grafana/loki/pkg/storage/chunk/cache" chunk_util "github.com/grafana/loki/pkg/storage/chunk/client/util" "github.com/grafana/loki/pkg/storage/config" + "github.com/grafana/loki/pkg/storage/stores/indexshipper" "github.com/grafana/loki/pkg/storage/stores/series/index" "github.com/grafana/loki/pkg/storage/stores/shipper" "github.com/grafana/loki/pkg/storage/stores/shipper/compactor" @@ -382,19 +383,25 @@ func (t *Loki) initTableManager() (services.Service, error) { } func (t *Loki) initStore() (_ services.Service, err error) { + // Always set these configs + // TODO(owen-d): Do the same for TSDB when we add IndexGatewayClientConfig + t.Cfg.StorageConfig.BoltDBShipperConfig.IndexGatewayClientConfig.Mode = t.Cfg.IndexGateway.Mode + t.Cfg.StorageConfig.BoltDBShipperConfig.IndexGatewayClientConfig.Ring = t.indexGatewayRing + // If RF > 1 and current or upcoming index type is boltdb-shipper then disable index dedupe and write dedupe cache. // This is to ensure that index entries are replicated to all the boltdb files in ingesters flushing replicated data. 
- if t.Cfg.Ingester.LifecyclerConfig.RingConfig.ReplicationFactor > 1 && config.UsingBoltdbShipper(t.Cfg.SchemaConfig.Configs) { + if t.Cfg.Ingester.LifecyclerConfig.RingConfig.ReplicationFactor > 1 && config.UsingObjectStorageIndex(t.Cfg.SchemaConfig.Configs) { t.Cfg.ChunkStoreConfig.DisableIndexDeduplication = true t.Cfg.ChunkStoreConfig.WriteDedupeCacheConfig = cache.Config{} } - if config.UsingBoltdbShipper(t.Cfg.SchemaConfig.Configs) { + // Set configs pertaining to object storage based indices + if config.UsingObjectStorageIndex(t.Cfg.SchemaConfig.Configs) { t.Cfg.StorageConfig.BoltDBShipperConfig.IngesterName = t.Cfg.Ingester.LifecyclerConfig.ID + t.Cfg.StorageConfig.TSDBShipperConfig.IngesterName = t.Cfg.Ingester.LifecyclerConfig.ID + switch true { case t.Cfg.isModuleEnabled(Ingester), t.Cfg.isModuleEnabled(Write): - // We do not want ingester to unnecessarily keep downloading files - t.Cfg.StorageConfig.BoltDBShipperConfig.Mode = shipper.ModeWriteOnly // Use fifo cache for caching index in memory, this also significantly helps performance. t.Cfg.StorageConfig.IndexQueriesCacheConfig = cache.Config{ EnableFifoCache: true, @@ -412,22 +419,53 @@ func (t *Loki) initStore() (_ services.Service, err error) { // have query gaps on chunks flushed after an index entry is cached by keeping them retained in the ingester // and queried as part of live data until the cache TTL expires on the index entry. t.Cfg.Ingester.RetainPeriod = t.Cfg.StorageConfig.IndexCacheValidity + 1*time.Minute - t.Cfg.StorageConfig.BoltDBShipperConfig.IngesterDBRetainPeriod = boltdbShipperQuerierIndexUpdateDelay(t.Cfg) + + // We do not want ingester to unnecessarily keep downloading files + t.Cfg.StorageConfig.BoltDBShipperConfig.Mode = shipper.ModeWriteOnly + t.Cfg.StorageConfig.BoltDBShipperConfig.IngesterDBRetainPeriod = shipperQuerierIndexUpdateDelay(t.Cfg.StorageConfig.IndexCacheValidity, t.Cfg.StorageConfig.BoltDBShipperConfig.ResyncInterval) + + t.Cfg.StorageConfig.TSDBShipperConfig.Mode = indexshipper.ModeWriteOnly + t.Cfg.StorageConfig.TSDBShipperConfig.IngesterDBRetainPeriod = shipperQuerierIndexUpdateDelay(t.Cfg.StorageConfig.IndexCacheValidity, t.Cfg.StorageConfig.TSDBShipperConfig.ResyncInterval) + case t.Cfg.isModuleEnabled(Querier), t.Cfg.isModuleEnabled(Ruler), t.Cfg.isModuleEnabled(Read), t.isModuleActive(IndexGateway): // We do not want query to do any updates to index t.Cfg.StorageConfig.BoltDBShipperConfig.Mode = shipper.ModeReadOnly + t.Cfg.StorageConfig.TSDBShipperConfig.Mode = indexshipper.ModeReadOnly default: t.Cfg.StorageConfig.BoltDBShipperConfig.Mode = shipper.ModeReadWrite - t.Cfg.StorageConfig.BoltDBShipperConfig.IngesterDBRetainPeriod = boltdbShipperQuerierIndexUpdateDelay(t.Cfg) + t.Cfg.StorageConfig.BoltDBShipperConfig.IngesterDBRetainPeriod = shipperQuerierIndexUpdateDelay(t.Cfg.StorageConfig.IndexCacheValidity, t.Cfg.StorageConfig.BoltDBShipperConfig.ResyncInterval) + t.Cfg.StorageConfig.TSDBShipperConfig.Mode = indexshipper.ModeReadWrite + t.Cfg.StorageConfig.TSDBShipperConfig.IngesterDBRetainPeriod = shipperQuerierIndexUpdateDelay(t.Cfg.StorageConfig.IndexCacheValidity, t.Cfg.StorageConfig.TSDBShipperConfig.ResyncInterval) + } } - t.Cfg.StorageConfig.BoltDBShipperConfig.IndexGatewayClientConfig.Mode = t.Cfg.IndexGateway.Mode - t.Cfg.StorageConfig.BoltDBShipperConfig.IndexGatewayClientConfig.Ring = t.indexGatewayRing + if config.UsingObjectStorageIndex(t.Cfg.SchemaConfig.Configs) { + var asyncStore bool + + shipperConfigIdx := config.ActivePeriodConfig(t.Cfg.SchemaConfig.Configs) 
+ iTy := t.Cfg.SchemaConfig.Configs[shipperConfigIdx].IndexType + if iTy != config.BoltDBShipperType && iTy != config.TSDBType { + shipperConfigIdx++ + } + + // TODO(owen-d): make helper more agnostic between boltdb|tsdb + var resyncInterval time.Duration + switch t.Cfg.SchemaConfig.Configs[shipperConfigIdx].IndexType { + case config.BoltDBShipperType: + resyncInterval = t.Cfg.StorageConfig.BoltDBShipperConfig.ResyncInterval + case config.TSDBType: + resyncInterval = t.Cfg.StorageConfig.TSDBShipperConfig.ResyncInterval + } + + minIngesterQueryStoreDuration := shipperMinIngesterQueryStoreDuration( + t.Cfg.Ingester.MaxChunkAge, + shipperQuerierIndexUpdateDelay( + t.Cfg.StorageConfig.IndexCacheValidity, + resyncInterval, + ), + ) - var asyncStore bool - if config.UsingBoltdbShipper(t.Cfg.SchemaConfig.Configs) { - boltdbShipperMinIngesterQueryStoreDuration := boltdbShipperMinIngesterQueryStoreDuration(t.Cfg) switch true { case t.Cfg.isModuleEnabled(Querier), t.Cfg.isModuleEnabled(Ruler), t.Cfg.isModuleEnabled(Read): // Do not use the AsyncStore if the querier is configured with QueryStoreOnly set to true @@ -439,30 +477,34 @@ func (t *Loki) initStore() (_ services.Service, err error) { asyncStore = true case t.Cfg.isModuleEnabled(IndexGateway): // we want to use the actual storage when running the index-gateway, so we remove the Addr from the config + // TODO(owen-d): Do the same for TSDB when we add IndexGatewayClientConfig t.Cfg.StorageConfig.BoltDBShipperConfig.IndexGatewayClientConfig.Disabled = true case t.Cfg.isModuleEnabled(All): // We want ingester to also query the store when using boltdb-shipper but only when running with target All. // We do not want to use AsyncStore otherwise it would start spiraling around doing queries over and over again to the ingesters and store. // ToDo: See if we can avoid doing this when not running loki in clustered mode. 
 			t.Cfg.Ingester.QueryStore = true
-			boltdbShipperConfigIdx := config.ActivePeriodConfig(t.Cfg.SchemaConfig.Configs)
-			if t.Cfg.SchemaConfig.Configs[boltdbShipperConfigIdx].IndexType != config.BoltDBShipperType {
-				boltdbShipperConfigIdx++
-			}
-			mlb, err := calculateMaxLookBack(t.Cfg.SchemaConfig.Configs[boltdbShipperConfigIdx], t.Cfg.Ingester.QueryStoreMaxLookBackPeriod,
-				boltdbShipperMinIngesterQueryStoreDuration)
+
+			mlb, err := calculateMaxLookBack(
+				t.Cfg.SchemaConfig.Configs[shipperConfigIdx],
+				t.Cfg.Ingester.QueryStoreMaxLookBackPeriod,
+				minIngesterQueryStoreDuration,
+			)
 			if err != nil {
 				return nil, err
 			}
 			t.Cfg.Ingester.QueryStoreMaxLookBackPeriod = mlb
 		}
-	}

-	if asyncStore {
-		t.Cfg.StorageConfig.EnableAsyncStore = true
-		t.Cfg.StorageConfig.AsyncStoreConfig = storage.AsyncStoreCfg{
-			IngesterQuerier:      t.ingesterQuerier,
-			QueryIngestersWithin: calculateAsyncStoreQueryIngestersWithin(t.Cfg.Querier.QueryIngestersWithin, boltdbShipperMinIngesterQueryStoreDuration(t.Cfg)),
+		if asyncStore {
+			t.Cfg.StorageConfig.EnableAsyncStore = true
+			t.Cfg.StorageConfig.AsyncStoreConfig = storage.AsyncStoreCfg{
+				IngesterQuerier: t.ingesterQuerier,
+				QueryIngestersWithin: calculateAsyncStoreQueryIngestersWithin(
+					t.Cfg.Querier.QueryIngestersWithin,
+					minIngesterQueryStoreDuration,
+				),
+			}
 		}
 	}

@@ -908,6 +950,11 @@ func (t *Loki) initUsageReport() (services.Service, error) {
 }

 func (t *Loki) deleteRequestsStore() (deletion.DeleteRequestsStore, error) {
+	// TODO(owen-d): enable delete request storage in tsdb
+	if config.UsingTSDB(t.Cfg.SchemaConfig.Configs) {
+		return deletion.NewNoOpDeleteRequestsStore(), nil
+	}
+
 	filteringEnabled, err := deletion.FilteringEnabled(t.Cfg.CompactorConfig.DeletionMode)
 	if err != nil {
 		return nil, err
@@ -954,24 +1001,24 @@ func calculateAsyncStoreQueryIngestersWithin(queryIngestersWithinConfig, minDura
 	return queryIngestersWithinConfig
 }

-// boltdbShipperQuerierIndexUpdateDelay returns duration it could take for queriers to serve the index since it was uploaded.
+// shipperQuerierIndexUpdateDelay returns the duration it could take for queriers to serve the index since it was uploaded.
 // It considers up to 3 sync attempts for the index-gateway/queriers to be successful in syncing the files to factor in worst-case scenarios like
 // failures in sync, low download throughput, various kinds of caches in between etc., which can delay the sync operation from getting all the updates from the storage.
 // It also considers index cache validity because a querier could have cached index just before it was going to resync which means
 // it would keep serving index until the cache entries expire.
 // For example, with a 5m index cache validity and the default 5m resync interval this evaluates to 5m + 3*5m = 20m.
-func boltdbShipperQuerierIndexUpdateDelay(cfg Config) time.Duration {
-	return cfg.StorageConfig.IndexCacheValidity + cfg.StorageConfig.BoltDBShipperConfig.ResyncInterval*3
+func shipperQuerierIndexUpdateDelay(cacheValidity, resyncInterval time.Duration) time.Duration {
+	return cacheValidity + resyncInterval*3
 }

-// boltdbShipperIngesterIndexUploadDelay returns duration it could take for an index file containing id of a chunk to be uploaded to the shared store since it got flushed.
-func boltdbShipperIngesterIndexUploadDelay() time.Duration {
+// shipperIngesterIndexUploadDelay returns the duration it could take for an index file containing the ID of a chunk to be uploaded to the shared store since it got flushed.
+func shipperIngesterIndexUploadDelay() time.Duration {
 	return uploads.ShardDBsByDuration + shipper.UploadInterval
 }

-// boltdbShipperMinIngesterQueryStoreDuration returns minimum duration(with some buffer) ingesters should query their stores to
-// avoid queriers from missing any logs or chunk ids due to async nature of BoltDB Shipper.
-func boltdbShipperMinIngesterQueryStoreDuration(cfg Config) time.Duration {
-	return cfg.Ingester.MaxChunkAge + boltdbShipperIngesterIndexUploadDelay() + boltdbShipperQuerierIndexUpdateDelay(cfg) + 5*time.Minute
+// shipperMinIngesterQueryStoreDuration returns the minimum duration (with some buffer) for which ingesters should query their stores to
+// avoid missing any logs or chunk IDs due to the async nature of the shipper.
+func shipperMinIngesterQueryStoreDuration(maxChunkAge, querierUpdateDelay time.Duration) time.Duration {
+	return maxChunkAge + shipperIngesterIndexUploadDelay() + querierUpdateDelay + 5*time.Minute
 }

 // NewServerService constructs service from Server component.
diff --git a/pkg/storage/chunk/bigchunk.go b/pkg/storage/chunk/bigchunk.go
index f131a23f7f803..4b01ab01f6334 100644
--- a/pkg/storage/chunk/bigchunk.go
+++ b/pkg/storage/chunk/bigchunk.go
@@ -34,6 +34,10 @@ func newBigchunk() *bigchunk {
 	return &bigchunk{}
 }

+// TODO(owen-d): remove bigchunk from our code, we don't use it.
+// Hack an Entries() impl so bigchunk satisfies the updated Data interface.
+func (b *bigchunk) Entries() int { return 0 }
+
 func (b *bigchunk) Add(sample model.SamplePair) (Data, error) {
 	if b.remainingSamples == 0 {
 		if bigchunkSizeCapBytes > 0 && b.Size() > bigchunkSizeCapBytes {
diff --git a/pkg/storage/chunk/interface.go b/pkg/storage/chunk/interface.go
index 159a8a788ec53..ae6cef89c8c1a 100644
--- a/pkg/storage/chunk/interface.go
+++ b/pkg/storage/chunk/interface.go
@@ -52,6 +52,8 @@ type Data interface {
 	Rebound(start, end model.Time, filter filter.Func) (Data, error)
 	// Size returns the approximate length of the chunk in bytes.
 	Size() int
+	// Entries returns the number of entries in a chunk.
+	Entries() int
 	Utilization() float64
 }

diff --git a/pkg/storage/config/schema_config.go b/pkg/storage/config/schema_config.go
index e5081460c6fbc..18b48433b55dd 100644
--- a/pkg/storage/config/schema_config.go
+++ b/pkg/storage/config/schema_config.go
@@ -39,6 +39,7 @@ const (
 	StorageTypeSwift = "swift"
 	// BoltDBShipperType holds the index type for using boltdb with shipper which keeps flushing them to a shared storage
 	BoltDBShipperType = "boltdb-shipper"
+	TSDBType = "tsdb"
 )

 var (
@@ -184,17 +185,47 @@ func ActivePeriodConfig(configs []PeriodConfig) int {
 	return i
 }

-// UsingBoltdbShipper checks whether current or the next index type is boltdb-shipper, returns true if yes.
-func UsingBoltdbShipper(configs []PeriodConfig) bool {
+func usingForPeriodConfigs(configs []PeriodConfig, fn func(PeriodConfig) bool) bool {
 	activePCIndex := ActivePeriodConfig(configs)
-	if configs[activePCIndex].IndexType == BoltDBShipperType ||
-		(len(configs)-1 > activePCIndex && configs[activePCIndex+1].IndexType == BoltDBShipperType) {
+
+	if fn(configs[activePCIndex]) ||
+		(len(configs)-1 > activePCIndex && fn(configs[activePCIndex+1])) {
 		return true
 	}

 	return false
 }

+func UsingObjectStorageIndex(configs []PeriodConfig) bool {
+	fn := func(cfg PeriodConfig) bool {
+		switch cfg.IndexType {
+		case BoltDBShipperType, TSDBType:
+			return true
+		default:
+			return false
+		}
+	}
+
+	return usingForPeriodConfigs(configs, fn)
+}
+
+// UsingBoltdbShipper checks whether the current or the next index type is boltdb-shipper, returns true if yes.
+func UsingBoltdbShipper(configs []PeriodConfig) bool { + fn := func(cfg PeriodConfig) bool { + return cfg.IndexType == BoltDBShipperType + } + + return usingForPeriodConfigs(configs, fn) +} + +func UsingTSDB(configs []PeriodConfig) bool { + fn := func(cfg PeriodConfig) bool { + return cfg.IndexType == TSDBType + } + + return usingForPeriodConfigs(configs, fn) +} + func defaultRowShards(schema string) uint32 { switch schema { case "v1", "v2", "v3", "v4", "v5", "v6", "v9": diff --git a/pkg/storage/factory.go b/pkg/storage/factory.go index d22fee324b265..e800dbf7fa1e8 100644 --- a/pkg/storage/factory.go +++ b/pkg/storage/factory.go @@ -23,6 +23,7 @@ import ( "github.com/grafana/loki/pkg/storage/chunk/client/openstack" "github.com/grafana/loki/pkg/storage/chunk/client/testutils" "github.com/grafana/loki/pkg/storage/config" + "github.com/grafana/loki/pkg/storage/stores/indexshipper" "github.com/grafana/loki/pkg/storage/stores/series/index" "github.com/grafana/loki/pkg/storage/stores/shipper" "github.com/grafana/loki/pkg/storage/stores/shipper/downloads" @@ -61,8 +62,9 @@ type Config struct { DisableBroadIndexQueries bool `yaml:"disable_broad_index_queries"` MaxParallelGetChunk int `yaml:"max_parallel_get_chunk"` - MaxChunkBatchSize int `yaml:"max_chunk_batch_size"` - BoltDBShipperConfig shipper.Config `yaml:"boltdb_shipper"` + MaxChunkBatchSize int `yaml:"max_chunk_batch_size"` + BoltDBShipperConfig shipper.Config `yaml:"boltdb_shipper"` + TSDBShipperConfig indexshipper.Config `yaml:"tsdb_shipper"` // Config for using AsyncStore when using async index stores like `boltdb-shipper`. // It is required for getting chunk ids of recently flushed chunks from the ingesters. @@ -89,6 +91,7 @@ func (cfg *Config) RegisterFlags(f *flag.FlagSet) { f.IntVar(&cfg.MaxParallelGetChunk, "store.max-parallel-get-chunk", 150, "Maximum number of parallel chunk reads.") cfg.BoltDBShipperConfig.RegisterFlags(f) f.IntVar(&cfg.MaxChunkBatchSize, "store.max-chunk-batch-size", 50, "The maximum number of chunks to fetch per batch.") + cfg.TSDBShipperConfig.RegisterFlagsWithPrefix("tsdb.", f) } // Validate config and returns error on failure @@ -150,7 +153,7 @@ func NewIndexClient(name string, cfg Config, schemaCfg config.SchemaConfig, limi return boltDBIndexClientWithShipper, nil } - if shouldUseIndexGatewayClient(cfg) { + if shouldUseBoltDBIndexGatewayClient(cfg) { gateway, err := shipper.NewGatewayClient(cfg.BoltDBShipperConfig.IndexGatewayClientConfig, registerer, util_log.Logger) if err != nil { return nil, err diff --git a/pkg/storage/store.go b/pkg/storage/store.go index 926a535cabb6a..c13f4f9f0a660 100644 --- a/pkg/storage/store.go +++ b/pkg/storage/store.go @@ -23,13 +23,16 @@ import ( "github.com/grafana/loki/pkg/storage/chunk/fetcher" "github.com/grafana/loki/pkg/storage/config" "github.com/grafana/loki/pkg/storage/stores" + "github.com/grafana/loki/pkg/storage/stores/indexshipper" "github.com/grafana/loki/pkg/storage/stores/series" "github.com/grafana/loki/pkg/storage/stores/series/index" "github.com/grafana/loki/pkg/storage/stores/shipper" "github.com/grafana/loki/pkg/storage/stores/shipper/indexgateway" + "github.com/grafana/loki/pkg/storage/stores/tsdb" "github.com/grafana/loki/pkg/usagestats" "github.com/grafana/loki/pkg/util" "github.com/grafana/loki/pkg/util/deletion" + util_log "github.com/grafana/loki/pkg/util/log" ) var ( @@ -184,7 +187,7 @@ func (s *store) chunkClientForPeriod(p config.PeriodConfig) (client.Client, erro return chunks, nil } -func shouldUseIndexGatewayClient(cfg Config) bool { +func 
shouldUseBoltDBIndexGatewayClient(cfg Config) bool { if cfg.BoltDBShipperConfig.Mode != shipper.ModeReadOnly || cfg.BoltDBShipperConfig.IndexGatewayClientConfig.Disabled { return false } @@ -198,11 +201,60 @@ func shouldUseIndexGatewayClient(cfg Config) bool { } func (s *store) storeForPeriod(p config.PeriodConfig, chunkClient client.Client, f *fetcher.Fetcher) (stores.ChunkWriter, stores.Index, func(), error) { - // todo switch tsdb. - indexClientReg := prometheus.WrapRegistererWith( prometheus.Labels{"component": "index-store-" + p.From.String()}, s.registerer) + if p.IndexType == config.TSDBType { + var ( + nodeName = s.cfg.TSDBShipperConfig.IngesterName + dir = s.cfg.TSDBShipperConfig.ActiveIndexDirectory + ) + tsdbMetrics := tsdb.NewMetrics(indexClientReg) + objectClient, err := NewObjectClient(s.cfg.TSDBShipperConfig.SharedStoreType, s.cfg, s.clientMetrics) + if err != nil { + return nil, nil, nil, err + } + + shpr, err := indexshipper.NewIndexShipper( + s.cfg.TSDBShipperConfig, + objectClient, + s.limits, + tsdb.OpenShippableTSDB, + ) + if err != nil { + return nil, nil, nil, err + } + tsdbManager := tsdb.NewTSDBManager( + nodeName, + dir, + shpr, + p.IndexTables.Period, + util_log.Logger, + tsdbMetrics, + ) + // TODO(owen-d): Only need HeadManager + // on the ingester. Otherwise, the TSDBManager is sufficient + headManager := tsdb.NewHeadManager( + util_log.Logger, + dir, + tsdbMetrics, + tsdbManager, + ) + if err := headManager.Start(); err != nil { + return nil, nil, nil, err + } + idx := tsdb.NewIndexClient(headManager, p) + writer := tsdb.NewChunkWriter(f, p, headManager) + + // TODO(owen-d): add TSDB index-gateway support + + return writer, idx, + func() { + chunkClient.Stop() + f.Stop() + }, nil + } + idx, err := NewIndexClient(p.IndexType, s.cfg, s.schemaCfg, s.limits, s.clientMetrics, indexClientReg) if err != nil { return nil, nil, nil, errors.Wrap(err, "error creating index client") @@ -222,7 +274,7 @@ func (s *store) storeForPeriod(p config.PeriodConfig, chunkClient client.Client, index stores.Index = seriesdIndex ) - if shouldUseIndexGatewayClient(s.cfg) { + if shouldUseBoltDBIndexGatewayClient(s.cfg) { // inject the index-gateway client into the index store gw, err := shipper.NewGatewayClient(s.cfg.BoltDBShipperConfig.IndexGatewayClientConfig, indexClientReg, s.logger) if err != nil { diff --git a/pkg/storage/stores/indexshipper/downloads/index_set.go b/pkg/storage/stores/indexshipper/downloads/index_set.go index d9fa6dcda2fe1..262e07b480e9e 100644 --- a/pkg/storage/stores/indexshipper/downloads/index_set.go +++ b/pkg/storage/stores/indexshipper/downloads/index_set.go @@ -7,6 +7,7 @@ import ( "io/ioutil" "os" "path/filepath" + "strings" "sync" "time" @@ -14,6 +15,7 @@ import ( "github.com/go-kit/log/level" "github.com/grafana/dskit/concurrency" + "github.com/grafana/loki/pkg/chunkenc" "github.com/grafana/loki/pkg/storage/chunk/client/util" "github.com/grafana/loki/pkg/storage/stores/indexshipper/index" "github.com/grafana/loki/pkg/storage/stores/shipper/storage" @@ -22,6 +24,10 @@ import ( "github.com/grafana/loki/pkg/util/spanlogger" ) +const ( + gzipExtension = ".gz" +) + type IndexSet interface { Init() error Close() @@ -300,11 +306,12 @@ func (t *indexSet) checkStorageForUpdates(ctx context.Context, lock bool) (toDow } for _, file := range files { - listedDBs[file.Name] = struct{}{} + normalized := strings.TrimSuffix(file.Name, gzipExtension) + listedDBs[normalized] = struct{}{} // Checking whether file was already downloaded, if not, download it. 
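+		// Names are compared without the .gz suffix: compressed objects are stored
+		// with the extension but tracked locally without it.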
 		// We do not ever upload files in the object store with the same name but different contents so we do not consider downloading modified files again.
-		_, ok := t.index[file.Name]
+		_, ok := t.index[normalized]
 		if !ok {
 			toDownload = append(toDownload, file)
 		}
@@ -323,11 +330,65 @@ func (t *indexSet) AwaitReady(ctx context.Context) error {
 	return t.indexMtx.awaitReady(ctx)
 }

-func (t *indexSet) downloadFileFromStorage(ctx context.Context, fileName, folderPathForTable string) error {
-	return shipper_util.DownloadFileFromStorage(filepath.Join(folderPathForTable, fileName), shipper_util.IsCompressedFile(fileName),
-		true, shipper_util.LoggerWithFilename(t.logger, fileName), func() (io.ReadCloser, error) {
+func (t *indexSet) downloadFileFromStorage(ctx context.Context, fileName, folderPathForTable string) (string, error) {
+	decompress := shipper_util.IsCompressedFile(fileName)
+	dst := filepath.Join(folderPathForTable, fileName)
+	if decompress {
+		dst = strings.TrimSuffix(dst, gzipExtension)
+	}
+	return filepath.Base(dst), downloadFileFromStorage(
+		dst,
+		decompress,
+		true,
+		shipper_util.LoggerWithFilename(t.logger, fileName),
+		func() (io.ReadCloser, error) {
 			return t.baseIndexSet.GetFile(ctx, t.tableName, t.userID, fileName)
-		})
+		},
+	)
+}
+
+// downloadFileFromStorage downloads a file from storage to the given location.
+func downloadFileFromStorage(destination string, decompressFile bool, sync bool, logger log.Logger, getFileFunc shipper_util.GetFileFunc) error {
+	start := time.Now()
+	readCloser, err := getFileFunc()
+	if err != nil {
+		return err
+	}
+
+	defer func() {
+		if err := readCloser.Close(); err != nil {
+			level.Error(logger).Log("msg", "failed to close read closer", "err", err)
+		}
+	}()
+
+	f, err := os.Create(destination)
+	if err != nil {
+		return err
+	}
+
+	defer func() {
+		if err := f.Close(); err != nil {
+			level.Warn(logger).Log("msg", "failed to close file", "file", destination)
+		}
+	}()
+
+	var objectReader io.Reader = readCloser
+	if decompressFile {
+		decompressedReader := chunkenc.Gzip.GetReader(readCloser)
+		defer chunkenc.Gzip.PutReader(decompressedReader)
+
+		objectReader = decompressedReader
+	}
+
+	_, err = io.Copy(f, objectReader)
+	if err != nil {
+		return err
+	}
+
+	level.Info(logger).Log("msg", "downloaded file", "total_time", time.Since(start))
+	if sync {
+		return f.Sync()
+	}
+	return nil
+}

 // doConcurrentDownload downloads objects (files) concurrently. It ignores only missing file errors caused by removal of file by compaction.
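Worth spelling out why the destination path above uses strings.TrimSuffix rather than strings.Trim: Trim treats its second argument as a set of runes to strip from both ends, not as a literal suffix, so it can eat legitimate trailing characters of a file name. A minimal, self-contained sketch (the file name is hypothetical):

	package main

	import (
		"fmt"
		"strings"
	)

	func main() {
		// hypothetical index file name that happens to end in 'z'
		name := "compactor-1650000000z.gz"
		fmt.Println(strings.Trim(name, ".gz"))       // "compactor-1650000000" (trailing 'z' stripped too)
		fmt.Println(strings.TrimSuffix(name, ".gz")) // "compactor-1650000000z" (only the ".gz" suffix removed)
	}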
@@ -337,8 +398,7 @@ func (t *indexSet) doConcurrentDownload(ctx context.Context, files []storage.Ind
 	downloadedFilesMtx := sync.Mutex{}

 	err := concurrency.ForEachJob(ctx, len(files), maxDownloadConcurrency, func(ctx context.Context, idx int) error {
-		fileName := files[idx].Name
-		err := t.downloadFileFromStorage(ctx, fileName, t.cacheLocation)
+		fileName, err := t.downloadFileFromStorage(ctx, files[idx].Name, t.cacheLocation)
 		if err != nil {
 			if t.baseIndexSet.IsFileNotFoundErr(err) {
 				level.Info(util_log.Logger).Log("msg", fmt.Sprintf("ignoring missing file %s, possibly removed during compaction", fileName))
diff --git a/pkg/storage/stores/indexshipper/downloads/table_manager.go b/pkg/storage/stores/indexshipper/downloads/table_manager.go
index caf1a7b358f7a..bd100512de845 100644
--- a/pkg/storage/stores/indexshipper/downloads/table_manager.go
+++ b/pkg/storage/stores/indexshipper/downloads/table_manager.go
@@ -241,18 +241,19 @@ func (tm *tableManager) ensureQueryReadiness(ctx context.Context) error {
 		return err
 	}

-	// regex for finding daily tables which have a 5 digit number at the end.
-	re, err := regexp.Compile(`.+[0-9]{5}$`)
+	// regexp for finding the trailing index bucket number in the table name
+	re, err := regexp.Compile(`[0-9]+$`)
 	if err != nil {
 		return err
 	}

 	for _, tableName := range tables {
-		if !re.MatchString(tableName) {
+		match := re.Find([]byte(tableName))
+		if match == nil {
 			continue
 		}

-		tableNumber, err := strconv.ParseInt(tableName[len(tableName)-5:], 10, 64)
+		tableNumber, err := strconv.ParseInt(string(match), 10, 64)
 		if err != nil {
 			return err
 		}
diff --git a/pkg/storage/stores/indexshipper/shipper.go b/pkg/storage/stores/indexshipper/shipper.go
index 9353fa5af55b5..fd433c424be29 100644
--- a/pkg/storage/stores/indexshipper/shipper.go
+++ b/pkg/storage/stores/indexshipper/shipper.go
@@ -55,7 +55,6 @@ type Config struct {
 	ResyncInterval         time.Duration `yaml:"resync_interval"`
 	QueryReadyNumDays      int           `yaml:"query_ready_num_days"`

-	UploaderName           string
 	IngesterName           string
 	Mode                   Mode
 	IngesterDBRetainPeriod time.Duration
@@ -83,9 +82,10 @@ type indexShipper struct {

 // NewIndexShipper creates a shipper for providing index store functionality using index files and object storage.
 // It manages the whole life cycle of uploading the index and downloading the index at query time.
-func NewIndexShipper(cfg Config, storageClient client.ObjectClient, limits downloads.Limits) (IndexShipper, error) {
+func NewIndexShipper(cfg Config, storageClient client.ObjectClient, limits downloads.Limits, open index.OpenIndexFileFunc) (IndexShipper, error) {
 	shipper := indexShipper{
-		cfg: cfg,
+		cfg:               cfg,
+		openIndexFileFunc: open,
 	}

 	err := shipper.init(storageClient, limits)
@@ -103,7 +103,6 @@ func (s *indexShipper) init(storageClient client.ObjectClient, limits downloads.
 	if s.cfg.Mode != ModeReadOnly {
 		cfg := uploads.Config{
-			Uploader:       s.cfg.UploaderName,
 			UploadInterval: UploadInterval,
 			DBRetainPeriod: s.cfg.IngesterDBRetainPeriod,
 		}
diff --git a/pkg/storage/stores/indexshipper/uploads/index_set.go b/pkg/storage/stores/indexshipper/uploads/index_set.go
index 1ffdba8dd88b2..e99181169751a 100644
--- a/pkg/storage/stores/indexshipper/uploads/index_set.go
+++ b/pkg/storage/stores/indexshipper/uploads/index_set.go
@@ -31,7 +31,6 @@ type indexSet struct {
 	storageIndexSet   storage.IndexSet
 	tableName, userID string
 	logger            log.Logger
-	uploader          string

 	index    map[string]index.Index
 	indexMtx sync.RWMutex
@@ -238,14 +237,5 @@ func (t *indexSet) removeIndex(name string) error {
 }

 func (t *indexSet) buildFileName(indexName string) string {
-	// Files are stored with -
-	fileName := fmt.Sprintf("%s-%s", t.uploader, indexName)
-
-	// if the file is a migrated one then don't add its name to the object key otherwise we would re-upload them again here with a different name.
-	// This is kept for historic reasons of boltdb-shipper.
-	if t.tableName == indexName {
-		fileName = t.uploader
-	}
-
-	return fmt.Sprintf("%s.gz", fileName)
+	return fmt.Sprintf("%s.gz", indexName)
 }
diff --git a/pkg/storage/stores/indexshipper/uploads/table.go b/pkg/storage/stores/indexshipper/uploads/table.go
index 50cdc077c460b..f88283581eb9d 100644
--- a/pkg/storage/stores/indexshipper/uploads/table.go
+++ b/pkg/storage/stores/indexshipper/uploads/table.go
@@ -32,7 +32,6 @@ type Table interface {
 // All the public methods are concurrency safe and take care of mutexes to avoid any data race.
 type table struct {
 	name     string
-	uploader string

 	baseUserIndexSet, baseCommonIndexSet storage.IndexSet
 	logger log.Logger
@@ -41,10 +40,9 @@
 }

 // NewTable creates a new table instance.
-func NewTable(name, uploader string, storageClient storage.Client) Table {
+func NewTable(name string, storageClient storage.Client) Table {
 	return &table{
 		name:               name,
-		uploader:           uploader,
 		baseUserIndexSet:   storage.NewIndexSet(storageClient, true),
 		baseCommonIndexSet: storage.NewIndexSet(storageClient, false),
 		logger:             log.With(util_log.Logger, "table-name", name),
diff --git a/pkg/storage/stores/indexshipper/uploads/table_manager.go b/pkg/storage/stores/indexshipper/uploads/table_manager.go
index 285f7e6bd31f8..ec99af79e2858 100644
--- a/pkg/storage/stores/indexshipper/uploads/table_manager.go
+++ b/pkg/storage/stores/indexshipper/uploads/table_manager.go
@@ -13,7 +13,6 @@ import (
 )

 type Config struct {
-	Uploader       string
 	UploadInterval time.Duration
 	DBRetainPeriod time.Duration
 }
@@ -101,7 +100,7 @@ func (tm *tableManager) getOrCreateTable(tableName string) Table {
 		table, ok = tm.tables[tableName]
 		if !ok {
-			table = NewTable(tableName, tm.cfg.Uploader, tm.storageClient)
+			table = NewTable(tableName, tm.storageClient)
 			tm.tables[tableName] = table
 		}
 	}
diff --git a/pkg/storage/stores/indexshipper/uploads/table_manager_test.go b/pkg/storage/stores/indexshipper/uploads/table_manager_test.go
index 71267fa3e3501..f1f31263865e0 100644
--- a/pkg/storage/stores/indexshipper/uploads/table_manager_test.go
+++ b/pkg/storage/stores/indexshipper/uploads/table_manager_test.go
@@ -30,7 +30,6 @@ func buildTestTableManager(t *testing.T, testDir string) (TableManager, stopFunc
 	storageClient := buildTestStorageClient(t, testDir)

 	cfg := Config{
-		Uploader:       "test-table-manager",
 		UploadInterval: time.Hour,
 	}
 	tm, err := NewTableManager(cfg, storageClient)
diff --git a/pkg/storage/stores/indexshipper/uploads/table_test.go b/pkg/storage/stores/indexshipper/uploads/table_test.go
index b124a7ac51e65..c518cf29c3c4a 100644
--- a/pkg/storage/stores/indexshipper/uploads/table_test.go
+++ b/pkg/storage/stores/indexshipper/uploads/table_test.go
@@ -13,13 +13,12 @@ import (

 const (
 	testTableName = "test-table"
-	uploader      = "test-uploader"
 )

 func TestTable(t *testing.T) {
 	tempDir := t.TempDir()
 	storageClient := buildTestStorageClient(t, tempDir)

-	testTable := NewTable(testTableName, uploader, storageClient)
+	testTable := NewTable(testTableName, storageClient)
 	defer testTable.Stop()

 	for userIdx := 0; userIdx < 2; userIdx++ {
diff --git a/pkg/storage/stores/series/series_store_write.go b/pkg/storage/stores/series/series_store_write.go
index 73e47f21fe622..790d520a586cc 100644
--- a/pkg/storage/stores/series/series_store_write.go
+++ b/pkg/storage/stores/series/series_store_write.go
@@ -20,13 +20,13 @@ import (
 )

 var (
-	dedupedChunksTotal = promauto.NewCounter(prometheus.CounterOpts{
+	DedupedChunksTotal = promauto.NewCounter(prometheus.CounterOpts{
 		Namespace: "loki",
 		Name:      "chunk_store_deduped_chunks_total",
 		Help:      "Count of chunks which were not stored because they have already been stored by another replica.",
 	})

-	indexEntriesPerChunk = promauto.NewHistogram(prometheus.HistogramOpts{
+	IndexEntriesPerChunk = promauto.NewHistogram(prometheus.HistogramOpts{
 		Namespace: "loki",
 		Name:      "chunk_store_index_entries_per_chunk",
 		Help:      "Number of entries written to storage per chunk.",
@@ -86,7 +86,7 @@ func (c *Writer) PutOne(ctx context.Context, from, through model.Time, chk chunk
 	if len(found) > 0 {
 		writeChunk = false
-		dedupedChunksTotal.Inc()
+		DedupedChunksTotal.Inc()
 	}

 	// If we don't have to write the chunk and DisableIndexDeduplication is false, we do not have to do anything.
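The rename to the exported DedupedChunksTotal and IndexEntriesPerChunk above is deliberate: the new TSDB chunk writer introduced later in this patch (pkg/storage/stores/tsdb/chunkwriter.go) increments series.DedupedChunksTotal directly, so these metrics must be visible outside the series package.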
@@ -170,7 +170,7 @@ func (c *Writer) calculateIndexEntries(ctx context.Context, from, through model. } entries = append(entries, chunkEntries...) - indexEntriesPerChunk.Observe(float64(len(entries))) + IndexEntriesPerChunk.Observe(float64(len(entries))) // Remove duplicate entries based on tableName:hashValue:rangeValue result := c.indexWriter.NewWriteBatch() diff --git a/pkg/storage/stores/shipper/shipper_index_client.go b/pkg/storage/stores/shipper/shipper_index_client.go index 700f7508a0e45..c57e53fbdff31 100644 --- a/pkg/storage/stores/shipper/shipper_index_client.go +++ b/pkg/storage/stores/shipper/shipper_index_client.go @@ -80,16 +80,20 @@ type Config struct { // RegisterFlags registers flags. func (cfg *Config) RegisterFlags(f *flag.FlagSet) { - cfg.IndexGatewayClientConfig.RegisterFlagsWithPrefix("boltdb.shipper.index-gateway-client", f) - - f.StringVar(&cfg.ActiveIndexDirectory, "boltdb.shipper.active-index-directory", "", "Directory where ingesters would write boltdb files which would then be uploaded by shipper to configured storage") - f.StringVar(&cfg.SharedStoreType, "boltdb.shipper.shared-store", "", "Shared store for keeping boltdb files. Supported types: gcs, s3, azure, filesystem") - f.StringVar(&cfg.SharedStoreKeyPrefix, "boltdb.shipper.shared-store.key-prefix", "index/", "Prefix to add to Object Keys in Shared store. Path separator(if any) should always be a '/'. Prefix should never start with a separator but should always end with it") - f.StringVar(&cfg.CacheLocation, "boltdb.shipper.cache-location", "", "Cache location for restoring boltDB files for queries") - f.DurationVar(&cfg.CacheTTL, "boltdb.shipper.cache-ttl", 24*time.Hour, "TTL for boltDB files restored in cache for queries") - f.DurationVar(&cfg.ResyncInterval, "boltdb.shipper.resync-interval", 5*time.Minute, "Resync downloaded files with the storage") - f.IntVar(&cfg.QueryReadyNumDays, "boltdb.shipper.query-ready-num-days", 0, "Number of days of common index to be kept downloaded for queries. For per tenant index query readiness, use limits overrides config.") - f.BoolVar(&cfg.BuildPerTenantIndex, "boltdb.shipper.build-per-tenant-index", false, "Build per tenant index files") + cfg.RegisterFlagsWithPrefix("boltdb.", f) +} + +func (cfg *Config) RegisterFlagsWithPrefix(prefix string, f *flag.FlagSet) { + cfg.IndexGatewayClientConfig.RegisterFlagsWithPrefix(prefix+"shipper.index-gateway-client", f) + + f.StringVar(&cfg.ActiveIndexDirectory, prefix+"shipper.active-index-directory", "", "Directory where ingesters would write boltdb files which would then be uploaded by shipper to configured storage") + f.StringVar(&cfg.SharedStoreType, prefix+"shipper.shared-store", "", "Shared store for keeping boltdb files. Supported types: gcs, s3, azure, filesystem") + f.StringVar(&cfg.SharedStoreKeyPrefix, prefix+"shipper.shared-store.key-prefix", "index/", "Prefix to add to Object Keys in Shared store. Path separator(if any) should always be a '/'. 
Prefix should never start with a separator but should always end with it") + f.StringVar(&cfg.CacheLocation, prefix+"shipper.cache-location", "", "Cache location for restoring boltDB files for queries") + f.DurationVar(&cfg.CacheTTL, prefix+"shipper.cache-ttl", 24*time.Hour, "TTL for boltDB files restored in cache for queries") + f.DurationVar(&cfg.ResyncInterval, prefix+"shipper.resync-interval", 5*time.Minute, "Resync downloaded files with the storage") + f.IntVar(&cfg.QueryReadyNumDays, prefix+"shipper.query-ready-num-days", 0, "Number of days of common index to be kept downloaded for queries. For per tenant index query readiness, use limits overrides config.") + f.BoolVar(&cfg.BuildPerTenantIndex, prefix+"shipper.build-per-tenant-index", false, "Build per tenant index files") } func (cfg *Config) Validate() error { diff --git a/pkg/storage/stores/tsdb/index/builder.go b/pkg/storage/stores/tsdb/builder.go similarity index 63% rename from pkg/storage/stores/tsdb/index/builder.go rename to pkg/storage/stores/tsdb/builder.go index 99cd1ec9ea005..3bfb2802ceda3 100644 --- a/pkg/storage/stores/tsdb/index/builder.go +++ b/pkg/storage/stores/tsdb/builder.go @@ -1,4 +1,4 @@ -package index +package tsdb import ( "context" @@ -13,33 +13,9 @@ import ( "github.com/prometheus/prometheus/storage" chunk_util "github.com/grafana/loki/pkg/storage/chunk/client/util" + "github.com/grafana/loki/pkg/storage/stores/tsdb/index" ) -// Identifier has all the information needed to resolve a TSDB index -// Notably this abstracts away OS path separators, etc. -type Identifier struct { - Tenant string - From, Through model.Time - Checksum uint32 -} - -func (i Identifier) String() string { - return filepath.Join( - i.Tenant, - fmt.Sprintf( - "%s-%d-%d-%x.tsdb", - IndexFilename, - i.From, - i.Through, - i.Checksum, - ), - ) -} - -func (i Identifier) FilePath(parentDir string) string { - return filepath.Join(parentDir, i.String()) -} - // Builder is a helper used to create tsdb indices. // It can accept streams in any order and will create the tsdb // index appropriately via `Build()` @@ -51,19 +27,21 @@ type Builder struct { type stream struct { labels labels.Labels - chunks ChunkMetas + fp model.Fingerprint + chunks index.ChunkMetas } func NewBuilder() *Builder { return &Builder{streams: make(map[string]*stream)} } -func (b *Builder) AddSeries(ls labels.Labels, chks []ChunkMeta) { +func (b *Builder) AddSeries(ls labels.Labels, fp model.Fingerprint, chks []index.ChunkMeta) { id := ls.String() s, ok := b.streams[id] if !ok { s = &stream{ labels: ls, + fp: fp, } b.streams[id] = s } @@ -71,21 +49,28 @@ func (b *Builder) AddSeries(ls labels.Labels, chks []ChunkMeta) { s.chunks = append(s.chunks, chks...) } -func (b *Builder) Build(ctx context.Context, dir, tenant string) (id Identifier, err error) { +func (b *Builder) Build( + ctx context.Context, + scratchDir string, + // Determines how to create the resulting Identifier and file name. + // This is variable as we use Builder for multiple reasons, + // such as building multi-tenant tsdbs on the ingester + // and per tenant ones during compaction + createFn func(from, through model.Time, checksum uint32) Identifier, +) (id Identifier, err error) { // Ensure the parent dir exists (i.e. 
index///) - parent := filepath.Join(dir, tenant) - if parent != "" { - if err := chunk_util.EnsureDirectory(parent); err != nil { + if scratchDir != "" { + if err := chunk_util.EnsureDirectory(scratchDir); err != nil { return id, err } } // First write tenant/index-bounds-random.staging rng := rand.Int63() - name := fmt.Sprintf("%s-%x.staging", IndexFilename, rng) - tmpPath := filepath.Join(parent, name) + name := fmt.Sprintf("%s-%x.staging", index.IndexFilename, rng) + tmpPath := filepath.Join(scratchDir, name) - writer, err := NewWriter(ctx, tmpPath) + writer, err := index.NewWriter(ctx, tmpPath) if err != nil { return id, err } @@ -128,7 +113,7 @@ func (b *Builder) Build(ctx context.Context, dir, tenant string) (id Identifier, // Add series for i, s := range streams { - if err := writer.AddSeries(storage.SeriesRef(i), s.labels, s.chunks.finalize()...); err != nil { + if err := writer.AddSeries(storage.SeriesRef(i), s.labels, s.fp, s.chunks.Finalize()...); err != nil { return id, err } } @@ -137,7 +122,7 @@ func (b *Builder) Build(ctx context.Context, dir, tenant string) (id Identifier, return id, err } - reader, err := NewFileReader(tmpPath) + reader, err := index.NewFileReader(tmpPath) if err != nil { return id, err } @@ -145,12 +130,7 @@ func (b *Builder) Build(ctx context.Context, dir, tenant string) (id Identifier, from, through := reader.Bounds() // load the newly compacted index to grab checksum, promptly close - id = Identifier{ - Tenant: tenant, - From: model.Time(from), - Through: model.Time(through), - Checksum: reader.Checksum(), - } + dst := createFn(model.Time(from), model.Time(through), reader.Checksum()) reader.Close() defer func() { @@ -159,11 +139,13 @@ func (b *Builder) Build(ctx context.Context, dir, tenant string) (id Identifier, } }() - dst := id.FilePath(dir) - - if err := os.Rename(tmpPath, dst); err != nil { + if err := chunk_util.EnsureDirectory(filepath.Dir(dst.Path())); err != nil { + return id, err + } + dstPath := dst.Path() + if err := os.Rename(tmpPath, dstPath); err != nil { return id, err } - return id, nil + return dst, nil } diff --git a/pkg/storage/stores/tsdb/chunkwriter.go b/pkg/storage/stores/tsdb/chunkwriter.go new file mode 100644 index 0000000000000..5b2da8f0db4c5 --- /dev/null +++ b/pkg/storage/stores/tsdb/chunkwriter.go @@ -0,0 +1,100 @@ +package tsdb + +import ( + "context" + + "github.com/go-kit/log/level" + "github.com/pkg/errors" + "github.com/prometheus/common/model" + "github.com/prometheus/prometheus/model/labels" + + "github.com/grafana/loki/pkg/storage/chunk" + "github.com/grafana/loki/pkg/storage/chunk/fetcher" + "github.com/grafana/loki/pkg/storage/config" + "github.com/grafana/loki/pkg/storage/stores/series" + "github.com/grafana/loki/pkg/storage/stores/tsdb/index" + "github.com/grafana/loki/pkg/util/spanlogger" +) + +type IndexWriter interface { + Append(userID string, ls labels.Labels, chks index.ChunkMetas) error +} + +type ChunkWriter struct { + schemaCfg config.SchemaConfig + fetcher *fetcher.Fetcher + indexWriter IndexWriter +} + +func NewChunkWriter( + fetcher *fetcher.Fetcher, + pd config.PeriodConfig, + indexWriter IndexWriter, +) *ChunkWriter { + return &ChunkWriter{ + schemaCfg: config.SchemaConfig{ + Configs: []config.PeriodConfig{pd}, + }, + fetcher: fetcher, + indexWriter: indexWriter, + } +} + +func (w *ChunkWriter) Put(ctx context.Context, chunks []chunk.Chunk) error { + for _, chunk := range chunks { + if err := w.PutOne(ctx, chunk.From, chunk.Through, chunk); err != nil { + return err + } + } + return nil +} + 
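+// The indexWriter field is an interface seam: any type with a matching Append
+// method satisfies IndexWriter (storeForPeriod wires in the HeadManager).
+// A minimal in-memory sketch, illustrative only and not part of this patch,
+// could look like:
+//
+//	type memIndexWriter struct {
+//		// keyed by userID + "/" + labels string; hypothetical helper for tests
+//		entries map[string]index.ChunkMetas
+//	}
+//
+//	func (m *memIndexWriter) Append(userID string, ls labels.Labels, chks index.ChunkMetas) error {
+//		key := userID + "/" + ls.String()
+//		m.entries[key] = append(m.entries[key], chks...)
+//		return nil
+//	}
+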
+func (w *ChunkWriter) PutOne(ctx context.Context, from, through model.Time, chk chunk.Chunk) error { + log, ctx := spanlogger.New(ctx, "SeriesStore.PutOne") + defer log.Finish() + + // with local TSDB indices, we _always_ write the index entry + // to avoid data loss if we lose an ingester's disk + // but we can skip writing the chunk if another replica + // has already written it to storage. + writeChunk := true + + // If this chunk is in cache it must already be in the database so we don't need to write it again + found, _, _, _ := w.fetcher.Cache().Fetch(ctx, []string{w.schemaCfg.ExternalKey(chk.ChunkRef)}) + + if len(found) > 0 { + writeChunk = false + series.DedupedChunksTotal.Inc() + } + + chunks := []chunk.Chunk{chk} + + c := w.fetcher.Client() + if writeChunk { + if err := c.PutChunks(ctx, chunks); err != nil { + return errors.Wrap(err, "writing chunk") + } + } + + // Always write the index to benefit durability via replication factor. + metas := index.ChunkMetas{ + { + Checksum: chk.ChunkRef.Checksum, + MinTime: int64(chk.ChunkRef.From), + MaxTime: int64(chk.ChunkRef.Through), + KB: uint32(chk.Size()) / (1 << 10), + Entries: uint32(chk.Data.Entries()), + }, + } + if err := w.indexWriter.Append(chk.UserID, chk.Metric, metas); err != nil { + return errors.Wrap(err, "writing index entry") + } + + if writeChunk { + if cacheErr := w.fetcher.WriteBackCache(ctx, chunks); cacheErr != nil { + level.Warn(log).Log("msg", "could not store chunks in chunk cache", "err", cacheErr) + } + } + + return nil +} diff --git a/pkg/storage/stores/tsdb/compact.go b/pkg/storage/stores/tsdb/compact.go index a5df265ff8252..cb2db193c95fd 100644 --- a/pkg/storage/stores/tsdb/compact.go +++ b/pkg/storage/stores/tsdb/compact.go @@ -2,6 +2,7 @@ package tsdb import ( "context" + "fmt" "github.com/prometheus/common/model" "github.com/prometheus/prometheus/model/labels" @@ -23,28 +24,45 @@ func NewCompactor(tenant, parentDir string) *Compactor { } } -func (c *Compactor) Compact(ctx context.Context, indices ...*TSDBIndex) (res index.Identifier, err error) { +func (c *Compactor) Compact(ctx context.Context, indices ...*TSDBIndex) (res Identifier, err error) { // No need to compact a single index file if len(indices) == 1 { - return indices[0].Identifier(c.tenant), nil + return newPrefixedIdentifier( + indices[0].Identifier(c.tenant), + c.parentDir, + c.parentDir, + ), + nil } - b := index.NewBuilder() - multi, err := NewMultiIndex(indices...) + ifcs := make([]Index, 0, len(indices)) + for _, idx := range indices { + ifcs = append(ifcs, idx) + } + + b := NewBuilder() + multi, err := NewMultiIndex(ifcs...) if err != nil { return res, err } + // TODO(owen-d): introduce parallelism + // Until then, // Instead of using the MultiIndex.forIndices helper, we loop over each sub-index manually. // The index builder is single threaded, so we avoid races. // Additionally, this increases the likelihood we add chunks in order // by processing the indices in ascending order. 
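+		// Compaction needs the concrete *TSDBIndex in order to iterate raw series
+		// via forSeries, hence the type assertion below.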
 	for _, idx := range multi.indices {
-		if err := idx.forSeries(
+		casted, ok := idx.(*TSDBIndex)
+		if !ok {
+			return nil, fmt.Errorf("expected tsdb index to compact, found: %T", idx)
+		}
+		if err := casted.forSeries(
+			ctx,
 			nil,
 			func(ls labels.Labels, _ model.Fingerprint, chks []index.ChunkMeta) {
 				// AddSeries copies chks into its own slice
-				b.AddSeries(ls.Copy(), chks)
+				b.AddSeries(ls.Copy(), model.Fingerprint(ls.Hash()), chks)
 			},
 			labels.MustNewMatcher(labels.MatchEqual, "", ""),
 		); err != nil {
@@ -52,5 +70,17 @@ func (c *Compactor) Compact(ctx context.Context, indices ...*TSDBIndex) (res ind
 		}
 	}

-	return b.Build(ctx, c.parentDir, c.tenant)
+	return b.Build(
+		ctx,
+		c.parentDir,
+		func(from, through model.Time, checksum uint32) Identifier {
+			id := SingleTenantTSDBIdentifier{
+				Tenant:   c.tenant,
+				From:     from,
+				Through:  through,
+				Checksum: checksum,
+			}
+			return newPrefixedIdentifier(id, c.parentDir, c.parentDir)
+		},
+	)
 }
diff --git a/pkg/storage/stores/tsdb/compact_test.go b/pkg/storage/stores/tsdb/compact_test.go
index 143b1fa4c8383..a7423fe34219c 100644
--- a/pkg/storage/stores/tsdb/compact_test.go
+++ b/pkg/storage/stores/tsdb/compact_test.go
@@ -356,7 +356,9 @@ func TestCompactor(t *testing.T) {
 			for _, cases := range tc.input {
 				idx := BuildIndex(t, dir, "fake", cases)
 				defer idx.Close()
-				indices = append(indices, idx)
+				casted, ok := idx.Index.(*TSDBIndex)
+				require.Equal(t, true, ok)
+				indices = append(indices, casted)
 			}

 			out, err := c.Compact(context.Background(), indices...)
@@ -364,8 +366,9 @@ func TestCompactor(t *testing.T) {
 				require.NotNil(t, err)
 				return
 			}
+
 			require.Nil(t, err)
-			idx, err := LoadTSDBIdentifier(dir, out)
+			idx, err := NewShippableTSDBFile(out, false)
 			require.Nil(t, err)
 			defer idx.Close()
diff --git a/pkg/storage/stores/tsdb/head.go b/pkg/storage/stores/tsdb/head.go
index ef4a3c0316d15..3e074ac372b7d 100644
--- a/pkg/storage/stores/tsdb/head.go
+++ b/pkg/storage/stores/tsdb/head.go
@@ -36,6 +36,8 @@ const (
 	// 1) Heads are per-tenant in Loki
 	// 2) Loki tends to have a few orders of magnitude less series per node than
 	//    Prometheus|Cortex|Mimir.
+	// Do not specify without bit shifting. This allows us to
+	// do shard index calculations via bitwise & rather than modulo.
 	defaultStripeSize = 64
 )

@@ -53,16 +55,36 @@ guaranteeing we maintain querying consistency for the entire data lifecycle.
 */

 // TODO(owen-d)
-type HeadMetrics struct {
-	seriesNotFound prometheus.Counter
+type Metrics struct {
+	seriesNotFound                prometheus.Counter
+	tsdbCreationsTotal            prometheus.Counter
+	tsdbCreationFailures          prometheus.Counter
+	tsdbManagerUpdatesTotal       prometheus.Counter
+	tsdbManagerUpdatesFailedTotal prometheus.Counter
 }

-func NewHeadMetrics(r prometheus.Registerer) *HeadMetrics {
-	return &HeadMetrics{
+func NewMetrics(r prometheus.Registerer) *Metrics {
+	return &Metrics{
 		seriesNotFound: promauto.With(r).NewCounter(prometheus.CounterOpts{
 			Name: "loki_tsdb_head_series_not_found_total",
 			Help: "Total number of requests for series that were not found.",
 		}),
+		tsdbCreationsTotal: promauto.With(r).NewCounter(prometheus.CounterOpts{
+			Name: "loki_tsdb_creations_total",
+			Help: "Total number of tsdb creations attempted",
+		}),
+		tsdbCreationFailures: promauto.With(r).NewCounter(prometheus.CounterOpts{
+			Name: "loki_tsdb_creations_failed_total",
+			Help: "Total number of tsdb creations failed",
+		}),
+		tsdbManagerUpdatesTotal: promauto.With(r).NewCounter(prometheus.CounterOpts{
+			Name: "loki_tsdb_manager_updates_total",
+			Help: "Total number of tsdb manager updates (loading/rotating tsdbs in mem)",
+		}),
+		tsdbManagerUpdatesFailedTotal: promauto.With(r).NewCounter(prometheus.CounterOpts{
+			Name: "loki_tsdb_manager_updates_failed_total",
+			Help: "Total number of tsdb manager update failures (loading/rotating tsdbs in mem)",
+		}),
 	}
 }

@@ -75,7 +97,7 @@ type Head struct {
 	// in the MemPostings, but is eventually discarded when we create a real TSDB index.
 	lastSeriesID atomic.Uint64

-	metrics *HeadMetrics
+	metrics *Metrics
 	logger  log.Logger

 	series *stripeSeries
@@ -86,7 +108,7 @@ type Head struct {
 	closed bool
 }

-func NewHead(tenant string, metrics *HeadMetrics, logger log.Logger) *Head {
+func NewHead(tenant string, metrics *Metrics, logger log.Logger) *Head {
 	return &Head{
 		tenant:  tenant,
 		metrics: metrics,
@@ -108,42 +130,44 @@ func (h *Head) MaxTime() int64 {
 	return h.maxTime.Load()
 }

-func (h *Head) updateMinMaxTime(mint, maxt int64) {
+// Will CAS until it successfully updates the bounds or the condition is no longer valid
+func updateMintMaxt(mint, maxt int64, mintSrc, maxtSrc *atomic.Int64) {
 	for {
-		lt := h.MinTime()
-		if mint >= lt {
+		lt := mintSrc.Load()
+		if mint >= lt && lt != 0 {
 			break
 		}
-		if h.minTime.CAS(lt, mint) {
+		if mintSrc.CAS(lt, mint) {
 			break
 		}
 	}
	for {
-		ht := h.MaxTime()
+		ht := maxtSrc.Load()
 		if maxt <= ht {
 			break
 		}
-		if h.maxTime.CAS(ht, maxt) {
+		if maxtSrc.CAS(ht, maxt) {
 			break
 		}
 	}
 }

 // Note: chks must not be nil or zero-length
-func (h *Head) Append(ls labels.Labels, chks index.ChunkMetas) {
+func (h *Head) Append(ls labels.Labels, chks index.ChunkMetas) (created bool, refID uint64) {
 	from, through := chks.Bounds()
 	var id uint64
-	created := h.series.Append(ls, chks, func() *memSeries {
+	created, refID = h.series.Append(ls, chks, func() *memSeries {
 		id = h.lastSeriesID.Inc()
 		return newMemSeries(id, ls)
 	})
-	h.updateMinMaxTime(int64(from), int64(through))
+	updateMintMaxt(int64(from), int64(through), &h.minTime, &h.maxTime)

 	if !created {
 		return
 	}
 	h.postings.Add(storage.SeriesRef(id), ls)
 	h.numSeries.Inc()
+	return
 }

 // seriesHashmap is a simple hashmap for memSeries by their label set.
It is built
@@ -211,7 +235,7 @@ func (s *stripeSeries) Append(
 	ls labels.Labels,
 	chks index.ChunkMetas,
 	createFn func() *memSeries,
-) (created bool) {
+) (created bool, refID uint64) {
 	fp := ls.Hash()
 	i := fp & uint64(s.shards-1)
 	mtx := &s.locks[i]
@@ -222,7 +246,7 @@ func (s *stripeSeries) Append(
 		series = createFn()
 		s.hashes[i].set(fp, series)

-		// the series locks are modulo'd by the ref, not fingerprint
+		// the series locks are determined by the ref, not the fingerprint
 		refIdx := series.ref & uint64(s.shards-1)
 		s.series[refIdx][series.ref] = series
 		created = true
@@ -231,6 +255,7 @@ func (s *stripeSeries) Append(

 	series.Lock()
 	series.chks = append(series.chks, chks...)
+	refID = series.ref
 	series.Unlock()

 	return
diff --git a/pkg/storage/stores/tsdb/head_manager.go b/pkg/storage/stores/tsdb/head_manager.go
new file mode 100644
index 0000000000000..93bfe015086f2
--- /dev/null
+++ b/pkg/storage/stores/tsdb/head_manager.go
@@ -0,0 +1,663 @@
+package tsdb
+
+import (
+	"context"
+	"fmt"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"sort"
+	"strconv"
+	"sync"
+	"time"
+
+	"github.com/cespare/xxhash"
+	"github.com/go-kit/log"
+	"github.com/go-kit/log/level"
+	"github.com/pkg/errors"
+	"github.com/prometheus/common/model"
+	"github.com/prometheus/prometheus/model/labels"
+	"github.com/prometheus/prometheus/tsdb/chunks"
+	"github.com/prometheus/prometheus/tsdb/record"
+	"go.uber.org/atomic"
+
+	"github.com/grafana/loki/pkg/storage/chunk"
+	"github.com/grafana/loki/pkg/storage/chunk/client/util"
+	"github.com/grafana/loki/pkg/storage/stores/tsdb/index"
+	"github.com/grafana/loki/pkg/util/wal"
+)
+
+type period time.Duration
+
+const defaultRotationPeriod = period(15 * time.Minute)
+
+// Do not specify without bit shifting. This allows us to
+// do shard index calculations via bitwise & rather than modulo.
+const defaultHeadManagerStripeSize = 1 << 7
+
+/*
+HeadManager both accepts flushed chunk writes
+and exposes the index interface for multiple tenants.
+It also handles updating an underlying WAL and periodically
+rotates both the tenant Heads and the underlying WAL, using
+the old versions to build + upload TSDB files.
+
+On disk, it looks like:
+
+tsdb/
+	 v1/
+		# scratch directory used for temp tsdb files during build stage
+		scratch/
+		# wal directory used to store WALs being written on the ingester.
+		# These are eventually shipped to storage as multi-tenant TSDB files
+		# and compacted into per-tenant indices
+		wal/
+
+	 # multitenant tsdb files which are created on the ingesters/shipped
+	 multitenant/
+			-.tsdb
+	 per_tenant/
+		# post-compaction tenant tsdbs which are grouped per
+		# period bucket
+		/
+			/
+				index---.tsdb
+*/
+
+type HeadManager struct {
+	log     log.Logger
+	dir     string
+	metrics *Metrics
+
+	// RLocked for all writes/reads,
+	// Locked before rotating heads/wal
+	mtx sync.RWMutex
+
+	// how often WALs should be rotated and TSDBs cut
+	period period
+
+	tsdbManager TSDBManager
+	active, prev *headWAL
+
+	shards                 int
+	activeHeads, prevHeads *tenantHeads
+
+	Index
+}
+
+func NewHeadManager(logger log.Logger, dir string, metrics *Metrics, tsdbManager TSDBManager) *HeadManager {
+	shards := defaultHeadManagerStripeSize
+	m := &HeadManager{
+		log:         log.With(logger, "component", "tsdb-head-manager"),
+		dir:         dir,
+		metrics:     metrics,
+		tsdbManager: tsdbManager,
+
+		period: defaultRotationPeriod,
+		shards: shards,
+	}
+
+	m.Index = LazyIndex(func() (Index, error) {
+		m.mtx.RLock()
+		defer m.mtx.RUnlock()
+
+		var indices []Index
+		if m.prevHeads != nil {
+			indices = append(indices, m.prevHeads)
+		}
+		if m.activeHeads != nil {
+			indices = append(indices, m.activeHeads)
+		}
+
+		indices = append(indices, m.tsdbManager)
+
+		return NewMultiIndex(indices...)
+
+	})
+
+	return m
+}
+
+func (m *HeadManager) Stop() error {
+	m.mtx.Lock()
+	defer m.mtx.Unlock()
+	return m.active.Stop()
+}
+
+func (m *HeadManager) Append(userID string, ls labels.Labels, chks index.ChunkMetas) error {
+	labelsBuilder := labels.NewBuilder(ls)
+	// TSDB doesn't need the __name__="log" convention the old chunk store index used.
+	labelsBuilder.Del("__name__")
+	metric := labelsBuilder.Labels()
+
+	m.mtx.RLock()
+	now := time.Now()
+	if m.period.PeriodFor(now) > m.period.PeriodFor(m.activeHeads.start) {
+		m.mtx.RUnlock()
+		if err := m.Rotate(now); err != nil {
+			return errors.Wrap(err, "rotating TSDB Head")
+		}
+		m.mtx.RLock()
+	}
+	defer m.mtx.RUnlock()
+	rec := m.activeHeads.Append(userID, metric, chks)
+	return m.active.Log(rec)
+}
+
+func (p period) PeriodFor(t time.Time) int {
+	return int(t.UnixNano() / int64(p))
+}
+
+func (p period) TimeForPeriod(n int) time.Time {
+	return time.Unix(0, int64(p)*int64(n))
+}
+
+func (m *HeadManager) Start() error {
+	if err := os.RemoveAll(filepath.Join(m.dir, "scratch")); err != nil {
+		return errors.Wrap(err, "removing tsdb scratch dir")
+	}
+
+	for _, d := range managerRequiredDirs(m.dir) {
+		if err := util.EnsureDirectory(d); err != nil {
+			return errors.Wrapf(err, "ensuring required directory exists: %s", d)
+		}
+	}
+
+	now := time.Now()
+	curPeriod := m.period.PeriodFor(now)
+
+	toRemove, err := m.shippedTSDBsBeforePeriod(curPeriod)
+	if err != nil {
+		return err
+	}
+
+	for _, x := range toRemove {
+		if err := os.RemoveAll(x); err != nil {
+			return errors.Wrapf(err, "removing tsdb: %s", x)
+		}
+	}
+
+	walsByPeriod, err := walsByPeriod(m.dir, m.period)
+	if err != nil {
+		return err
+	}
+
+	m.activeHeads = newTenantHeads(now, m.shards, m.metrics, m.log)
+
+	for _, group := range walsByPeriod {
+		if group.period < (curPeriod) {
+			if err := m.tsdbManager.BuildFromWALs(
+				m.period.TimeForPeriod(group.period),
+				group.wals,
+			); err != nil {
+				return errors.Wrap(err, "building tsdb")
+			}
+			// Now that we've built tsdbs of this data, we can safely remove the WALs
+			if err := m.removeWALGroup(group); err != nil {
+				return errors.Wrapf(err, "removing wals for period %d",
group.period) + } + } + + if group.period == curPeriod { + if err := recoverHead(m.dir, m.activeHeads, group.wals); err != nil { + return errors.Wrap(err, "recovering tsdb head from wal") + } + } + } + + nextWALPath := walPath(m.dir, now) + nextWAL, err := newHeadWAL(m.log, nextWALPath, now) + if err != nil { + return errors.Wrapf(err, "creating tsdb wal: %s", nextWALPath) + } + m.active = nextWAL + + return nil +} + +func managerRequiredDirs(parent string) []string { + return []string{ + managerScratchDir(parent), + managerWalDir(parent), + managerMultitenantDir(parent), + managerPerTenantDir(parent), + } +} +func managerScratchDir(parent string) string { + return filepath.Join(parent, "scratch") +} + +func managerWalDir(parent string) string { + return filepath.Join(parent, "wal") +} + +func managerMultitenantDir(parent string) string { + return filepath.Join(parent, "multitenant") +} + +func managerPerTenantDir(parent string) string { + return filepath.Join(parent, "per_tenant") +} + +func (m *HeadManager) Rotate(t time.Time) error { + // create new wal + nextWALPath := walPath(m.dir, t) + nextWAL, err := newHeadWAL(m.log, nextWALPath, t) + if err != nil { + return errors.Wrapf(err, "creating tsdb wal: %s during rotation", nextWALPath) + } + + // create new tenant heads + nextHeads := newTenantHeads(t, m.shards, m.metrics, m.log) + + stopPrev := func(s string) { + if m.prev != nil { + if err := m.prev.Stop(); err != nil { + level.Error(m.log).Log( + "msg", "failed stopping wal", + "period", m.period.PeriodFor(m.prev.initialized), + "err", err, + "wal", s, + ) + } + } + } + + stopPrev("previous cycle") // stop the previous wal if it hasn't been cleaned up yet + m.mtx.Lock() + m.prev = m.active + m.prevHeads = m.activeHeads + m.active = nextWAL + m.activeHeads = nextHeads + m.mtx.Unlock() + stopPrev("freshly rotated") // stop the newly rotated-out wal + + // build tsdb from rotated-out period + // TODO(owen-d): don't block Append() waiting for tsdb building. Use a work channel/etc + if m.prev != nil { + level.Debug(m.log).Log("msg", "combining tsdb WALs") + grp, _, err := walsForPeriod(m.dir, m.period, m.period.PeriodFor(m.prev.initialized)) + if err != nil { + return errors.Wrap(err, "listing wals") + } + level.Debug(m.log).Log("msg", "listed WALs", "pd", grp.period, "n", len(grp.wals)) + + // TODO(owen-d): It's probably faster to build this from the *tenantHeads instead, + // but we already need to impl BuildFromWALs to ensure we can correctly build/ship + // TSDBs from orphaned WALs of previous periods during startup. + // we use the m.prev.initialized timestamp here for the filename to ensure it can't clobber + // an existing file from a previous cycle. I don't think this is possible, but + // perhaps in some unusual crashlooping it could be, so let's be safe and protect ourselves. 
+		if err := m.tsdbManager.BuildFromWALs(m.prev.initialized, grp.wals); err != nil {
+			return errors.Wrapf(err, "building TSDB from prevHeads WALs for period %d", grp.period)
+		}
+
+		// Now that a TSDB has been created from this group, it's safe to remove them
+		if err := m.removeWALGroup(grp); err != nil {
+			return errors.Wrapf(err, "removing prev TSDB WALs for period %d", grp.period)
+		}
+		level.Debug(m.log).Log("msg", "removing wals", "pd", grp.period, "n", len(grp.wals))
+
+	}
+
+	// Now that the tsdbManager has the updated TSDBs, we can remove our references
+	m.mtx.Lock()
+	m.prevHeads = nil
+	m.prev = nil
+	m.mtx.Unlock()
+	return nil
+}
+
+func (m *HeadManager) shippedTSDBsBeforePeriod(period int) (res []string, err error) {
+	files, err := ioutil.ReadDir(managerPerTenantDir(m.dir))
+	if err != nil {
+		return nil, err
+	}
+	for _, f := range files {
+		if id, ok := parseMultitenantTSDBPath(f.Name()); ok {
+			if found := m.period.PeriodFor(id.ts); found < period {
+				res = append(res, f.Name())
+			}
+		}
+	}
+	return
+}
+
+type WalGroup struct {
+	period int
+	wals   []WALIdentifier
+}
+
+func walsByPeriod(dir string, period period) ([]WalGroup, error) {
+
+	groupsMap, err := walGroups(dir, period)
+	if err != nil {
+		return nil, err
+	}
+	res := make([]WalGroup, 0, len(groupsMap))
+	for _, grp := range groupsMap {
+		res = append(res, *grp)
+	}
+	// Ensure the earliest periods are seen first
+	sort.Slice(res, func(i, j int) bool {
+		return res[i].period < res[j].period
+	})
+	return res, nil
+}
+
+func walGroups(dir string, period period) (map[int]*WalGroup, error) {
+	files, err := ioutil.ReadDir(managerWalDir(dir))
+	if err != nil {
+		return nil, err
+	}
+
+	groupsMap := map[int]*WalGroup{}
+
+	for _, f := range files {
+		if id, ok := parseWALPath(f.Name()); ok {
+			pd := period.PeriodFor(id.ts)
+			grp, ok := groupsMap[pd]
+			if !ok {
+				grp = &WalGroup{
+					period: pd,
+				}
+				groupsMap[pd] = grp
+			}
+			grp.wals = append(grp.wals, id)
+		}
+	}
+
+	for _, grp := range groupsMap {
+		// Ensure the earliest wals are seen first
+		sort.Slice(grp.wals, func(i, j int) bool {
+			return grp.wals[i].ts.Before(grp.wals[j].ts)
+		})
+	}
+	return groupsMap, nil
+}
+
+func walsForPeriod(dir string, period period, offset int) (WalGroup, bool, error) {
+	groupsMap, err := walGroups(dir, period)
+	if err != nil {
+		return WalGroup{}, false, err
+	}
+
+	grp, ok := groupsMap[offset]
+	if !ok {
+		return WalGroup{}, false, nil
+	}
+
+	return *grp, true, nil
+}
+
+func (m *HeadManager) removeWALGroup(grp WalGroup) error {
+	for _, wal := range grp.wals {
+		if err := os.RemoveAll(walPath(m.dir, wal.ts)); err != nil {
+			return errors.Wrapf(err, "removing tsdb wal: %s", walPath(m.dir, wal.ts))
+		}
+	}
+	return nil
+}
+
+func walPath(parent string, t time.Time) string {
+	return filepath.Join(
+		managerWalDir(parent),
+		fmt.Sprintf("%d", t.Unix()),
+	)
+}
+
+// recoverHead recovers from all WALs belonging to some period
+// and inserts their data into the active *tenantHeads
+func recoverHead(dir string, heads *tenantHeads, wals []WALIdentifier) error {
+	for _, id := range wals {
+		// use anonymous function for ease of cleanup
+		if err := func(id WALIdentifier) error {
+			reader, closer, err := wal.NewWalReader(walPath(dir, id.ts), -1)
+			if err != nil {
+				return err
+			}
+			defer closer.Close()
+
+			// map of users -> ref -> series.
+ // Keep track of which ref corresponds to which series + // for each WAL so we replay into the correct series + seriesMap := make(map[string]map[uint64]labels.Labels) + + for reader.Next() { + rec := &WALRecord{} + if err := decodeWALRecord(reader.Record(), rec); err != nil { + return err + } + + if len(rec.Series.Labels) > 0 { + tenant, ok := seriesMap[rec.UserID] + if !ok { + tenant = make(map[uint64]labels.Labels) + seriesMap[rec.UserID] = tenant + } + tenant[uint64(rec.Series.Ref)] = rec.Series.Labels + } + + if len(rec.Chks.Chks) > 0 { + tenant, ok := seriesMap[rec.UserID] + if !ok { + return errors.New("found tsdb chunk metas without user in WAL replay") + } + ls, ok := tenant[rec.Chks.Ref] + if !ok { + return errors.New("found tsdb chunk metas without series in WAL replay") + } + _ = heads.Append(rec.UserID, ls, rec.Chks.Chks) + } + } + return reader.Err() + + }(id); err != nil { + return errors.Wrap( + err, + "error recovering from TSDB WAL", + ) + } + } + return nil +} + +type WALIdentifier struct { + ts time.Time +} + +func parseWALPath(p string) (id WALIdentifier, ok bool) { + ts, err := strconv.Atoi(p) + if err != nil { + return + } + + return WALIdentifier{ + ts: time.Unix(int64(ts), 0), + }, true +} + +type tenantHeads struct { + mint, maxt atomic.Int64 // easy lookup for Bounds() impl + + start time.Time + shards int + locks []sync.RWMutex + tenants []map[string]*Head + log log.Logger + chunkFilter chunk.RequestChunkFilterer + metrics *Metrics +} + +func newTenantHeads(start time.Time, shards int, metrics *Metrics, logger log.Logger) *tenantHeads { + res := &tenantHeads{ + start: start, + shards: shards, + locks: make([]sync.RWMutex, shards), + tenants: make([]map[string]*Head, shards), + log: log.With(logger, "component", "tenant-heads"), + metrics: metrics, + } + for i := range res.tenants { + res.tenants[i] = make(map[string]*Head) + } + return res +} + +func (t *tenantHeads) Append(userID string, ls labels.Labels, chks index.ChunkMetas) *WALRecord { + idx := t.shardForTenant(userID) + + var mint, maxt int64 + for _, chk := range chks { + if chk.MinTime < mint || mint == 0 { + mint = chk.MinTime + } + + if chk.MaxTime > maxt { + maxt = chk.MaxTime + } + } + updateMintMaxt(mint, maxt, &t.mint, &t.maxt) + + // First, check if this tenant has been created + var ( + mtx = &t.locks[idx] + newStream bool + refID uint64 + ) + mtx.RLock() + if head, ok := t.tenants[idx][userID]; ok { + newStream, refID = head.Append(ls, chks) + mtx.RUnlock() + } else { + // tenant does not exist, so acquire write lock to insert it + mtx.RUnlock() + mtx.Lock() + head := NewHead(userID, t.metrics, t.log) + t.tenants[idx][userID] = head + newStream, refID = head.Append(ls, chks) + mtx.Unlock() + } + + rec := &WALRecord{ + UserID: userID, + Chks: ChunkMetasRecord{ + Ref: refID, + Chks: chks, + }, + } + + if newStream { + rec.Series = record.RefSeries{ + Ref: chunks.HeadSeriesRef(refID), + Labels: ls, + } + } + + return rec +} + +func (t *tenantHeads) shardForTenant(userID string) uint64 { + return xxhash.Sum64String(userID) & uint64(t.shards-1) +} + +func (t *tenantHeads) Close() error { return nil } + +func (t *tenantHeads) SetChunkFilterer(chunkFilter chunk.RequestChunkFilterer) { + t.chunkFilter = chunkFilter +} + +func (t *tenantHeads) Bounds() (model.Time, model.Time) { + return model.Time(t.mint.Load()), model.Time(t.maxt.Load()) +} + +func (t *tenantHeads) tenantIndex(userID string, from, through model.Time) (idx Index, unlock func(), ok bool) { + i := t.shardForTenant(userID) + 
t.locks[i].RLock() + tenant, ok := t.tenants[i][userID] + if !ok { + t.locks[i].RUnlock() + return + } + + idx = NewTSDBIndex(tenant.indexRange(int64(from), int64(through))) + if t.chunkFilter != nil { + idx.SetChunkFilterer(t.chunkFilter) + } + return idx, t.locks[i].RUnlock, true + +} + +func (t *tenantHeads) GetChunkRefs(ctx context.Context, userID string, from, through model.Time, res []ChunkRef, shard *index.ShardAnnotation, matchers ...*labels.Matcher) ([]ChunkRef, error) { + idx, unlock, ok := t.tenantIndex(userID, from, through) + if !ok { + return nil, nil + } + defer unlock() + return idx.GetChunkRefs(ctx, userID, from, through, nil, shard, matchers...) + +} + +// Series follows the same semantics regarding the passed slice and shard as GetChunkRefs. +func (t *tenantHeads) Series(ctx context.Context, userID string, from, through model.Time, res []Series, shard *index.ShardAnnotation, matchers ...*labels.Matcher) ([]Series, error) { + idx, unlock, ok := t.tenantIndex(userID, from, through) + if !ok { + return nil, nil + } + defer unlock() + return idx.Series(ctx, userID, from, through, nil, shard, matchers...) + +} + +func (t *tenantHeads) LabelNames(ctx context.Context, userID string, from, through model.Time, matchers ...*labels.Matcher) ([]string, error) { + idx, unlock, ok := t.tenantIndex(userID, from, through) + if !ok { + return nil, nil + } + defer unlock() + return idx.LabelNames(ctx, userID, from, through, matchers...) + +} + +func (t *tenantHeads) LabelValues(ctx context.Context, userID string, from, through model.Time, name string, matchers ...*labels.Matcher) ([]string, error) { + idx, unlock, ok := t.tenantIndex(userID, from, through) + if !ok { + return nil, nil + } + defer unlock() + return idx.LabelValues(ctx, userID, from, through, name, matchers...) 
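+	// NOTE: tenantIndex returns the shard's RUnlock as the unlock closure, so
+	// each accessor above holds the shard read lock for the duration of its
+	// query. That guards the shard's tenant map against concurrent inserts;
+	// the Head itself is expected to handle its own internal synchronization.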
+ +} + +// helper only used in building TSDBs +func (t *tenantHeads) forAll(fn func(user string, ls labels.Labels, chks index.ChunkMetas)) error { + for i, shard := range t.tenants { + t.locks[i].RLock() + defer t.locks[i].RUnlock() + + for user, tenant := range shard { + idx := tenant.Index() + ps, err := postingsForMatcher(idx, nil, labels.MustNewMatcher(labels.MatchEqual, "", "")) + if err != nil { + return err + } + + for ps.Next() { + var ( + ls labels.Labels + chks []index.ChunkMeta + ) + + _, err := idx.Series(ps.At(), &ls, &chks) + + if err != nil { + return errors.Wrapf(err, "iterating postings for tenant: %s", user) + } + + fn(user, ls, chks) + } + } + } + + return nil +} diff --git a/pkg/storage/stores/tsdb/head_manager_test.go b/pkg/storage/stores/tsdb/head_manager_test.go new file mode 100644 index 0000000000000..0b032e9e4fcaf --- /dev/null +++ b/pkg/storage/stores/tsdb/head_manager_test.go @@ -0,0 +1,310 @@ +package tsdb + +import ( + "context" + "math" + "testing" + "time" + + "github.com/go-kit/log" + "github.com/prometheus/common/model" + "github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/tsdb/chunks" + "github.com/prometheus/prometheus/tsdb/record" + "github.com/stretchr/testify/require" + + "github.com/grafana/loki/pkg/storage/chunk/client/util" + "github.com/grafana/loki/pkg/storage/stores/tsdb/index" +) + +type noopTSDBManager struct{ NoopIndex } + +func (noopTSDBManager) BuildFromWALs(_ time.Time, _ []WALIdentifier) error { return nil } + +func chunkMetasToChunkRefs(user string, fp uint64, xs index.ChunkMetas) (res []ChunkRef) { + for _, x := range xs { + res = append(res, ChunkRef{ + User: user, + Fingerprint: model.Fingerprint(fp), + Start: x.From(), + End: x.Through(), + Checksum: x.Checksum, + }) + } + return +} + +// Test append +func Test_TenantHeads_Append(t *testing.T) { + h := newTenantHeads(time.Now(), defaultHeadManagerStripeSize, NewMetrics(nil), log.NewNopLogger()) + ls := mustParseLabels(`{foo="bar"}`) + chks := []index.ChunkMeta{ + { + Checksum: 0, + MinTime: 1, + MaxTime: 10, + KB: 2, + Entries: 30, + }, + } + _ = h.Append("fake", ls, chks) + + found, err := h.GetChunkRefs( + context.Background(), + "fake", + 0, + 100, + nil, nil, + labels.MustNewMatcher(labels.MatchEqual, "foo", "bar"), + ) + require.Nil(t, err) + require.Equal(t, chunkMetasToChunkRefs("fake", ls.Hash(), chks), found) + +} + +// Test multitenant reads +func Test_TenantHeads_MultiRead(t *testing.T) { + h := newTenantHeads(time.Now(), defaultHeadManagerStripeSize, NewMetrics(nil), log.NewNopLogger()) + ls := mustParseLabels(`{foo="bar"}`) + chks := []index.ChunkMeta{ + { + Checksum: 0, + MinTime: 1, + MaxTime: 10, + KB: 2, + Entries: 30, + }, + } + + tenants := []struct { + user string + ls labels.Labels + }{ + { + user: "tenant1", + ls: append(ls.Copy(), labels.Label{ + Name: "tenant", + Value: "tenant1", + }), + }, + { + user: "tenant2", + ls: append(ls.Copy(), labels.Label{ + Name: "tenant", + Value: "tenant2", + }), + }, + } + + // add data for both tenants + for _, tenant := range tenants { + _ = h.Append(tenant.user, tenant.ls, chks) + + } + + // ensure we're only returned the data from the correct tenant + for _, tenant := range tenants { + found, err := h.GetChunkRefs( + context.Background(), + tenant.user, + 0, + 100, + nil, nil, + labels.MustNewMatcher(labels.MatchEqual, "foo", "bar"), + ) + require.Nil(t, err) + require.Equal(t, chunkMetasToChunkRefs(tenant.user, tenant.ls.Hash(), chks), found) + } + +} + +// test head recover from wal +func 
Test_HeadManager_RecoverHead(t *testing.T) { + now := time.Now() + dir := t.TempDir() + cases := []struct { + Labels labels.Labels + Chunks []index.ChunkMeta + User string + }{ + { + User: "tenant1", + Labels: mustParseLabels(`{foo="bar", bazz="buzz"}`), + Chunks: []index.ChunkMeta{ + { + MinTime: 1, + MaxTime: 10, + Checksum: 3, + }, + }, + }, + { + User: "tenant2", + Labels: mustParseLabels(`{foo="bard", bazz="bozz", bonk="borb"}`), + Chunks: []index.ChunkMeta{ + { + MinTime: 1, + MaxTime: 7, + Checksum: 4, + }, + }, + }, + } + + mgr := NewHeadManager(log.NewNopLogger(), dir, nil, noopTSDBManager{}) + // This bit is normally handled by the Start() fn, but we're testing a smaller surface area + // so ensure our dirs exist + for _, d := range managerRequiredDirs(dir) { + require.Nil(t, util.EnsureDirectory(d)) + } + + // Call Rotate() to ensure the new head tenant heads exist, etc + require.Nil(t, mgr.Rotate(now)) + + // now build a WAL independently to test recovery + w, err := newHeadWAL(log.NewNopLogger(), walPath(mgr.dir, now), now) + require.Nil(t, err) + + for i, c := range cases { + require.Nil(t, w.Log(&WALRecord{ + UserID: c.User, + Series: record.RefSeries{ + Ref: chunks.HeadSeriesRef(i), + Labels: c.Labels, + }, + Chks: ChunkMetasRecord{ + Chks: c.Chunks, + Ref: uint64(i), + }, + })) + } + + require.Nil(t, w.Stop()) + + grp, ok, err := walsForPeriod(mgr.dir, mgr.period, mgr.period.PeriodFor(now)) + require.Nil(t, err) + require.True(t, ok) + require.Equal(t, 1, len(grp.wals)) + require.Nil(t, recoverHead(mgr.dir, mgr.activeHeads, grp.wals)) + + for _, c := range cases { + refs, err := mgr.GetChunkRefs( + context.Background(), + c.User, + 0, math.MaxInt64, + nil, nil, + labels.MustNewMatcher(labels.MatchRegexp, "foo", ".+"), + ) + require.Nil(t, err) + require.Equal(t, chunkMetasToChunkRefs(c.User, c.Labels.Hash(), c.Chunks), refs) + } + +} + +// test mgr recover from multiple wals across multiple periods +func Test_HeadManager_Lifecycle(t *testing.T) { + dir := t.TempDir() + curPeriod := time.Now() + cases := []struct { + Labels labels.Labels + Chunks []index.ChunkMeta + User string + }{ + { + User: "tenant1", + Labels: mustParseLabels(`{foo="bar", bazz="buzz"}`), + Chunks: []index.ChunkMeta{ + { + MinTime: 1, + MaxTime: 10, + Checksum: 3, + }, + }, + }, + { + User: "tenant2", + Labels: mustParseLabels(`{foo="bard", bazz="bozz", bonk="borb"}`), + Chunks: []index.ChunkMeta{ + { + MinTime: 1, + MaxTime: 7, + Checksum: 4, + }, + }, + }, + } + + mgr := NewHeadManager(log.NewNopLogger(), dir, nil, noopTSDBManager{}) + w, err := newHeadWAL(log.NewNopLogger(), walPath(mgr.dir, curPeriod), curPeriod) + require.Nil(t, err) + + // Write old WALs + for i, c := range cases { + require.Nil(t, w.Log(&WALRecord{ + UserID: c.User, + Series: record.RefSeries{ + Ref: chunks.HeadSeriesRef(i), + Labels: c.Labels, + }, + Chks: ChunkMetasRecord{ + Chks: c.Chunks, + Ref: uint64(i), + }, + })) + } + + require.Nil(t, w.Stop()) + + // Start, ensuring recovery from old WALs + require.Nil(t, mgr.Start()) + // Ensure old WAL data is queryable + for _, c := range cases { + refs, err := mgr.GetChunkRefs( + context.Background(), + c.User, + 0, math.MaxInt64, + nil, nil, + labels.MustNewMatcher(labels.MatchRegexp, "foo", ".+"), + ) + require.Nil(t, err) + + lbls := labels.NewBuilder(c.Labels) + lbls.Set(TenantLabel, c.User) + require.Equal(t, chunkMetasToChunkRefs(c.User, c.Labels.Hash(), c.Chunks), refs) + } + + // Add data + newCase := struct { + Labels labels.Labels + Chunks []index.ChunkMeta + User string + 
}{
+		User:   "tenant3",
+		Labels: mustParseLabels(`{foo="bard", other="hi"}`),
+		Chunks: []index.ChunkMeta{
+			{
+				MinTime:  1,
+				MaxTime:  7,
+				Checksum: 4,
+			},
+		},
+	}
+
+	require.Nil(t, mgr.Append(newCase.User, newCase.Labels, newCase.Chunks))
+
+	// Ensure old + new data is queryable
+	for _, c := range append(cases, newCase) {
+		refs, err := mgr.GetChunkRefs(
+			context.Background(),
+			c.User,
+			0, math.MaxInt64,
+			nil, nil,
+			labels.MustNewMatcher(labels.MatchRegexp, "foo", ".+"),
+		)
+		require.Nil(t, err)
+
+		lbls := labels.NewBuilder(c.Labels)
+		lbls.Set(TenantLabel, c.User)
+		require.Equal(t, chunkMetasToChunkRefs(c.User, c.Labels.Hash(), c.Chunks), refs)
+	}
+}
diff --git a/pkg/storage/stores/tsdb/head_read.go b/pkg/storage/stores/tsdb/head_read.go
index 0f6e06410733d..5ff524115c100 100644
--- a/pkg/storage/stores/tsdb/head_read.go
+++ b/pkg/storage/stores/tsdb/head_read.go
@@ -24,8 +24,8 @@ import (
 )
 
 // Index returns an IndexReader against the block.
-func (h *Head) Index() (IndexReader, error) {
-	return h.indexRange(math.MinInt64, math.MaxInt64), nil
+func (h *Head) Index() IndexReader {
+	return h.indexRange(math.MinInt64, math.MaxInt64)
 }
 
 func (h *Head) indexRange(mint, maxt int64) *headIndexReader {
diff --git a/pkg/storage/stores/tsdb/head_wal.go b/pkg/storage/stores/tsdb/head_wal.go
new file mode 100644
index 0000000000000..ed7f72897c1db
--- /dev/null
+++ b/pkg/storage/stores/tsdb/head_wal.go
@@ -0,0 +1,213 @@
+package tsdb
+
+import (
+	"time"
+
+	"github.com/go-kit/log"
+	"github.com/pkg/errors"
+	"github.com/prometheus/prometheus/tsdb/record"
+	"github.com/prometheus/prometheus/tsdb/wal"
+
+	"github.com/grafana/loki/pkg/storage/stores/tsdb/index"
+	"github.com/grafana/loki/pkg/util/encoding"
+)
+
+type WAL interface {
+	Start(time.Time) error
+	Log(*WALRecord) error
+	Stop() error
+}
+
+// TODO(owen-d): There are probably some performance gains to be had by utilizing
+// pools here, but in the interest of implementation time and given chunks aren't
+// flushed often (generally ~5/s), this seems fine.
+// This may also be applicable to varint encoding.
+
+// 128KB
+// The segment sizes are kept small for the TSDB Head here because
+// we only store chunk references
+const walSegmentSize = 128 << 10
+
+type RecordType byte
+
+// By prefixing records with a type byte, we can easily extend our wal schema
+const (
+	// WalRecordSeries marks a record carrying a single series (a user ID plus
+	// a prometheus RefSeries). WalRecordChunks marks a record carrying chunk
+	// metas for a previously written series ref.
+	WalRecordSeries RecordType = iota
+	WalRecordChunks
+)
+
+type WALRecord struct {
+	UserID string
+	Series record.RefSeries
+	Chks   ChunkMetasRecord
+}
+
+type ChunkMetasRecord struct {
+	Chks index.ChunkMetas
+	Ref  uint64
+}
+
+func (r *WALRecord) encodeSeries(b []byte) []byte {
+	buf := encoding.EncWith(b)
+	buf.PutByte(byte(WalRecordSeries))
+	buf.PutUvarintStr(r.UserID)
+
+	var enc record.Encoder
+	// The 'encoded' already has the type header and userID here, hence re-using
+	// the remaining part of the slice (i.e. encoded[len(encoded):]) to encode the series.
+	encoded := buf.Get()
+	encoded = append(encoded, enc.Series([]record.RefSeries{r.Series}, encoded[len(encoded):])...)
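+	// The resulting record layout is:
+	//
+	//	[ type <1 byte> | userID <uvarint len + bytes> | prometheus series record ]
+	//
+	// decodeWALRecord strips the type byte and userID, then hands the
+	// remainder to the stock prometheus record.Decoder.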
+ + return encoded +} + +func (r *WALRecord) encodeChunks(b []byte) []byte { + buf := encoding.EncWith(b) + buf.PutByte(byte(WalRecordChunks)) + buf.PutUvarintStr(r.UserID) + buf.PutBE64(r.Chks.Ref) + buf.PutUvarint(len(r.Chks.Chks)) + + for _, chk := range r.Chks.Chks { + buf.PutBE64(uint64(chk.MinTime)) + buf.PutBE64(uint64(chk.MaxTime)) + buf.PutBE32(chk.Checksum) + buf.PutBE32(chk.KB) + buf.PutBE32(chk.Entries) + } + + return buf.Get() +} + +func decodeChunks(b []byte, rec *WALRecord) error { + if len(b) == 0 { + return nil + } + + dec := encoding.DecWith(b) + + rec.Chks.Ref = dec.Be64() + if err := dec.Err(); err != nil { + return errors.Wrap(err, "decoding series ref") + } + + ln := dec.Uvarint() + if err := dec.Err(); err != nil { + return errors.Wrap(err, "decoding number of chunks") + } + // allocate space for the required number of chunks + rec.Chks.Chks = make(index.ChunkMetas, 0, ln) + + for len(dec.B) > 0 && dec.Err() == nil { + rec.Chks.Chks = append(rec.Chks.Chks, index.ChunkMeta{ + MinTime: dec.Be64int64(), + MaxTime: dec.Be64int64(), + Checksum: dec.Be32(), + KB: dec.Be32(), + Entries: dec.Be32(), + }) + } + + if err := dec.Err(); err != nil { + return errors.Wrap(err, "decoding chunk metas") + } + + return nil +} + +func decodeWALRecord(b []byte, walRec *WALRecord) error { + var ( + userID string + dec record.Decoder + + decbuf = encoding.DecWith(b) + t = RecordType(decbuf.Byte()) + ) + + switch t { + case WalRecordSeries: + userID = decbuf.UvarintStr() + rSeries, err := dec.Series(decbuf.B, nil) + if err != nil { + return errors.Wrap(err, "decoding head series") + } + // unlike tsdb, we only add one series per record. + if len(rSeries) > 1 { + return errors.New("more than one series detected in tsdb head wal record") + } + if len(rSeries) == 1 { + walRec.Series = rSeries[0] + } + case WalRecordChunks: + userID = decbuf.UvarintStr() + if err := decodeChunks(decbuf.B, walRec); err != nil { + return err + } + default: + return errors.New("unknown record type") + } + + if decbuf.Err() != nil { + return decbuf.Err() + } + + walRec.UserID = userID + return nil +} + +// the headWAL, unlike Head, is multi-tenant. This is just to avoid the need to maintain +// an open segment per tenant (potentially thousands of them) +type headWAL struct { + initialized time.Time + log log.Logger + wal *wal.WAL +} + +func newHeadWAL(log log.Logger, dir string, t time.Time) (*headWAL, error) { + // NB: if we use a non-nil Prometheus Registerer, ensure + // that the underlying metrics won't conflict with existing WAL metrics in the ingester. 
+ // Likely, this can be done by adding extra label(s) + wal, err := wal.NewSize(log, nil, dir, walSegmentSize, false) + if err != nil { + return nil, err + } + + return &headWAL{ + initialized: t, + log: log, + wal: wal, + }, nil +} + +func (w *headWAL) Stop() error { + return w.wal.Close() +} + +func (w *headWAL) Log(record *WALRecord) error { + if record == nil { + return nil + } + + var buf []byte + + // Always write series before chunks + if len(record.Series.Labels) > 0 { + buf = record.encodeSeries(buf[:0]) + if err := w.wal.Log(buf); err != nil { + return err + } + } + + if len(record.Chks.Chks) > 0 { + buf = record.encodeChunks(buf[:0]) + if err := w.wal.Log(buf); err != nil { + return err + } + } + + return nil +} diff --git a/pkg/storage/stores/tsdb/head_wal_test.go b/pkg/storage/stores/tsdb/head_wal_test.go new file mode 100644 index 0000000000000..258bf4472e037 --- /dev/null +++ b/pkg/storage/stores/tsdb/head_wal_test.go @@ -0,0 +1,102 @@ +package tsdb + +import ( + "testing" + "time" + + "github.com/go-kit/log" + "github.com/prometheus/prometheus/tsdb/chunks" + "github.com/prometheus/prometheus/tsdb/record" + "github.com/stretchr/testify/require" + + "github.com/grafana/loki/pkg/storage/stores/tsdb/index" +) + +func Test_Encoding_Series(t *testing.T) { + record := &WALRecord{ + UserID: "foo", + Series: record.RefSeries{ + Ref: chunks.HeadSeriesRef(1), + Labels: mustParseLabels(`{foo="bar"}`), + }, + } + buf := record.encodeSeries(nil) + decoded := &WALRecord{} + + err := decodeWALRecord(buf, decoded) + require.Nil(t, err) + require.Equal(t, record, decoded) +} + +func Test_Encoding_Chunks(t *testing.T) { + record := &WALRecord{ + UserID: "foo", + Chks: ChunkMetasRecord{ + Ref: 1, + Chks: index.ChunkMetas{ + { + Checksum: 1, + MinTime: 1, + MaxTime: 4, + KB: 5, + Entries: 6, + }, + { + Checksum: 2, + MinTime: 5, + MaxTime: 10, + KB: 7, + Entries: 8, + }, + }, + }, + } + buf := record.encodeChunks(nil) + decoded := &WALRecord{} + + err := decodeWALRecord(buf, decoded) + require.Nil(t, err) + require.Equal(t, record, decoded) +} + +func Test_HeadWALLog(t *testing.T) { + dir := t.TempDir() + w, err := newHeadWAL(log.NewNopLogger(), dir, time.Now()) + require.Nil(t, err) + + newSeries := &WALRecord{ + UserID: "foo", + Series: record.RefSeries{Ref: 1, Labels: mustParseLabels(`{foo="bar"}`)}, + Chks: ChunkMetasRecord{ + Chks: []index.ChunkMeta{ + { + Checksum: 1, + MinTime: 1, + MaxTime: 10, + KB: 5, + Entries: 50, + }, + }, + Ref: 1, + }, + } + require.Nil(t, w.Log(newSeries)) + + chunksOnly := &WALRecord{ + UserID: "foo", + Chks: ChunkMetasRecord{ + Chks: []index.ChunkMeta{ + { + Checksum: 2, + MinTime: 5, + MaxTime: 100, + KB: 3, + Entries: 25, + }, + }, + Ref: 1, + }, + } + require.Nil(t, w.Log(chunksOnly)) + require.Nil(t, w.Stop()) +} diff --git a/pkg/storage/stores/tsdb/identifier.go b/pkg/storage/stores/tsdb/identifier.go new file mode 100644 index 0000000000000..14f9cf69ec59d --- /dev/null +++ b/pkg/storage/stores/tsdb/identifier.go @@ -0,0 +1,202 @@ +package tsdb + +import ( + "fmt" + "path" + "path/filepath" + "strconv" + "strings" + "time" + + "github.com/prometheus/common/model" + + "github.com/grafana/loki/pkg/storage/stores/tsdb/index" +) + +// Identifier can resolve an index to a name (in object storage) +// and a path (on disk) +type Identifier interface { + Name() string + Path() string +} + +// identifierFromPath will detect whether this is a single or multitenant TSDB +func identifierFromPath(p string) (Identifier, error) { + multiID, multitenant := 
parseMultitenantTSDBPath(p)
+	if multitenant {
+		parent := filepath.Dir(p)
+		return newPrefixedIdentifier(multiID, parent, ""), nil
+	}
+
+	id, parent, ok := parseSingleTenantTSDBPath(p)
+	if !ok {
+		return nil, fmt.Errorf("invalid tsdb path: %s", p)
+	}
+
+	return newPrefixedIdentifier(id, parent, ""), nil
+}
+
+func newPrefixedIdentifier(id Identifier, path, name string) prefixedIdentifier {
+	return prefixedIdentifier{
+		Identifier: id,
+		parentPath: path,
+		parentName: name,
+	}
+}
+
+// prefixedIdentifier wraps an Identifier and prepends parent path/name
+// components to its Path() and Name() results
+type prefixedIdentifier struct {
+	parentPath, parentName string
+	Identifier
+}
+
+func (p prefixedIdentifier) Path() string {
+	return filepath.Join(p.parentPath, p.Identifier.Path())
+}
+
+func (p prefixedIdentifier) Name() string {
+	return path.Join(p.parentName, p.Identifier.Name())
+}
+
+func newSuffixedIdentifier(id Identifier, pathSuffix string) suffixedIdentifier {
+	return suffixedIdentifier{
+		pathSuffix: pathSuffix,
+		Identifier: id,
+	}
+}
+
+// Generally useful for gzip extensions
+type suffixedIdentifier struct {
+	pathSuffix string
+	Identifier
+}
+
+func (s suffixedIdentifier) Path() string {
+	return s.Identifier.Path() + s.pathSuffix
+}
+
+// SingleTenantTSDBIdentifier has all the information needed to resolve a TSDB index.
+// Notably this abstracts away OS path separators, etc.
+type SingleTenantTSDBIdentifier struct {
+	Tenant        string
+	From, Through model.Time
+	Checksum      uint32
+}
+
+func (i SingleTenantTSDBIdentifier) str() string {
+	return fmt.Sprintf(
+		"%s-%d-%d-%x.tsdb",
+		index.IndexFilename,
+		i.From,
+		i.Through,
+		i.Checksum,
+	)
+}
+
+func (i SingleTenantTSDBIdentifier) Name() string {
+	return path.Join(i.Tenant, i.str())
+}
+
+func (i SingleTenantTSDBIdentifier) Path() string {
+	return filepath.Join(i.Tenant, i.str())
+}
+
+func parseSingleTenantTSDBPath(p string) (id SingleTenantTSDBIdentifier, parent string, ok bool) {
+	// parsing as multitenant didn't work, so try single tenant
+	file := filepath.Base(p)
+	parents := filepath.Dir(p)
+	pathPrefix := filepath.Dir(parents)
+	tenant := filepath.Base(parents)
+
+	// no tenant was provided
+	if tenant == "."
{ + return + } + + // incorrect suffix + trimmed := strings.TrimSuffix(file, ".tsdb") + if trimmed == file { + return + } + + elems := strings.Split(trimmed, "-") + if len(elems) != 4 { + return + } + + if elems[0] != index.IndexFilename { + return + } + + from, err := strconv.ParseInt(elems[1], 10, 64) + if err != nil { + return + } + + through, err := strconv.ParseInt(elems[2], 10, 64) + if err != nil { + return + } + + checksum, err := strconv.ParseInt(elems[3], 16, 32) + if err != nil { + return + } + + return SingleTenantTSDBIdentifier{ + Tenant: tenant, + From: model.Time(from), + Through: model.Time(through), + Checksum: uint32(checksum), + }, pathPrefix, true + +} + +type MultitenantTSDBIdentifier struct { + nodeName string + ts time.Time +} + +func (id MultitenantTSDBIdentifier) Name() string { + return fmt.Sprintf("%d-%s.tsdb", id.ts.Unix(), id.nodeName) +} + +func (id MultitenantTSDBIdentifier) Path() string { + // There are no directories, so reuse name + return id.Name() +} + +func parseMultitenantTSDBPath(p string) (id MultitenantTSDBIdentifier, ok bool) { + cleaned := filepath.Base(p) + return parseMultitenantTSDBNameFromBase(cleaned) +} + +func parseMultitenantTSDBName(p string) (id MultitenantTSDBIdentifier, ok bool) { + cleaned := path.Base(p) + return parseMultitenantTSDBNameFromBase(cleaned) +} + +func parseMultitenantTSDBNameFromBase(name string) (res MultitenantTSDBIdentifier, ok bool) { + + trimmed := strings.TrimSuffix(name, ".tsdb") + + // incorrect suffix + if trimmed == name { + return + } + + xs := strings.Split(trimmed, "-") + if len(xs) != 2 { + return + } + + ts, err := strconv.Atoi(xs[0]) + if err != nil { + return + } + + return MultitenantTSDBIdentifier{ + ts: time.Unix(int64(ts), 0), + nodeName: xs[1], + }, true +} diff --git a/pkg/storage/stores/tsdb/identifier_test.go b/pkg/storage/stores/tsdb/identifier_test.go new file mode 100644 index 0000000000000..619651cd2364f --- /dev/null +++ b/pkg/storage/stores/tsdb/identifier_test.go @@ -0,0 +1,57 @@ +package tsdb + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestParseSingleTenantTSDBPath(t *testing.T) { + for _, tc := range []struct { + desc string + input string + id SingleTenantTSDBIdentifier + parent string + ok bool + }{ + { + desc: "simple_works", + input: "parent/fake/index-1-10-ff.tsdb", + id: SingleTenantTSDBIdentifier{ + Tenant: "fake", + From: 1, + Through: 10, + Checksum: 255, + }, + parent: "parent", + ok: true, + }, + { + desc: "no tenant dir", + input: "index-1-10-ff.tsdb", + ok: false, + }, + { + desc: "wrong index name", + input: "fake/notindex-1-10-ff.tsdb", + ok: false, + }, + { + desc: "wrong argument len", + input: "fake/index-10-ff.tsdb", + ok: false, + }, + { + desc: "wrong argument encoding", + input: "fake/index-ff-10-ff.tsdb", + ok: false, + }, + } { + t.Run(tc.desc, func(t *testing.T) { + id, parent, ok := parseSingleTenantTSDBPath(tc.input) + require.Equal(t, tc.id, id) + require.Equal(t, tc.parent, parent) + require.Equal(t, tc.ok, ok) + }) + } +} diff --git a/pkg/storage/stores/tsdb/index.go b/pkg/storage/stores/tsdb/index.go index 6b60734b2d0b8..1df7184a50deb 100644 --- a/pkg/storage/stores/tsdb/index.go +++ b/pkg/storage/stores/tsdb/index.go @@ -6,6 +6,7 @@ import ( "github.com/prometheus/common/model" "github.com/prometheus/prometheus/model/labels" + "github.com/grafana/loki/pkg/storage/chunk" "github.com/grafana/loki/pkg/storage/stores/tsdb/index" ) @@ -32,6 +33,8 @@ func (r ChunkRef) Less(x ChunkRef) bool { type Index interface { Bounded + 
SetChunkFilterer(chunkFilter chunk.RequestChunkFilterer) + Close() error // GetChunkRefs accepts an optional []ChunkRef argument. // If not nil, it will use that slice to build the result, // allowing us to avoid unnecessary allocations at the caller's discretion. @@ -48,3 +51,24 @@ type Index interface { LabelNames(ctx context.Context, userID string, from, through model.Time, matchers ...*labels.Matcher) ([]string, error) LabelValues(ctx context.Context, userID string, from, through model.Time, name string, matchers ...*labels.Matcher) ([]string, error) } + +type NoopIndex struct{} + +func (NoopIndex) Close() error { return nil } +func (NoopIndex) Bounds() (from, through model.Time) { return } +func (NoopIndex) GetChunkRefs(ctx context.Context, userID string, from, through model.Time, res []ChunkRef, shard *index.ShardAnnotation, matchers ...*labels.Matcher) ([]ChunkRef, error) { + return nil, nil +} + +// Series follows the same semantics regarding the passed slice and shard as GetChunkRefs. +func (NoopIndex) Series(ctx context.Context, userID string, from, through model.Time, res []Series, shard *index.ShardAnnotation, matchers ...*labels.Matcher) ([]Series, error) { + return nil, nil +} +func (NoopIndex) LabelNames(ctx context.Context, userID string, from, through model.Time, matchers ...*labels.Matcher) ([]string, error) { + return nil, nil +} +func (NoopIndex) LabelValues(ctx context.Context, userID string, from, through model.Time, name string, matchers ...*labels.Matcher) ([]string, error) { + return nil, nil +} + +func (NoopIndex) SetChunkFilterer(chunkFilter chunk.RequestChunkFilterer) {} diff --git a/pkg/storage/stores/tsdb/index/chunk.go b/pkg/storage/stores/tsdb/index/chunk.go index 1b29340910ee8..e2c2e74f23dcb 100644 --- a/pkg/storage/stores/tsdb/index/chunk.go +++ b/pkg/storage/stores/tsdb/index/chunk.go @@ -63,10 +63,10 @@ func (c ChunkMetas) Less(i, j int) bool { return a.Checksum < b.Checksum } -// finalize sorts and dedupes +// Finalize sorts and dedupes // TODO(owen-d): can we remove the need for this by ensuring we only push // in order and without duplicates? -func (c ChunkMetas) finalize() ChunkMetas { +func (c ChunkMetas) Finalize() ChunkMetas { sort.Sort(c) if len(c) == 0 { diff --git a/pkg/storage/stores/tsdb/index/chunk_test.go b/pkg/storage/stores/tsdb/index/chunk_test.go index 9da566e3b0ab5..f20613c828abc 100644 --- a/pkg/storage/stores/tsdb/index/chunk_test.go +++ b/pkg/storage/stores/tsdb/index/chunk_test.go @@ -135,7 +135,7 @@ func TestChunkMetasFinalize(t *testing.T) { }, } { t.Run(tc.desc, func(t *testing.T) { - require.Equal(t, tc.output, tc.input.finalize()) + require.Equal(t, tc.output, tc.input.Finalize()) }) } } diff --git a/pkg/storage/stores/tsdb/index/index.go b/pkg/storage/stores/tsdb/index/index.go index af69a5b64a126..9e90461a8bd17 100644 --- a/pkg/storage/stores/tsdb/index/index.go +++ b/pkg/storage/stores/tsdb/index/index.go @@ -30,10 +30,10 @@ import ( "unsafe" "github.com/pkg/errors" + "github.com/prometheus/common/model" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/storage" tsdb_enc "github.com/prometheus/prometheus/tsdb/encoding" - tsdb_errors "github.com/prometheus/prometheus/tsdb/errors" "github.com/prometheus/prometheus/tsdb/fileutil" "github.com/grafana/loki/pkg/util/encoding" @@ -134,8 +134,9 @@ type Writer struct { fingerprintOffsets FingerprintOffsets // Hold last series to validate that clients insert new series in order. 
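+	// lastSeriesHash caches the fingerprint passed by the caller for the
+	// previous series; it can't simply be recomputed from lastSeries because
+	// AddSeries now takes an explicit fingerprint that may differ from
+	// labels.Hash() (e.g. multitenant TSDBs, whose tenant label is excluded
+	// from the fingerprint).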
- lastSeries labels.Labels - lastRef storage.SeriesRef + lastSeries labels.Labels + lastSeriesHash uint64 + lastRef storage.SeriesRef crc32 hash.Hash @@ -436,13 +437,17 @@ func (w *Writer) writeMeta() error { } // AddSeries adds the series one at a time along with its chunks. -func (w *Writer) AddSeries(ref storage.SeriesRef, lset labels.Labels, chunks ...ChunkMeta) error { +// Requires a specific fingerprint to be passed in the case where the "desired" +// fingerprint differs from what labels.Hash() produces. For example, +// multitenant TSDBs embed a tenant label, but the actual series has no such +// label and so the derived fingerprint differs. +func (w *Writer) AddSeries(ref storage.SeriesRef, lset labels.Labels, fp model.Fingerprint, chunks ...ChunkMeta) error { if err := w.ensureStage(idxStageSeries); err != nil { return err } - labelHash := lset.Hash() - lastHash := w.lastSeries.Hash() + labelHash := uint64(fp) + lastHash := w.lastSeriesHash // Ensure series are sorted by the priorities: [`hash(labels)`, `labels`] if (labelHash < lastHash && len(w.lastSeries) > 0) || labelHash == lastHash && labels.Compare(lset, w.lastSeries) < 0 { return errors.Errorf("out-of-order series added with label set %q", lset) @@ -530,6 +535,7 @@ func (w *Writer) AddSeries(ref storage.SeriesRef, lset labels.Labels, chunks ... } w.lastSeries = append(w.lastSeries[:0], lset...) + w.lastSeriesHash = labelHash w.lastRef = ref if ref%fingerprintInterval == 0 { @@ -598,7 +604,7 @@ func (w *Writer) finishSymbols() error { } // Load in the symbol table efficiently for the rest of the index writing. - w.symbols, err = NewSymbols(realByteSlice(w.symbolFile.Bytes()), FormatV2, int(w.toc.Symbols)) + w.symbols, err = NewSymbols(RealByteSlice(w.symbolFile.Bytes()), FormatV2, int(w.toc.Symbols)) if err != nil { return errors.Wrap(err, "read symbols") } @@ -617,7 +623,7 @@ func (w *Writer) writeLabelIndices() error { } defer f.Close() - d := encoding.DecWrap(tsdb_enc.NewDecbufRaw(realByteSlice(f.Bytes()), int(w.fPO.pos))) + d := encoding.DecWrap(tsdb_enc.NewDecbufRaw(RealByteSlice(f.Bytes()), int(w.fPO.pos))) cnt := w.cntPO current := []byte{} values := []uint32{} @@ -787,7 +793,7 @@ func (w *Writer) writePostingsOffsetTable() error { f.Close() } }() - d := encoding.DecWrap(tsdb_enc.NewDecbufRaw(realByteSlice(f.Bytes()), int(w.fPO.pos))) + d := encoding.DecWrap(tsdb_enc.NewDecbufRaw(RealByteSlice(f.Bytes()), int(w.fPO.pos))) cnt := w.cntPO for d.Err() == nil && cnt > 0 { w.buf1.Reset() @@ -848,7 +854,9 @@ func (w *Writer) writeFingerprintOffsetsTable() error { // write length ln := w.buf1.Len() - if ln > math.MaxUint32 { + // TODO(owen-d): can remove the uint32 cast in the future + // Had to uint32 wrap these for arm32 builds, which we'll remove in the future. + if uint32(ln) > uint32(math.MaxUint32) { return errors.Errorf("fingerprint offset size exceeds 4 bytes: %d", ln) } @@ -905,7 +913,7 @@ func (w *Writer) writePostingsToTmpFiles() error { // Write out the special all posting. offsets := []uint32{} - d := encoding.DecWrap(tsdb_enc.NewDecbufRaw(realByteSlice(f.Bytes()), int(w.toc.LabelIndices))) + d := encoding.DecWrap(tsdb_enc.NewDecbufRaw(RealByteSlice(f.Bytes()), int(w.toc.LabelIndices))) d.Skip(int(w.toc.Series)) for d.Len() > 0 { d.ConsumePadding() @@ -951,7 +959,7 @@ func (w *Writer) writePostingsToTmpFiles() error { // Label name -> label value -> positions. 
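+	// Each uint32 key here is a reference into the symbol table (label name,
+	// then label value) and the inner slices hold series offsets; the series
+	// encoding stores symbol refs rather than strings, so postings can be
+	// regrouped without materializing any strings.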
postings := map[uint32]map[uint32][]uint32{} - d := encoding.DecWrap(tsdb_enc.NewDecbufRaw(realByteSlice(f.Bytes()), int(w.toc.LabelIndices))) + d := encoding.DecWrap(tsdb_enc.NewDecbufRaw(RealByteSlice(f.Bytes()), int(w.toc.LabelIndices))) d.Skip(int(w.toc.Series)) for d.Len() > 0 { d.ConsumePadding() @@ -1167,17 +1175,17 @@ type ByteSlice interface { Range(start, end int) []byte } -type realByteSlice []byte +type RealByteSlice []byte -func (b realByteSlice) Len() int { +func (b RealByteSlice) Len() int { return len(b) } -func (b realByteSlice) Range(start, end int) []byte { +func (b RealByteSlice) Range(start, end int) []byte { return b[start:end] } -func (b realByteSlice) Sub(start, end int) ByteSlice { +func (b RealByteSlice) Sub(start, end int) ByteSlice { return b[start:end] } @@ -1187,18 +1195,19 @@ func NewReader(b ByteSlice) (*Reader, error) { return newReader(b, ioutil.NopCloser(nil)) } +type nopCloser struct{} + +func (nopCloser) Close() error { return nil } + // NewFileReader returns a new index reader against the given index file. func NewFileReader(path string) (*Reader, error) { - f, err := fileutil.OpenMmapFile(path) + b, err := ioutil.ReadFile(path) if err != nil { return nil, err } - r, err := newReader(realByteSlice(f.Bytes()), f) + r, err := newReader(RealByteSlice(b), nopCloser{}) if err != nil { - return nil, tsdb_errors.NewMulti( - err, - f.Close(), - ).Err() + return r, err } return r, nil diff --git a/pkg/storage/stores/tsdb/index/index_test.go b/pkg/storage/stores/tsdb/index/index_test.go index 4508827c3c4ad..962e5dfb5c4fb 100644 --- a/pkg/storage/stores/tsdb/index/index_test.go +++ b/pkg/storage/stores/tsdb/index/index_test.go @@ -28,6 +28,7 @@ import ( "github.com/stretchr/testify/require" "go.uber.org/goleak" + "github.com/prometheus/common/model" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/tsdb/encoding" @@ -168,10 +169,10 @@ func TestIndexRW_Postings(t *testing.T) { // Postings lists are only written if a series with the respective // reference was added before. - require.NoError(t, iw.AddSeries(1, series[0])) - require.NoError(t, iw.AddSeries(2, series[1])) - require.NoError(t, iw.AddSeries(3, series[2])) - require.NoError(t, iw.AddSeries(4, series[3])) + require.NoError(t, iw.AddSeries(1, series[0], model.Fingerprint(series[0].Hash()))) + require.NoError(t, iw.AddSeries(2, series[1], model.Fingerprint(series[1].Hash()))) + require.NoError(t, iw.AddSeries(3, series[2], model.Fingerprint(series[2].Hash()))) + require.NoError(t, iw.AddSeries(4, series[3], model.Fingerprint(series[3].Hash()))) require.NoError(t, iw.Close()) @@ -257,7 +258,7 @@ func TestPostingsMany(t *testing.T) { }) for i, s := range series { - require.NoError(t, iw.AddSeries(storage.SeriesRef(i), s)) + require.NoError(t, iw.AddSeries(storage.SeriesRef(i), s, model.Fingerprint(s.Hash()))) } require.NoError(t, iw.Close()) @@ -384,7 +385,7 @@ func TestPersistence_index_e2e(t *testing.T) { mi := newMockIndex() for i, s := range input { - err = iw.AddSeries(storage.SeriesRef(i), s.labels, s.chunks...) + err = iw.AddSeries(storage.SeriesRef(i), s.labels, model.Fingerprint(s.labels.Hash()), s.chunks...) 
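+		// Deriving the fingerprint from the labels preserves the old behavior;
+		// only callers like the multitenant TSDB builder pass a fingerprint
+		// that intentionally differs from labels.Hash().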
require.NoError(t, err) require.NoError(t, mi.AddSeries(storage.SeriesRef(i), s.labels, s.chunks...)) @@ -465,14 +466,14 @@ func TestPersistence_index_e2e(t *testing.T) { } func TestDecbufUvarintWithInvalidBuffer(t *testing.T) { - b := realByteSlice([]byte{0x81, 0x81, 0x81, 0x81, 0x81, 0x81}) + b := RealByteSlice([]byte{0x81, 0x81, 0x81, 0x81, 0x81, 0x81}) db := encoding.NewDecbufUvarintAt(b, 0, castagnoliTable) require.Error(t, db.Err()) } func TestReaderWithInvalidBuffer(t *testing.T) { - b := realByteSlice([]byte{0x81, 0x81, 0x81, 0x81, 0x81, 0x81}) + b := RealByteSlice([]byte{0x81, 0x81, 0x81, 0x81, 0x81, 0x81}) _, err := NewReader(b) require.Error(t, err) @@ -509,7 +510,7 @@ func TestSymbols(t *testing.T) { checksum := crc32.Checksum(buf.Get()[symbolsStart+4:], castagnoliTable) buf.PutBE32(checksum) // Check sum at the end. - s, err := NewSymbols(realByteSlice(buf.Get()), FormatV2, symbolsStart) + s, err := NewSymbols(RealByteSlice(buf.Get()), FormatV2, symbolsStart) require.NoError(t, err) // We store only 4 offsets to symbols. diff --git a/pkg/storage/stores/tsdb/index_client.go b/pkg/storage/stores/tsdb/index_client.go new file mode 100644 index 0000000000000..d683c81fb6963 --- /dev/null +++ b/pkg/storage/stores/tsdb/index_client.go @@ -0,0 +1,118 @@ +package tsdb + +import ( + "context" + + "github.com/prometheus/common/model" + "github.com/prometheus/prometheus/model/labels" + + "github.com/grafana/loki/pkg/logproto" + "github.com/grafana/loki/pkg/querier/astmapper" + "github.com/grafana/loki/pkg/storage/chunk" + "github.com/grafana/loki/pkg/storage/config" + "github.com/grafana/loki/pkg/storage/stores/tsdb/index" +) + +// implements stores.Index +type IndexClient struct { + schema config.SchemaConfig + idx Index +} + +func NewIndexClient(idx Index, pd config.PeriodConfig) *IndexClient { + return &IndexClient{ + schema: config.SchemaConfig{ + Configs: []config.PeriodConfig{pd}, + }, + idx: idx, + } +} + +// TODO(owen-d): This is a hack for compatibility with how the current query-mapping works. +// Historically, Loki will read the index shard factor and the query planner will inject shard +// labels accordingly. +// In the future, we should use dynamic sharding in TSDB to determine the shard factors +// and we may no longer wish to send a shard label inside the queries, +// but rather expose it as part of the stores.Index interface +func (c *IndexClient) shard(matchers ...*labels.Matcher) ([]*labels.Matcher, *index.ShardAnnotation, error) { + s, shardLabelIndex, err := astmapper.ShardFromMatchers(matchers) + if err != nil { + return nil, nil, err + } + + var shard *index.ShardAnnotation + if s != nil { + matchers = append(matchers[:shardLabelIndex], matchers[shardLabelIndex+1:]...) + shard = &index.ShardAnnotation{ + Shard: uint32(s.Shard), + Of: uint32(s.Of), + } + } + + return matchers, shard, err + +} + +// TODO(owen-d): synchronize logproto.ChunkRef and tsdb.ChunkRef so we don't have to convert. +// They share almost the same fields, so we can add the missing `KB` field to the proto and then +// use that within the tsdb package. +func (c *IndexClient) GetChunkRefs(ctx context.Context, userID string, from, through model.Time, matchers ...*labels.Matcher) ([]logproto.ChunkRef, error) { + matchers, shard, err := c.shard(matchers...) + if err != nil { + return nil, err + } + + // TODO(owen-d): use a pool to reduce allocs here + chks, err := c.idx.GetChunkRefs(ctx, userID, from, through, nil, shard, matchers...) 
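+	// Passing a nil res slice is deliberate: per the Index interface docs, a
+	// non-nil slice would be reused to build the result, which is also the
+	// natural hook for the pooling TODO above.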
+ if err != nil { + return nil, err + } + + refs := make([]logproto.ChunkRef, 0, len(chks)) + for _, chk := range chks { + refs = append(refs, logproto.ChunkRef{ + Fingerprint: uint64(chk.Fingerprint), + UserID: chk.User, + From: chk.Start, + Through: chk.End, + Checksum: chk.Checksum, + }) + } + + return refs, err +} + +func (c *IndexClient) GetSeries(ctx context.Context, userID string, from, through model.Time, matchers ...*labels.Matcher) ([]labels.Labels, error) { + matchers, shard, err := c.shard(matchers...) + if err != nil { + return nil, err + } + + xs, err := c.idx.Series(ctx, userID, from, through, nil, shard, matchers...) + if err != nil { + return nil, err + } + + res := make([]labels.Labels, 0, len(xs)) + for _, x := range xs { + res = append(res, x.Labels) + } + return res, nil +} + +// tsdb no longer uses the __metric_name__="logs" hack, so we can ignore metric names! +func (c *IndexClient) LabelValuesForMetricName(ctx context.Context, userID string, from, through model.Time, metricName string, labelName string, matchers ...*labels.Matcher) ([]string, error) { + return c.idx.LabelValues(ctx, userID, from, through, labelName, matchers...) +} + +// tsdb no longer uses the __metric_name__="logs" hack, so we can ignore metric names! +func (c *IndexClient) LabelNamesForMetricName(ctx context.Context, userID string, from, through model.Time, metricName string) ([]string, error) { + return c.idx.LabelNames(ctx, userID, from, through) +} + +// SetChunkFilterer sets a chunk filter to be used when retrieving chunks. +// This is only used for GetSeries implementation. +// Todo we might want to pass it as a parameter to GetSeries instead. +func (c *IndexClient) SetChunkFilterer(chunkFilter chunk.RequestChunkFilterer) { + c.idx.SetChunkFilterer(chunkFilter) +} diff --git a/pkg/storage/stores/tsdb/lazy_index.go b/pkg/storage/stores/tsdb/lazy_index.go new file mode 100644 index 0000000000000..9a3da11b565cf --- /dev/null +++ b/pkg/storage/stores/tsdb/lazy_index.go @@ -0,0 +1,66 @@ +package tsdb + +import ( + "context" + + "github.com/prometheus/common/model" + "github.com/prometheus/prometheus/model/labels" + + "github.com/grafana/loki/pkg/storage/chunk" + "github.com/grafana/loki/pkg/storage/stores/tsdb/index" +) + +// Index adapter for a function which returns an index when queried. +type LazyIndex func() (Index, error) + +func (f LazyIndex) Bounds() (model.Time, model.Time) { + i, err := f() + if err != nil { + return 0, 0 + } + return i.Bounds() +} + +func (f LazyIndex) SetChunkFilterer(chunkFilter chunk.RequestChunkFilterer) { + i, err := f() + if err == nil { + i.SetChunkFilterer(chunkFilter) + } +} + +func (f LazyIndex) Close() error { + i, err := f() + if err != nil { + return err + } + return i.Close() +} + +func (f LazyIndex) GetChunkRefs(ctx context.Context, userID string, from, through model.Time, res []ChunkRef, shard *index.ShardAnnotation, matchers ...*labels.Matcher) ([]ChunkRef, error) { + i, err := f() + if err != nil { + return nil, err + } + return i.GetChunkRefs(ctx, userID, from, through, res, shard, matchers...) +} +func (f LazyIndex) Series(ctx context.Context, userID string, from, through model.Time, res []Series, shard *index.ShardAnnotation, matchers ...*labels.Matcher) ([]Series, error) { + i, err := f() + if err != nil { + return nil, err + } + return i.Series(ctx, userID, from, through, res, shard, matchers...) 
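+	// NOTE: every LazyIndex method re-invokes the resolver f, so any caching
+	// or memoization is the resolver's responsibility; this type only defers
+	// *when* an index is materialized, not how often.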
+} +func (f LazyIndex) LabelNames(ctx context.Context, userID string, from, through model.Time, matchers ...*labels.Matcher) ([]string, error) { + i, err := f() + if err != nil { + return nil, err + } + return i.LabelNames(ctx, userID, from, through, matchers...) +} +func (f LazyIndex) LabelValues(ctx context.Context, userID string, from, through model.Time, name string, matchers ...*labels.Matcher) ([]string, error) { + i, err := f() + if err != nil { + return nil, err + } + return i.LabelValues(ctx, userID, from, through, name, matchers...) +} diff --git a/pkg/storage/stores/tsdb/manager.go b/pkg/storage/stores/tsdb/manager.go new file mode 100644 index 0000000000000..1ee6e04ccfc66 --- /dev/null +++ b/pkg/storage/stores/tsdb/manager.go @@ -0,0 +1,284 @@ +package tsdb + +import ( + "context" + "fmt" + "math" + "path/filepath" + "sync" + "time" + + "github.com/go-kit/log" + "github.com/go-kit/log/level" + "github.com/pkg/errors" + "github.com/prometheus/common/model" + "github.com/prometheus/prometheus/model/labels" + + "github.com/grafana/loki/pkg/storage/chunk" + "github.com/grafana/loki/pkg/storage/stores/indexshipper" + shipper_index "github.com/grafana/loki/pkg/storage/stores/indexshipper/index" + "github.com/grafana/loki/pkg/storage/stores/tsdb/index" +) + +// nolint:revive +// TSDBManager wraps the index shipper and writes/manages +// TSDB files on disk +type TSDBManager interface { + Index + // Builds a new TSDB file from a set of WALs + BuildFromWALs(time.Time, []WALIdentifier) error +} + +/* +tsdbManager is responsible for: + * Turning WALs into optimized multi-tenant TSDBs when requested + * Serving reads from these TSDBs + * Shipping them to remote storage + * Keeping them available for querying + * Removing old TSDBs which are no longer needed +*/ +type tsdbManager struct { + indexPeriod time.Duration + nodeName string // node name + log log.Logger + dir string + metrics *Metrics + + sync.RWMutex + + chunkFilter chunk.RequestChunkFilterer + shipper indexshipper.IndexShipper +} + +func NewTSDBManager( + nodeName, + dir string, + shipper indexshipper.IndexShipper, + indexPeriod time.Duration, + logger log.Logger, + metrics *Metrics, +) TSDBManager { + return &tsdbManager{ + indexPeriod: indexPeriod, + nodeName: nodeName, + log: log.With(logger, "component", "tsdb-manager"), + dir: dir, + metrics: metrics, + shipper: shipper, + } +} + +func (m *tsdbManager) BuildFromWALs(t time.Time, ids []WALIdentifier) (err error) { + level.Debug(m.log).Log("msg", "building WALs", "n", len(ids), "ts", t) + // get relevant wals + // iterate them, build tsdb in scratch dir + defer func() { + m.metrics.tsdbCreationsTotal.Inc() + if err != nil { + m.metrics.tsdbCreationFailures.Inc() + } + }() + + level.Debug(m.log).Log("msg", "recovering tenant heads") + tmp := newTenantHeads(t, defaultHeadManagerStripeSize, m.metrics, m.log) + if err = recoverHead(m.dir, tmp, ids); err != nil { + return errors.Wrap(err, "building TSDB from WALs") + } + + periods := make(map[int]*Builder) + + if err := tmp.forAll(func(user string, ls labels.Labels, chks index.ChunkMetas) { + + // chunks may overlap index period bounds, in which case they're written to multiple + pds := make(map[int]index.ChunkMetas) + for _, chk := range chks { + for _, bucket := range indexBuckets(m.indexPeriod, chk.From(), chk.Through()) { + pds[bucket] = append(pds[bucket], chk) + } + } + + // Embed the tenant label into TSDB + lb := labels.NewBuilder(ls) + lb.Set(TenantLabel, user) + withTenant := lb.Labels() + + // Add the chunks to all 
relevant builders + for pd, matchingChks := range pds { + b, ok := periods[pd] + if !ok { + b = NewBuilder() + periods[pd] = b + } + + b.AddSeries( + withTenant, + // use the fingerprint without the added tenant label + // so queries route to the chunks which actually exist. + model.Fingerprint(ls.Hash()), + matchingChks, + ) + } + + }); err != nil { + level.Error(m.log).Log("err", err.Error(), "msg", "building TSDB from WALs") + return err + } + + for p, b := range periods { + + dstDir := filepath.Join(managerMultitenantDir(m.dir), fmt.Sprint(p)) + dst := newPrefixedIdentifier( + MultitenantTSDBIdentifier{ + nodeName: m.nodeName, + ts: t, + }, + dstDir, + "", + ) + + level.Debug(m.log).Log("msg", "building tsdb for period", "pd", p, "dst", dst.Path()) + // build+move tsdb to multitenant dir + start := time.Now() + _, err = b.Build( + context.Background(), + managerScratchDir(m.dir), + func(from, through model.Time, checksum uint32) Identifier { + return dst + }, + ) + if err != nil { + return err + } + + level.Debug(m.log).Log("msg", "finished building tsdb for period", "pd", p, "dst", dst.Path(), "duration", time.Since(start)) + + loaded, err := NewShippableTSDBFile(dst, false) + if err != nil { + return err + } + + if err := m.shipper.AddIndex(fmt.Sprintf("%d", p), "", loaded); err != nil { + return err + } + } + + return nil +} + +func indexBuckets(indexPeriod time.Duration, from, through model.Time) (res []int) { + start := from.Time().UnixNano() / int64(indexPeriod) + end := through.Time().UnixNano() / int64(indexPeriod) + for cur := start; cur <= end; cur++ { + res = append(res, int(cur)) + } + return +} + +func (m *tsdbManager) indices(ctx context.Context, from, through model.Time, user string) (Index, error) { + var indices []Index + + // Ensure we query both per tenant and multitenant TSDBs + + for _, bkt := range indexBuckets(m.indexPeriod, from, through) { + if err := m.shipper.ForEach(ctx, fmt.Sprintf("%d", bkt), user, func(idx shipper_index.Index) error { + _, multitenant := parseMultitenantTSDBName(idx.Name()) + impl, ok := idx.(Index) + if !ok { + return fmt.Errorf("unexpected shipper index type: %T", idx) + } + if multitenant { + indices = append(indices, NewMultiTenantIndex(impl)) + } else { + indices = append(indices, impl) + } + return nil + }); err != nil { + return nil, err + } + + } + + if len(indices) == 0 { + return NoopIndex{}, nil + } + idx, err := NewMultiIndex(indices...) + if err != nil { + return nil, err + } + + if m.chunkFilter != nil { + idx.SetChunkFilterer(m.chunkFilter) + } + return idx, nil +} + +// TODO(owen-d): how to better implement this? +// setting 0->maxint will force the tsdbmanager to always query +// underlying tsdbs, which is safe, but can we optimize this? +func (m *tsdbManager) Bounds() (model.Time, model.Time) { + return 0, math.MaxInt64 +} + +func (m *tsdbManager) SetChunkFilterer(chunkFilter chunk.RequestChunkFilterer) { + m.chunkFilter = chunkFilter +} + +// Close implements Index.Close, but we offload this responsibility +// to the index shipper +func (m *tsdbManager) Close() error { + return nil +} + +func (m *tsdbManager) GetChunkRefs(ctx context.Context, userID string, from, through model.Time, res []ChunkRef, shard *index.ShardAnnotation, matchers ...*labels.Matcher) ([]ChunkRef, error) { + idx, err := m.indices(ctx, from, through, userID) + if err != nil { + return nil, err + } + matchers = withoutNameLabel(matchers) + return idx.GetChunkRefs(ctx, userID, from, through, res, shard, matchers...) 
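+	// __name__ matchers are stripped above since TSDB indices don't store the
+	// legacy __metric_name__="logs" convention (see withoutNameLabel below).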
+} + +func (m *tsdbManager) Series(ctx context.Context, userID string, from, through model.Time, res []Series, shard *index.ShardAnnotation, matchers ...*labels.Matcher) ([]Series, error) { + idx, err := m.indices(ctx, from, through, userID) + if err != nil { + return nil, err + } + matchers = withoutNameLabel(matchers) + return idx.Series(ctx, userID, from, through, res, shard, matchers...) +} + +func (m *tsdbManager) LabelNames(ctx context.Context, userID string, from, through model.Time, matchers ...*labels.Matcher) ([]string, error) { + idx, err := m.indices(ctx, from, through, userID) + if err != nil { + return nil, err + } + matchers = withoutNameLabel(matchers) + return idx.LabelNames(ctx, userID, from, through, matchers...) +} + +func (m *tsdbManager) LabelValues(ctx context.Context, userID string, from, through model.Time, name string, matchers ...*labels.Matcher) ([]string, error) { + idx, err := m.indices(ctx, from, through, userID) + if err != nil { + return nil, err + } + matchers = withoutNameLabel(matchers) + return idx.LabelValues(ctx, userID, from, through, name, matchers...) +} + +// TODO(owen-d): in the future, handle this by preventing passing the __name__="logs" label +// to TSDB indices at all. +func withoutNameLabel(matchers []*labels.Matcher) []*labels.Matcher { + if len(matchers) == 0 { + return nil + } + + dst := make([]*labels.Matcher, 0, len(matchers)-1) + for _, m := range matchers { + if m.Name == labels.MetricName { + continue + } + dst = append(dst, m) + } + + return dst +} diff --git a/pkg/storage/stores/tsdb/multi_file_index.go b/pkg/storage/stores/tsdb/multi_file_index.go index daa6240f58bc8..9a74455a0fee6 100644 --- a/pkg/storage/stores/tsdb/multi_file_index.go +++ b/pkg/storage/stores/tsdb/multi_file_index.go @@ -3,35 +3,25 @@ package tsdb import ( "context" "errors" - "sort" + "github.com/grafana/dskit/multierror" "github.com/prometheus/common/model" "github.com/prometheus/prometheus/model/labels" "golang.org/x/sync/errgroup" + "github.com/grafana/loki/pkg/storage/chunk" "github.com/grafana/loki/pkg/storage/stores/tsdb/index" ) type MultiIndex struct { - indices []*TSDBIndex + indices []Index } -func NewMultiIndex(indices ...*TSDBIndex) (*MultiIndex, error) { +func NewMultiIndex(indices ...Index) (*MultiIndex, error) { if len(indices) == 0 { return nil, errors.New("must supply at least one index") } - sort.Slice(indices, func(i, j int) bool { - aFrom, aThrough := indices[i].Bounds() - bFrom, bThrough := indices[j].Bounds() - - if aFrom != bFrom { - return aFrom < bFrom - } - // tiebreaker uses through - return aThrough <= bThrough - }) - return &MultiIndex{indices: indices}, nil } @@ -51,7 +41,24 @@ func (i *MultiIndex) Bounds() (model.Time, model.Time) { return lowest, highest } -func (i *MultiIndex) forIndices(ctx context.Context, from, through model.Time, fn func(context.Context, *TSDBIndex) (interface{}, error)) ([]interface{}, error) { +func (i *MultiIndex) SetChunkFilterer(chunkFilter chunk.RequestChunkFilterer) { + for _, x := range i.indices { + x.SetChunkFilterer(chunkFilter) + } +} + +func (i *MultiIndex) Close() error { + var errs multierror.MultiError + for _, idx := range i.indices { + if err := idx.Close(); err != nil { + errs = append(errs, err) + } + } + return errs.Err() + +} + +func (i *MultiIndex) forIndices(ctx context.Context, from, through model.Time, fn func(context.Context, Index) (interface{}, error)) ([]interface{}, error) { queryBounds := newBounds(from, through) g, ctx := errgroup.WithContext(ctx) @@ -64,14 +71,20 @@ func (i 
*MultiIndex) forIndices(ctx context.Context, from, through model.Time, f if Overlap(queryBounds, idx) { // run all queries in linked goroutines (cancel after first err), // bounded by parallelism controls if applicable. - g.Go(func() error { - got, err := fn(ctx, idx) - if err != nil { - return err - } - ch <- got - return nil - }) + + // must wrap g.Go in anonymous function to capture + // idx variable during iteration + func(idx Index) { + g.Go(func() error { + got, err := fn(ctx, idx) + if err != nil { + return err + } + ch <- got + return nil + }) + }(idx) + } } @@ -94,7 +107,7 @@ func (i *MultiIndex) GetChunkRefs(ctx context.Context, userID string, from, thro } res = res[:0] - groups, err := i.forIndices(ctx, from, through, func(ctx context.Context, idx *TSDBIndex) (interface{}, error) { + groups, err := i.forIndices(ctx, from, through, func(ctx context.Context, idx Index) (interface{}, error) { return idx.GetChunkRefs(ctx, userID, from, through, nil, shard, matchers...) }) if err != nil { @@ -128,7 +141,7 @@ func (i *MultiIndex) Series(ctx context.Context, userID string, from, through mo } res = res[:0] - groups, err := i.forIndices(ctx, from, through, func(ctx context.Context, idx *TSDBIndex) (interface{}, error) { + groups, err := i.forIndices(ctx, from, through, func(ctx context.Context, idx Index) (interface{}, error) { return idx.Series(ctx, userID, from, through, nil, shard, matchers...) }) if err != nil { @@ -154,7 +167,7 @@ func (i *MultiIndex) Series(ctx context.Context, userID string, from, through mo } func (i *MultiIndex) LabelNames(ctx context.Context, userID string, from, through model.Time, matchers ...*labels.Matcher) ([]string, error) { - groups, err := i.forIndices(ctx, from, through, func(ctx context.Context, idx *TSDBIndex) (interface{}, error) { + groups, err := i.forIndices(ctx, from, through, func(ctx context.Context, idx Index) (interface{}, error) { return idx.LabelNames(ctx, userID, from, through, matchers...) }) if err != nil { @@ -189,7 +202,7 @@ func (i *MultiIndex) LabelNames(ctx context.Context, userID string, from, throug } func (i *MultiIndex) LabelValues(ctx context.Context, userID string, from, through model.Time, name string, matchers ...*labels.Matcher) ([]string, error) { - groups, err := i.forIndices(ctx, from, through, func(ctx context.Context, idx *TSDBIndex) (interface{}, error) { + groups, err := i.forIndices(ctx, from, through, func(ctx context.Context, idx Index) (interface{}, error) { return idx.LabelValues(ctx, userID, from, through, name, matchers...) 
 	})
 	if err != nil {
diff --git a/pkg/storage/stores/tsdb/multi_file_index_test.go b/pkg/storage/stores/tsdb/multi_file_index_test.go
index fc5b0253ce57b..aaf00324b65e7 100644
--- a/pkg/storage/stores/tsdb/multi_file_index_test.go
+++ b/pkg/storage/stores/tsdb/multi_file_index_test.go
@@ -58,7 +58,7 @@ func TestMultiIndex(t *testing.T) {
 			// group 5 indices together, all with duplicate data
 			n := 5
-			var indices []*TSDBIndex
+			var indices []Index
 			dir := t.TempDir()
 			for i := 0; i < n; i++ {
 				indices = append(indices, BuildIndex(t, dir, "fake", cases))
diff --git a/pkg/storage/stores/tsdb/multitenant.go b/pkg/storage/stores/tsdb/multitenant.go
new file mode 100644
index 0000000000000..f3a26aa7db333
--- /dev/null
+++ b/pkg/storage/stores/tsdb/multitenant.go
@@ -0,0 +1,74 @@
+package tsdb
+
+import (
+	"context"
+
+	"github.com/prometheus/common/model"
+	"github.com/prometheus/prometheus/model/labels"
+
+	"github.com/grafana/loki/pkg/storage/chunk"
+	"github.com/grafana/loki/pkg/storage/stores/tsdb/index"
+)
+
+// TenantLabel is part of the reserved label namespace (__ prefix)
+// It's used to create multi-tenant TSDBs (which do not have a tenancy concept)
+// These labels are stripped out during compaction to single-tenant TSDBs
+const TenantLabel = "__loki_tenant__"
+
+// MultiTenantIndex will inject a tenant label into its queries
+// This works with pre-compacted TSDBs which aren't yet per tenant.
+type MultiTenantIndex struct {
+	idx Index
+}
+
+func NewMultiTenantIndex(idx Index) *MultiTenantIndex {
+	return &MultiTenantIndex{idx: idx}
+}
+
+func withTenantLabel(userID string, matchers []*labels.Matcher) []*labels.Matcher {
+	cpy := make([]*labels.Matcher, len(matchers))
+	copy(cpy, matchers)
+	cpy = append(cpy, labels.MustNewMatcher(labels.MatchEqual, TenantLabel, userID))
+	return cpy
+}
+
+func withoutTenantLabel(ls labels.Labels) labels.Labels {
+	for i, l := range ls {
+		if l.Name == TenantLabel {
+			ls = append(ls[:i], ls[i+1:]...)
+			break
+		}
+	}
+	return ls
+}
+
+func (m *MultiTenantIndex) Bounds() (model.Time, model.Time) { return m.idx.Bounds() }
+
+func (m *MultiTenantIndex) SetChunkFilterer(chunkFilter chunk.RequestChunkFilterer) {
+	m.idx.SetChunkFilterer(chunkFilter)
+}
+
+func (m *MultiTenantIndex) Close() error { return m.idx.Close() }
+
+func (m *MultiTenantIndex) GetChunkRefs(ctx context.Context, userID string, from, through model.Time, res []ChunkRef, shard *index.ShardAnnotation, matchers ...*labels.Matcher) ([]ChunkRef, error) {
+	return m.idx.GetChunkRefs(ctx, userID, from, through, res, shard, withTenantLabel(userID, matchers)...)
+}
+
+func (m *MultiTenantIndex) Series(ctx context.Context, userID string, from, through model.Time, res []Series, shard *index.ShardAnnotation, matchers ...*labels.Matcher) ([]Series, error) {
+	xs, err := m.idx.Series(ctx, userID, from, through, res, shard, withTenantLabel(userID, matchers)...)
+	if err != nil {
+		return nil, err
+	}
+	for i := range xs {
+		xs[i].Labels = withoutTenantLabel(xs[i].Labels)
+	}
+	return xs, nil
+}
+
+func (m *MultiTenantIndex) LabelNames(ctx context.Context, userID string, from, through model.Time, matchers ...*labels.Matcher) ([]string, error) {
+	return m.idx.LabelNames(ctx, userID, from, through, withTenantLabel(userID, matchers)...)
+}
+
+func (m *MultiTenantIndex) LabelValues(ctx context.Context, userID string, from, through model.Time, name string, matchers ...*labels.Matcher) ([]string, error) {
+	return m.idx.LabelValues(ctx, userID, from, through, name, withTenantLabel(userID, matchers)...)
+}
diff --git a/pkg/storage/stores/tsdb/querier_test.go b/pkg/storage/stores/tsdb/querier_test.go
index 013aed0a6e5af..3aca71057d3d1 100644
--- a/pkg/storage/stores/tsdb/querier_test.go
+++ b/pkg/storage/stores/tsdb/querier_test.go
@@ -4,6 +4,7 @@ import (
 	"context"
 	"testing"
 
+	"github.com/prometheus/common/model"
 	"github.com/prometheus/prometheus/model/labels"
 	"github.com/stretchr/testify/require"
 
@@ -21,7 +22,7 @@ func mustParseLabels(s string) labels.Labels {
 
 func TestQueryIndex(t *testing.T) {
 	dir := t.TempDir()
-	b := index.NewBuilder()
+	b := NewBuilder()
 	cases := []struct {
 		labels labels.Labels
 		chunks []index.ChunkMeta
@@ -85,13 +86,21 @@ func TestQueryIndex(t *testing.T) {
 		},
 	}
 	for _, s := range cases {
-		b.AddSeries(s.labels, s.chunks)
+		b.AddSeries(s.labels, model.Fingerprint(s.labels.Hash()), s.chunks)
 	}
 
-	dst, err := b.Build(context.Background(), dir, "fake")
+	dst, err := b.Build(context.Background(), dir, func(from, through model.Time, checksum uint32) Identifier {
+		id := SingleTenantTSDBIdentifier{
+			Tenant:   "fake",
+			From:     from,
+			Through:  through,
+			Checksum: checksum,
+		}
+		return newPrefixedIdentifier(id, dir, dir)
+	})
 	require.Nil(t, err)
 
-	reader, err := index.NewFileReader(dst.FilePath(dir))
+	reader, err := index.NewFileReader(dst.Path())
 	require.Nil(t, err)
 
 	p, err := PostingsForMatchers(reader, nil, labels.MustNewMatcher(labels.MatchEqual, "foo", "bar"))
diff --git a/pkg/storage/stores/tsdb/single_file_index.go b/pkg/storage/stores/tsdb/single_file_index.go
index ddbe8080ee353..9937d1fd27085 100644
--- a/pkg/storage/stores/tsdb/single_file_index.go
+++ b/pkg/storage/stores/tsdb/single_file_index.go
@@ -1,30 +1,114 @@
 package tsdb
 
 import (
+	"bytes"
 	"context"
+	"io"
+	"io/ioutil"
+	"strings"
 
 	"github.com/prometheus/common/model"
 	"github.com/prometheus/prometheus/model/labels"
 
+	"github.com/grafana/loki/pkg/chunkenc"
+	"github.com/grafana/loki/pkg/storage/chunk"
+	index_shipper "github.com/grafana/loki/pkg/storage/stores/indexshipper/index"
 	"github.com/grafana/loki/pkg/storage/stores/tsdb/index"
 )
 
-func LoadTSDBIdentifier(dir string, id index.Identifier) (*TSDBIndex, error) {
-	return LoadTSDB(id.FilePath(dir))
+const (
+	gzipSuffix = ".gz"
+)
+
+func OpenShippableTSDB(p string) (index_shipper.Index, error) {
+	var gz bool
+	trimmed := strings.TrimSuffix(p, gzipSuffix)
+	if trimmed != p {
+		gz = true
+	}
+
+	id, err := identifierFromPath(trimmed)
+	if err != nil {
+		return nil, err
+	}
+
+	return NewShippableTSDBFile(id, gz)
+}
+
+// nolint
+// TSDBFile is backed by an actual file and implements the indexshipper/index.Index interface
+type TSDBFile struct {
+	// reuse Identifier for resolving locations
+	Identifier
+
+	// reuse TSDBIndex for reading
+	Index
+
+	// to satisfy Reader() and Close() methods
+	r io.ReadSeeker
 }
 
-func LoadTSDB(name string) (*TSDBIndex, error) {
-	reader, err := index.NewFileReader(name)
+func NewShippableTSDBFile(id Identifier, gzip bool) (*TSDBFile, error) {
+	if gzip {
+		id = newSuffixedIdentifier(id, gzipSuffix)
+	}
+
+	idx, b, err := NewTSDBIndexFromFile(id.Path(), gzip)
 	if err != nil {
 		return nil, err
 	}
 
-	return NewTSDBIndex(reader), nil
+	return &TSDBFile{
+		Identifier: id,
+		Index:      idx,
+		r:          bytes.NewReader(b),
+	}, err
+}
+
+func (f *TSDBFile) Close() error {
+	return f.Index.Close()
+}
+
+func (f *TSDBFile) Reader() (io.ReadSeeker, error) {
+	return f.r, nil
 }
 
 // nolint
+// TSDBIndex is backed by an IndexReader
+// and translates the IndexReader to an Index implementation
+// It loads the file into memory and doesn't keep a file descriptor open
 type TSDBIndex struct {
-	reader IndexReader
+	reader      IndexReader
+	chunkFilter chunk.RequestChunkFilterer
+}
+
+// Return the index as well as the underlying []byte which isn't exposed as an index
+// method but is helpful for building an io.Reader for the index shipper
+func NewTSDBIndexFromFile(location string, gzip bool) (*TSDBIndex, []byte, error) {
+	raw, err := ioutil.ReadFile(location)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	cleaned := raw
+
+	// decompress if needed
+	if gzip {
+		r := chunkenc.Gzip.GetReader(bytes.NewReader(raw))
+		defer chunkenc.Gzip.PutReader(r)
+
+		var err error
+		cleaned, err = io.ReadAll(r)
+		if err != nil {
+			return nil, nil, err
+		}
+	}
+
+	reader, err := index.NewReader(index.RealByteSlice(cleaned))
+	if err != nil {
+		return nil, nil, err
+	}
+	return NewTSDBIndex(reader), cleaned, nil
 }
 
 func NewTSDBIndex(reader IndexReader) *TSDBIndex {
@@ -42,9 +126,14 @@ func (i *TSDBIndex) Bounds() (model.Time, model.Time) {
 	return model.Time(from), model.Time(through)
 }
 
+func (i *TSDBIndex) SetChunkFilterer(chunkFilter chunk.RequestChunkFilterer) {
+	i.chunkFilter = chunkFilter
+}
+
 // fn must NOT capture its arguments. They're reused across series iterations and returned to
 // a pool after completion.
 func (i *TSDBIndex) forSeries(
+	ctx context.Context,
 	shard *index.ShardAnnotation,
 	fn func(labels.Labels, model.Fingerprint, []index.ChunkMeta),
 	matchers ...*labels.Matcher,
@@ -58,6 +147,11 @@ func (i *TSDBIndex) forSeries(
 	chks := ChunkMetasPool.Get()
 	defer ChunkMetasPool.Put(chks)
 
+	var filterer chunk.Filterer
+	if i.chunkFilter != nil {
+		filterer = i.chunkFilter.ForRequest(ctx)
+	}
+
 	for p.Next() {
 		hash, err := i.reader.Series(p.At(), &ls, &chks)
 		if err != nil {
@@ -69,19 +163,23 @@ func (i *TSDBIndex) forSeries(
 			continue
 		}
 
+		if filterer != nil && filterer.ShouldFilter(ls) {
+			continue
+		}
+
 		fn(ls, model.Fingerprint(hash), chks)
 	}
 	return p.Err()
 }
 
-func (i *TSDBIndex) GetChunkRefs(_ context.Context, userID string, from, through model.Time, res []ChunkRef, shard *index.ShardAnnotation, matchers ...*labels.Matcher) ([]ChunkRef, error) {
+func (i *TSDBIndex) GetChunkRefs(ctx context.Context, userID string, from, through model.Time, res []ChunkRef, shard *index.ShardAnnotation, matchers ...*labels.Matcher) ([]ChunkRef, error) {
 	queryBounds := newBounds(from, through)
 	if res == nil {
 		res = ChunkRefsPool.Get()
 	}
 	res = res[:0]
 
-	if err := i.forSeries(shard,
+	if err := i.forSeries(ctx, shard,
 		func(ls labels.Labels, fp model.Fingerprint, chks []index.ChunkMeta) {
 			// TODO(owen-d): use logarithmic approach
 			for _, chk := range chks {
@@ -107,14 +205,14 @@ func (i *TSDBIndex) GetChunkRefs(_ context.Context, userID string, from, through
 	return res, nil
 }
 
-func (i *TSDBIndex) Series(_ context.Context, _ string, from, through model.Time, res []Series, shard *index.ShardAnnotation, matchers ...*labels.Matcher) ([]Series, error) {
+func (i *TSDBIndex) Series(ctx context.Context, _ string, from, through model.Time, res []Series, shard *index.ShardAnnotation, matchers ...*labels.Matcher) ([]Series, error) {
 	queryBounds := newBounds(from, through)
 	if res == nil {
 		res = SeriesPool.Get()
 	}
 	res = res[:0]
 
-	if err := i.forSeries(shard,
+	if err := i.forSeries(ctx, shard,
 		func(ls labels.Labels, fp model.Fingerprint, chks []index.ChunkMeta) {
 			// TODO(owen-d): use logarithmic approach
 			for _, chk := range chks {
@@ -154,9 +252,9 @@ func (i *TSDBIndex) Checksum() uint32 {
 	return i.reader.Checksum()
 }
 
-func (i *TSDBIndex) Identifier(tenant string) index.Identifier {
+func (i *TSDBIndex) Identifier(tenant string) SingleTenantTSDBIdentifier { lower, upper := i.Bounds() - return index.Identifier{ + return SingleTenantTSDBIdentifier{ Tenant: tenant, From: lower, Through: upper, diff --git a/pkg/storage/stores/tsdb/single_file_index_test.go b/pkg/storage/stores/tsdb/single_file_index_test.go index f5bd7988b91f6..0105afcda38db 100644 --- a/pkg/storage/stores/tsdb/single_file_index_test.go +++ b/pkg/storage/stores/tsdb/single_file_index_test.go @@ -59,23 +59,22 @@ func TestSingleIdx(t *testing.T) { for _, variant := range []struct { desc string - fn func() *TSDBIndex + fn func() Index }{ { desc: "file", - fn: func() *TSDBIndex { + fn: func() Index { return BuildIndex(t, t.TempDir(), "fake", cases) }, }, { desc: "head", - fn: func() *TSDBIndex { - head := NewHead("fake", NewHeadMetrics(nil), log.NewNopLogger()) + fn: func() Index { + head := NewHead("fake", NewMetrics(nil), log.NewNopLogger()) for _, x := range cases { - head.Append(x.Labels, x.Chunks) + _, _ = head.Append(x.Labels, x.Chunks) } - reader, err := head.Index() - require.Nil(t, err) + reader := head.Index() return NewTSDBIndex(reader) }, }, diff --git a/pkg/storage/stores/tsdb/util_test.go b/pkg/storage/stores/tsdb/util_test.go index 22b30189f8634..4b95d0c381a7c 100644 --- a/pkg/storage/stores/tsdb/util_test.go +++ b/pkg/storage/stores/tsdb/util_test.go @@ -4,6 +4,7 @@ import ( "context" "testing" + "github.com/prometheus/common/model" "github.com/prometheus/prometheus/model/labels" "github.com/stretchr/testify/require" @@ -15,18 +16,25 @@ type LoadableSeries struct { Chunks index.ChunkMetas } -func BuildIndex(t *testing.T, dir, tenant string, cases []LoadableSeries) *TSDBIndex { - b := index.NewBuilder() +func BuildIndex(t *testing.T, dir, tenant string, cases []LoadableSeries) *TSDBFile { + b := NewBuilder() for _, s := range cases { - b.AddSeries(s.Labels, s.Chunks) + b.AddSeries(s.Labels, model.Fingerprint(s.Labels.Hash()), s.Chunks) } - dst, err := b.Build(context.Background(), dir, tenant) + dst, err := b.Build(context.Background(), dir, func(from, through model.Time, checksum uint32) Identifier { + id := SingleTenantTSDBIdentifier{ + Tenant: tenant, + From: from, + Through: through, + Checksum: checksum, + } + return newPrefixedIdentifier(id, dir, dir) + }) require.Nil(t, err) - location := dst.FilePath(dir) - idx, err := LoadTSDB(location) + idx, err := NewShippableTSDBFile(dst, false) require.Nil(t, err) return idx } diff --git a/pkg/util/wal/reader.go b/pkg/util/wal/reader.go new file mode 100644 index 0000000000000..f94b10b84b9cd --- /dev/null +++ b/pkg/util/wal/reader.go @@ -0,0 +1,42 @@ +package wal + +import ( + "errors" + "io" + + "github.com/prometheus/prometheus/tsdb/wal" +) + +// If startSegment is <0, it means all the segments. +func NewWalReader(dir string, startSegment int) (*wal.Reader, io.Closer, error) { + var ( + segmentReader io.ReadCloser + err error + ) + if startSegment < 0 { + segmentReader, err = wal.NewSegmentsReader(dir) + if err != nil { + return nil, nil, err + } + } else { + first, last, err := wal.Segments(dir) + if err != nil { + return nil, nil, err + } + if startSegment > last { + return nil, nil, errors.New("start segment is beyond the last WAL segment") + } + if first > startSegment { + startSegment = first + } + segmentReader, err = wal.NewSegmentsRangeReader(wal.SegmentRange{ + Dir: dir, + First: startSegment, + Last: -1, // Till the end. 
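+			// Note: per the Prometheus wal package, a negative Last makes the
+			// range reader continue through the newest segment on disk,
+			// mirroring the startSegment < 0 convention documented above.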
+		})
+		if err != nil {
+			return nil, nil, err
+		}
+	}
+	return wal.NewReader(segmentReader), segmentReader, nil
+}
diff --git a/tools/tsdb/tsdb-map/main.go b/tools/tsdb/tsdb-map/main.go
index a648a746ded8d..308afff92adad 100644
--- a/tools/tsdb/tsdb-map/main.go
+++ b/tools/tsdb/tsdb-map/main.go
@@ -7,12 +7,14 @@ import (
 	"log"
 	"strconv"
 
+	"github.com/prometheus/common/model"
 	"go.etcd.io/bbolt"
 	"gopkg.in/yaml.v2"
 
 	"github.com/grafana/loki/pkg/storage/config"
 	"github.com/grafana/loki/pkg/storage/stores/shipper/compactor/retention"
 	shipper_util "github.com/grafana/loki/pkg/storage/stores/shipper/util"
+	"github.com/grafana/loki/pkg/storage/stores/tsdb"
 	"github.com/grafana/loki/pkg/storage/stores/tsdb/index"
 )
 
@@ -64,7 +66,7 @@ func main() {
 		panic(err)
 	}
 
-	builder := index.NewBuilder()
+	builder := tsdb.NewBuilder()
 
 	log.Println("Loading index into memory")
 
@@ -80,7 +82,7 @@ func main() {
 				return it.Err()
 			}
 			entry := it.Entry()
-			builder.AddSeries(entry.Labels, []index.ChunkMeta{{
+			builder.AddSeries(entry.Labels, model.Fingerprint(entry.Labels.Hash()), []index.ChunkMeta{{
 				Checksum: extractChecksumFromChunkID(entry.ChunkID),
 				MinTime:  int64(entry.From),
 				MaxTime:  int64(entry.Through),
@@ -95,7 +97,9 @@ func main() {
 	}
 
 	log.Println("writing index")
-	if _, err := builder.Build(context.Background(), *dest, "fake"); err != nil {
+	if _, err := builder.Build(context.Background(), *dest, func(from, through model.Time, checksum uint32) tsdb.Identifier {
+		panic("todo")
+	}); err != nil {
 		panic(err)
 	}
 }

From ebb39db864f56986cb8385166d220980fd8be684 Mon Sep 17 00:00:00 2001
From: Artem Timchenko <77285301+timchenko-a@users.noreply.github.com>
Date: Thu, 5 May 2022 14:22:00 +0300
Subject: [PATCH 073/223] lambda-promtail: Add multi-tenancy support (#6102)

* lambda-promtail: adding multi-tenancy support

* update changelog and fix import order

* fix import order

* fix import order
---
 CHANGELOG.md                                      |  1 +
 docs/sources/clients/lambda-promtail/_index.md    |  6 ++++++
 tools/lambda-promtail/README.md                   |  4 ++--
 tools/lambda-promtail/lambda-promtail/main.go     | 14 ++++++++------
 tools/lambda-promtail/lambda-promtail/promtail.go |  4 ++++
 tools/lambda-promtail/main.tf                     |  1 +
 tools/lambda-promtail/template.yaml               |  5 +++++
 tools/lambda-promtail/variables.tf                |  6 ++++++
 8 files changed, 33 insertions(+), 8 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index dbbb274439f93..53d8e56b44fe2 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,6 @@
 ## Main
 
+* [6102](https://github.com/grafana/loki/pull/6102) **timchenko-a**: Add multi-tenancy support to lambda-promtail
 * [5971](https://github.com/grafana/loki/pull/5971) **kavirajk**: Record statistics about metadata queries such as labels and series queries in `metrics.go` as well
 * [5790](https://github.com/grafana/loki/pull/5790) **chaudum**: Add UDP support for Promtail's syslog target.
 * [5984](https://github.com/grafana/loki/pull/5984) **dannykopping** and **salvacorts**: Querier: prevent unnecessary calls to ingesters.
diff --git a/docs/sources/clients/lambda-promtail/_index.md b/docs/sources/clients/lambda-promtail/_index.md
index 01e10254f3d1c..e17a7167bc269 100644
--- a/docs/sources/clients/lambda-promtail/_index.md
+++ b/docs/sources/clients/lambda-promtail/_index.md
@@ -22,6 +22,8 @@ There's also a flag to keep the log stream label when propagating the logs from
 
 Additionally, an environment variable can be configured to add extra labels to the logs streamed by lambda-promtail.
 These extra labels will take the form `__extra_<name>=<value>`
 
+An optional environment variable can be configured to add a tenant ID to the logs streamed by lambda-promtail.
+
 In an effort to make deployment of lambda-promtail as simple as possible, we've created a [public ECR repo](https://gallery.ecr.aws/grafana/lambda-promtail) to publish our builds of lambda-promtail. Users are still able to clone this repo, make their own modifications to the Go code, and upload their own image to their own ECR repo if they wish.
 
 ### Examples
@@ -42,6 +44,8 @@ To keep the log group label add `-var "keep_stream=true"`.
 
 To add extra labels add `-var 'extra_labels="name1,value1,name2,value2"'`
 
+To add a tenant ID add `-var "tenant_id=value"`
+
 Note that the creation of subscription filters on Cloudwatch in the provided Terraform file only accepts an array of log group names; it does **not** accept strings for regex filtering on the logs contents via the subscription filters. We suggest extending the Terraform file to do so, or having lambda-promtail write to Promtail and using [pipeline stages](https://grafana.com/docs/loki/latest/clients/promtail/stages/drop/).
 
 CloudFormation:
@@ -63,6 +67,8 @@ To keep the log group label add `ParameterKey=KeepStream,ParameterValue=true`.
 
 To add extra labels, include `ParameterKey=ExtraLabels,ParameterValue="name1,value1,name2,value2"`
 
+To add a tenant ID add `ParameterKey=TenantID,ParameterValue=value`.
+
 To modify an already created CloudFormation stack you need to use [update-stack](https://docs.aws.amazon.com/cli/latest/reference/cloudformation/update-stack.html).
 
 ## Uses
diff --git a/tools/lambda-promtail/README.md b/tools/lambda-promtail/README.md
index f2188a476bde6..b828657683a99 100644
--- a/tools/lambda-promtail/README.md
+++ b/tools/lambda-promtail/README.md
@@ -50,13 +50,13 @@ Also, if your deployment requires a [VPC configuration](https://registry.terrafo
 
 Then use Terraform to deploy:
 
 ```bash
-terraform apply -var ":" -var "write_address=https://your-loki-url/loki/api/v1/push" -var "password=" -var "username=" -var 'log_group_names=["log-group-01", "log-group-02"]' -var 'extra_labels="name1,value1,name2,value2"'
+terraform apply -var ":" -var "write_address=https://your-loki-url/loki/api/v1/push" -var "password=" -var "username=" -var 'log_group_names=["log-group-01", "log-group-02"]' -var 'extra_labels="name1,value1,name2,value2"' -var "tenant_id="
 ```
 
 or CloudFormation:
 
 ```bash
-aws cloudformation create-stack --stack-name lambda-promtail-stack --template-body file://template.yaml --capabilities CAPABILITY_IAM CAPABILITY_NAMED_IAM --region us-east-2 --parameters ParameterKey=WriteAddress,ParameterValue=https://your-loki-url/loki/api/v1/push ParameterKey=Username,ParameterValue= ParameterKey=Password,ParameterValue= ParameterKey=LambdaPromtailImage,ParameterValue=: ParameterKey=ExtraLabels,ParameterValue="name1,value1,name2,value2"
+aws cloudformation create-stack --stack-name lambda-promtail-stack --template-body file://template.yaml --capabilities CAPABILITY_IAM CAPABILITY_NAMED_IAM --region us-east-2 --parameters ParameterKey=WriteAddress,ParameterValue=https://your-loki-url/loki/api/v1/push ParameterKey=Username,ParameterValue= ParameterKey=Password,ParameterValue= ParameterKey=LambdaPromtailImage,ParameterValue=: ParameterKey=ExtraLabels,ParameterValue="name1,value1,name2,value2" ParameterKey=TenantID,ParameterValue=
 ```
 
 # Appendix
diff --git a/tools/lambda-promtail/lambda-promtail/main.go b/tools/lambda-promtail/lambda-promtail/main.go
index 3b8a9857c8610..9aca2126f9d9c 100644
--- a/tools/lambda-promtail/lambda-promtail/main.go
+++ b/tools/lambda-promtail/lambda-promtail/main.go
@@ -26,12 +26,12 @@ const (
 )
 
 var (
-	writeAddress                       *url.URL
-	username, password, extraLabelsRaw string
-	keepStream                         bool
-	batchSize                          int
-	s3Clients                          map[string]*s3.Client
-	extraLabels                        model.LabelSet
+	writeAddress                                 *url.URL
+	username, password, extraLabelsRaw, tenantID string
+	keepStream                                   bool
+	batchSize                                    int
+	s3Clients                                    map[string]*s3.Client
+	extraLabels                                  model.LabelSet
 )
 
 func setupArguments() {
@@ -61,6 +61,8 @@ func setupArguments() {
 		panic("both username and password must be set if either one is set")
 	}
 
+	tenantID = os.Getenv("TENANT_ID")
+
 	keep := os.Getenv("KEEP_STREAM")
 	// Anything other than case-insensitive 'true' is treated as 'false'.
 	if strings.EqualFold(keep, "true") {
diff --git a/tools/lambda-promtail/lambda-promtail/promtail.go b/tools/lambda-promtail/lambda-promtail/promtail.go
index bd3cf3f3d05f4..64d1348b76d6a 100644
--- a/tools/lambda-promtail/lambda-promtail/promtail.go
+++ b/tools/lambda-promtail/lambda-promtail/promtail.go
@@ -169,6 +169,10 @@ func send(ctx context.Context, buf []byte) (int, error) {
 	req.Header.Set("Content-Type", contentType)
 	req.Header.Set("User-Agent", userAgent)
 
+	if tenantID != "" {
+		req.Header.Set("X-Scope-OrgID", tenantID)
+	}
+
 	if username != "" && password != "" {
 		req.SetBasicAuth(username, password)
 	}
diff --git a/tools/lambda-promtail/main.tf b/tools/lambda-promtail/main.tf
index b4c2f580cbcdd..ba71bb198a26c 100644
--- a/tools/lambda-promtail/main.tf
+++ b/tools/lambda-promtail/main.tf
@@ -86,6 +86,7 @@ resource "aws_lambda_function" "lambda_promtail" {
       KEEP_STREAM  = var.keep_stream
       BATCH_SIZE   = var.batch_size
       EXTRA_LABELS = var.extra_labels
+      TENANT_ID    = var.tenant_id
     }
   }
 
diff --git a/tools/lambda-promtail/template.yaml b/tools/lambda-promtail/template.yaml
index 21e60e8215cb5..5e15b48d5e940 100644
--- a/tools/lambda-promtail/template.yaml
+++ b/tools/lambda-promtail/template.yaml
@@ -34,6 +34,10 @@ Parameters:
     Description: Comma separated list of extra labels, in the format 'name1,value1,name2,value2,...,nameN,valueN' to add to entries forwarded by lambda-promtail.
     Type: String
     Default: ""
+  TenantID:
+    Description: Tenant ID to be added when writing logs from lambda-promtail.
+    Type: String
+    Default: ""
 
 Resources:
   LambdaPromtailRole:
@@ -80,6 +84,7 @@ Resources:
           PASSWORD: !Ref Password
           KEEP_STREAM: !Ref KeepStream
           EXTRA_LABELS: !Ref ExtraLabels
+          TENANT_ID: !Ref TenantID
   LambdaPromtailVersion:
     Type: AWS::Lambda::Version
     Properties:
diff --git a/tools/lambda-promtail/variables.tf b/tools/lambda-promtail/variables.tf
index f94c6d2ce9df5..1a7d6dcdd02fd 100644
--- a/tools/lambda-promtail/variables.tf
+++ b/tools/lambda-promtail/variables.tf
@@ -35,6 +35,12 @@ variable "password" {
   default = ""
 }
 
+variable "tenant_id" {
+  type        = string
+  description = "Tenant ID to be added when writing logs from lambda-promtail."
+  default     = ""
+}
+
 variable "keep_stream" {
   type        = string
   description = "Determines whether to keep the CloudWatch Log Stream value as a Loki label when writing logs from lambda-promtail."

From 18058d2b550e4af98bf16ad48ac811161d7cace4 Mon Sep 17 00:00:00 2001
From: Christian Haudum
Date: Thu, 5 May 2022 14:32:17 +0200
Subject: [PATCH 074/223] Serve correct status of ingester/distributor in gRPC
 healthcheck endpoint (#6094)

* Serve correct status of ingester in gRPC healthcheck endpoint

The gRPC healthcheck endpoint is used by clients that determine whether
a connection to the service is healthy or not.
In case the ingester is not in "running" state and is not "active" in the ring, this healthcheck should return "not serving". Signed-off-by: Christian Haudum * Serve correct status of distributor in gRPC healthcheck endpoint --- pkg/distributor/distributor.go | 8 ++++++-- pkg/ingester/ingester.go | 8 ++++++-- 2 files changed, 12 insertions(+), 4 deletions(-) diff --git a/pkg/distributor/distributor.go b/pkg/distributor/distributor.go index d302d24534d81..369fe7a6b6334 100644 --- a/pkg/distributor/distributor.go +++ b/pkg/distributor/distributor.go @@ -429,8 +429,12 @@ func (d *Distributor) sendSamplesErr(ctx context.Context, ingester ring.Instance } // Check implements the grpc healthcheck -func (*Distributor) Check(_ context.Context, _ *grpc_health_v1.HealthCheckRequest) (*grpc_health_v1.HealthCheckResponse, error) { - return &grpc_health_v1.HealthCheckResponse{Status: grpc_health_v1.HealthCheckResponse_SERVING}, nil +func (d *Distributor) Check(_ context.Context, _ *grpc_health_v1.HealthCheckRequest) (*grpc_health_v1.HealthCheckResponse, error) { + status := grpc_health_v1.HealthCheckResponse_SERVING + if (d.State() != services.Running) || (d.distributorsLifecycler.GetState() != ring.ACTIVE) { + status = grpc_health_v1.HealthCheckResponse_NOT_SERVING + } + return &grpc_health_v1.HealthCheckResponse{Status: status}, nil } func (d *Distributor) parseStreamLabels(vContext validationContext, key string, stream *logproto.Stream) (string, error) { diff --git a/pkg/ingester/ingester.go b/pkg/ingester/ingester.go index ed05e966000ad..c6444d0c13c79 100644 --- a/pkg/ingester/ingester.go +++ b/pkg/ingester/ingester.go @@ -779,8 +779,12 @@ func (i *Ingester) Series(ctx context.Context, req *logproto.SeriesRequest) (*lo } // Check implements grpc_health_v1.HealthCheck. -func (*Ingester) Check(ctx context.Context, req *grpc_health_v1.HealthCheckRequest) (*grpc_health_v1.HealthCheckResponse, error) { - return &grpc_health_v1.HealthCheckResponse{Status: grpc_health_v1.HealthCheckResponse_SERVING}, nil +func (i *Ingester) Check(ctx context.Context, req *grpc_health_v1.HealthCheckRequest) (*grpc_health_v1.HealthCheckResponse, error) { + status := grpc_health_v1.HealthCheckResponse_SERVING + if (i.State() != services.Running) || (i.lifecycler.GetState() != ring.ACTIVE) { + status = grpc_health_v1.HealthCheckResponse_NOT_SERVING + } + return &grpc_health_v1.HealthCheckResponse{Status: status}, nil } // Watch implements grpc_health_v1.HealthCheck. 
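For readers wiring this up: below is a minimal sketch of how a gRPC-aware client or load balancer would consume the patched endpoint. The target address and the insecure dial option are assumptions for illustration only; the `Check` semantics come from the patch above.

```go
package main

import (
	"context"
	"fmt"
	"log"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/health/grpc_health_v1"
)

func main() {
	// The address is a placeholder; point it at the component's gRPC port.
	conn, err := grpc.Dial("ingester.loki.svc:9095", grpc.WithInsecure())
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	// An empty Service field asks for the overall server status, which is
	// what the patched Check implementations compute from the ring state.
	resp, err := grpc_health_v1.NewHealthClient(conn).Check(ctx, &grpc_health_v1.HealthCheckRequest{})
	if err != nil {
		log.Fatal(err)
	}

	// With this patch, SERVING implies the component is Running and ACTIVE
	// in the ring; a joining or leaving instance reports NOT_SERVING.
	fmt.Println("status:", resp.Status)
}
```

A client that drops NOT_SERVING backends from rotation will therefore stop routing to an ingester that is still joining or leaving the ring, instead of discovering the problem on the first real push or query.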
From e2fdc3437b38c67d817028ce4f002a6f20ca9e1e Mon Sep 17 00:00:00 2001 From: arcosx Date: Fri, 6 May 2022 03:42:35 +0800 Subject: [PATCH 075/223] Feat: add Baidu Cloud BOS as storage backends for Loki #4788 (#5848) * feat: add baidu bce bos storage support Signed-off-by: arcosx * add baidu bce bos as chunk storage backend Signed-off-by: arcosx * fix: some doc error && rewrite bad code Signed-off-by: arcosx * fix: add the `BceServiceError` source link Signed-off-by: arcosx * Update CHANGELOG.md Co-authored-by: Karen Miller <84039272+KMiller-Grafana@users.noreply.github.com> * Update docs/sources/configuration/_index.md Co-authored-by: Karen Miller <84039272+KMiller-Grafana@users.noreply.github.com> Signed-off-by: arcosx * Update docs/sources/configuration/_index.md Co-authored-by: Karen Miller <84039272+KMiller-Grafana@users.noreply.github.com> * Update docs/sources/configuration/_index.md Co-authored-by: Karen Miller <84039272+KMiller-Grafana@users.noreply.github.com> * Update docs/sources/configuration/_index.md Co-authored-by: Karen Miller <84039272+KMiller-Grafana@users.noreply.github.com> * Update docs/sources/configuration/_index.md Co-authored-by: Karen Miller <84039272+KMiller-Grafana@users.noreply.github.com> * Update pkg/storage/chunk/client/baidubce/bos_storage_client.go Co-authored-by: Karen Miller <84039272+KMiller-Grafana@users.noreply.github.com> * Update pkg/storage/chunk/client/baidubce/bos_storage_client.go Co-authored-by: Karen Miller <84039272+KMiller-Grafana@users.noreply.github.com> * Update pkg/storage/chunk/client/baidubce/bos_storage_client.go Co-authored-by: Karen Miller <84039272+KMiller-Grafana@users.noreply.github.com> Co-authored-by: Karen Miller <84039272+KMiller-Grafana@users.noreply.github.com> --- CHANGELOG.md | 1 + docs/sources/configuration/_index.md | 30 +- docs/sources/configuration/examples.md | 31 + .../configuration/examples/bos-config.yaml | 25 + docs/sources/operations/storage/_index.md | 1 + go.mod | 1 + pkg/loki/common/common.go | 15 +- pkg/loki/config_wrapper.go | 10 + pkg/loki/config_wrapper_test.go | 56 +- pkg/ruler/base/storage.go | 16 +- .../client/baidubce/bos_storage_client.go | 171 ++ pkg/storage/config/schema_config.go | 1 + pkg/storage/factory.go | 31 +- vendor/github.com/baidubce/bce-sdk-go/LICENSE | 177 ++ .../baidubce/bce-sdk-go/auth/credentials.go | 62 + .../baidubce/bce-sdk-go/auth/signer.go | 184 ++ .../baidubce/bce-sdk-go/bce/builder.go | 189 ++ .../baidubce/bce-sdk-go/bce/client.go | 267 ++ .../baidubce/bce-sdk-go/bce/config.go | 80 + .../baidubce/bce-sdk-go/bce/error.go | 64 + .../baidubce/bce-sdk-go/bce/request.go | 219 ++ .../baidubce/bce-sdk-go/bce/response.go | 128 + .../baidubce/bce-sdk-go/bce/retry.go | 118 + .../baidubce/bce-sdk-go/http/client.go | 174 ++ .../baidubce/bce-sdk-go/http/constants.go | 83 + .../baidubce/bce-sdk-go/http/request.go | 221 ++ .../baidubce/bce-sdk-go/http/response.go | 74 + .../bce-sdk-go/services/bos/api/bucket.go | 1090 ++++++++ .../bce-sdk-go/services/bos/api/model.go | 579 +++++ .../bce-sdk-go/services/bos/api/multipart.go | 420 ++++ .../bce-sdk-go/services/bos/api/object.go | 931 +++++++ .../bce-sdk-go/services/bos/api/util.go | 274 +++ .../bce-sdk-go/services/bos/client.go | 2181 +++++++++++++++++ .../baidubce/bce-sdk-go/util/log/logger.go | 373 +++ .../baidubce/bce-sdk-go/util/log/util.go | 223 ++ .../baidubce/bce-sdk-go/util/string.go | 88 + .../baidubce/bce-sdk-go/util/time.go | 46 + vendor/modules.txt | 9 + 38 files changed, 8615 insertions(+), 28 deletions(-) create mode 100644 
docs/sources/configuration/examples/bos-config.yaml create mode 100644 pkg/storage/chunk/client/baidubce/bos_storage_client.go create mode 100644 vendor/github.com/baidubce/bce-sdk-go/LICENSE create mode 100644 vendor/github.com/baidubce/bce-sdk-go/auth/credentials.go create mode 100644 vendor/github.com/baidubce/bce-sdk-go/auth/signer.go create mode 100644 vendor/github.com/baidubce/bce-sdk-go/bce/builder.go create mode 100644 vendor/github.com/baidubce/bce-sdk-go/bce/client.go create mode 100644 vendor/github.com/baidubce/bce-sdk-go/bce/config.go create mode 100644 vendor/github.com/baidubce/bce-sdk-go/bce/error.go create mode 100644 vendor/github.com/baidubce/bce-sdk-go/bce/request.go create mode 100644 vendor/github.com/baidubce/bce-sdk-go/bce/response.go create mode 100644 vendor/github.com/baidubce/bce-sdk-go/bce/retry.go create mode 100644 vendor/github.com/baidubce/bce-sdk-go/http/client.go create mode 100644 vendor/github.com/baidubce/bce-sdk-go/http/constants.go create mode 100644 vendor/github.com/baidubce/bce-sdk-go/http/request.go create mode 100644 vendor/github.com/baidubce/bce-sdk-go/http/response.go create mode 100644 vendor/github.com/baidubce/bce-sdk-go/services/bos/api/bucket.go create mode 100644 vendor/github.com/baidubce/bce-sdk-go/services/bos/api/model.go create mode 100644 vendor/github.com/baidubce/bce-sdk-go/services/bos/api/multipart.go create mode 100644 vendor/github.com/baidubce/bce-sdk-go/services/bos/api/object.go create mode 100644 vendor/github.com/baidubce/bce-sdk-go/services/bos/api/util.go create mode 100644 vendor/github.com/baidubce/bce-sdk-go/services/bos/client.go create mode 100644 vendor/github.com/baidubce/bce-sdk-go/util/log/logger.go create mode 100644 vendor/github.com/baidubce/bce-sdk-go/util/log/util.go create mode 100644 vendor/github.com/baidubce/bce-sdk-go/util/string.go create mode 100644 vendor/github.com/baidubce/bce-sdk-go/util/time.go diff --git a/CHANGELOG.md b/CHANGELOG.md index 53d8e56b44fe2..8fe5ff9d04227 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -8,6 +8,7 @@ * [5879](https://github.com/grafana/loki/pull/5879) **MichelHollands**: Remove lines matching delete request expression when using "filter-and-delete" deletion mode. * [5899](https://github.com/grafana/loki/pull/5899) **simonswine**: Update go image to 1.17.9. * [5888](https://github.com/grafana/loki/pull/5888) **Papawy** Fix common config net interface name overwritten by ring common config +* [5848](https://github.com/grafana/loki/pull/5848) **arcosx**: Add Baidu AI Cloud as a storage backend choice. * [5799](https://github.com/grafana/loki/pull/5799) **cyriltovena** Fix deduping issues when multiple entries with the same timestamp exist. * [5799](https://github.com/grafana/loki/pull/5799) **cyriltovena** Fixes deduping issues when multiple entries exists with the same timestamp. * [5780](https://github.com/grafana/loki/pull/5780) **simonswine**: Update alpine image to 3.15.4. diff --git a/docs/sources/configuration/_index.md b/docs/sources/configuration/_index.md index 5e0f19275ce34..db83b55776f34 100644 --- a/docs/sources/configuration/_index.md +++ b/docs/sources/configuration/_index.md @@ -473,7 +473,7 @@ ruler_client: [poll_interval: | default = 1m] storage: - # Method to use for backend rule storage (azure, gcs, s3, swift, local). + # Method to use for backend rule storage (azure, gcs, s3, swift, local, bos). # CLI flag: -ruler.storage.type [type: ] @@ -492,6 +492,9 @@ storage: # Configures backend rule storage for a local file system directory. 
[local: ] + # Configures backend rule storage for Baidu Object Storage (BOS). + [bos: ] + # The `hedging` block configures how to hedge storage requests. [hedging: ] @@ -729,6 +732,25 @@ ring: [num_tokens: | default = 128] ``` +## bos_storage_config + +The `bos_storage_config` block configures Baidu Object Storage (BOS) as general storage for data generated by Loki. + +```yaml +# Name of BOS bucket. +# CLI flag: .baidubce.bucket-name +[ bucket_name: | default = "" ] +# BOS endpoint to connect to. +# CLI flag: .baidubce.endpoint +[ endpoint: | default = "bj.bcebos.com" ] +# Baidu Cloud Engine (BCE) Access Key ID +# CLI flag: .baidubce.access-key-id +[ access_key_id: | default = "" ] +# BCE Secret Access Key +# CLI flag: .baidubce.secret-access-key +[ secret_access_key: | default = "" ] +``` + ## azure_storage_config The `azure_storage_config` configures Azure as a general storage for different data generated by Loki. @@ -1983,7 +2005,7 @@ compacts index shards to more performant forms. [working_directory: ] # The shared store used for storing boltdb files. -# Supported types: gcs, s3, azure, swift, filesystem. +# Supported types: gcs, s3, azure, swift, filesystem, bos. # CLI flag: -boltdb.shipper.compactor.shared-store [shared_store: ] @@ -2552,8 +2574,12 @@ If any specific configuration for an object storage client have been provided el # Configures a (local) file system as the common storage. [filesystem: ] +# Configures Baidu Object Storage (BOS) as the common storage. +[bos: ] + # The `hedging_config` configures how to hedge requests for the storage. [hedging: ] + ``` ### filesystem diff --git a/docs/sources/configuration/examples.md b/docs/sources/configuration/examples.md index 0a54ff7e91700..4a0ef202d93d6 100644 --- a/docs/sources/configuration/examples.md +++ b/docs/sources/configuration/examples.md @@ -124,6 +124,37 @@ storage_config: ``` +## bos-config.yaml + +```yaml +schema_config: + configs: + - from: 2020-05-15 + store: boltdb-shipper + object_store: bos + schema: v11 + index: + prefix: index_ + period: 24h + +storage_config: + boltdb_shipper: + active_index_directory: /loki/index + cache_location: /loki/index_cache + shared_store: bos + + bos: + bucket_name: bucket_name_1 + endpoint: bj.bcebos.com + access_key_id: access_key_id + secret_access_key: secret_access_key + +compactor: + working_directory: /tmp/loki/compactor + shared_store: bos +``` + + ## cassandra-index.yaml ```yaml diff --git a/docs/sources/configuration/examples/bos-config.yaml b/docs/sources/configuration/examples/bos-config.yaml new file mode 100644 index 0000000000000..8b4f3dc8c794c --- /dev/null +++ b/docs/sources/configuration/examples/bos-config.yaml @@ -0,0 +1,25 @@ +schema_config: + configs: + - from: 2020-05-15 + store: boltdb-shipper + object_store: bos + schema: v11 + index: + prefix: index_ + period: 24h + +storage_config: + boltdb_shipper: + active_index_directory: /loki/index + cache_location: /loki/index_cache + shared_store: bos + + bos: + bucket_name: bucket_name_1 + endpoint: bj.bcebos.com + access_key_id: access_key_id + secret_access_key: secret_access_key + +compactor: + working_directory: /tmp/loki/compactor + shared_store: bos \ No newline at end of file diff --git a/docs/sources/operations/storage/_index.md b/docs/sources/operations/storage/_index.md index 1a01cb7c76d67..b51f9b4b34e13 100644 --- a/docs/sources/operations/storage/_index.md +++ b/docs/sources/operations/storage/_index.md @@ -43,6 +43,7 @@ The following are supported for the chunks: - [Amazon S3](https://aws.amazon.com/s3) 
- [Google Cloud Storage](https://cloud.google.com/storage/) - [Filesystem](filesystem/) (please read more about the filesystem to understand the pros/cons before using with production data) +- [Baidu Object Storage](https://cloud.baidu.com/product/bos.html) ## Cloud Storage Permissions diff --git a/go.mod b/go.mod index b12ce2bc31375..44e60f196f834 100644 --- a/go.mod +++ b/go.mod @@ -15,6 +15,7 @@ require ( github.com/Workiva/go-datastructures v1.0.53 github.com/alicebob/miniredis/v2 v2.14.3 github.com/aws/aws-sdk-go v1.43.10 + github.com/baidubce/bce-sdk-go v0.9.81 github.com/bmatcuk/doublestar v1.2.2 github.com/bradfitz/gomemcache v0.0.0-20190913173617-a41fca850d0b github.com/buger/jsonparser v1.1.1 diff --git a/pkg/loki/common/common.go b/pkg/loki/common/common.go index 6eebd5d854af3..6c296027419cc 100644 --- a/pkg/loki/common/common.go +++ b/pkg/loki/common/common.go @@ -8,6 +8,7 @@ import ( "github.com/grafana/loki/pkg/storage/chunk/client/aws" "github.com/grafana/loki/pkg/storage/chunk/client/azure" + "github.com/grafana/loki/pkg/storage/chunk/client/baidubce" "github.com/grafana/loki/pkg/storage/chunk/client/gcp" "github.com/grafana/loki/pkg/storage/chunk/client/hedging" "github.com/grafana/loki/pkg/storage/chunk/client/openstack" @@ -54,12 +55,13 @@ func (c *Config) RegisterFlags(_ *flag.FlagSet) { } type Storage struct { - S3 aws.S3Config `yaml:"s3"` - GCS gcp.GCSConfig `yaml:"gcs"` - Azure azure.BlobStorageConfig `yaml:"azure"` - Swift openstack.SwiftConfig `yaml:"swift"` - FSConfig FilesystemConfig `yaml:"filesystem"` - Hedging hedging.Config `yaml:"hedging"` + S3 aws.S3Config `yaml:"s3"` + GCS gcp.GCSConfig `yaml:"gcs"` + Azure azure.BlobStorageConfig `yaml:"azure"` + BOS baidubce.BOSStorageConfig `yaml:"bos"` + Swift openstack.SwiftConfig `yaml:"swift"` + FSConfig FilesystemConfig `yaml:"filesystem"` + Hedging hedging.Config `yaml:"hedging"` } func (s *Storage) RegisterFlagsWithPrefix(prefix string, f *flag.FlagSet) { @@ -67,6 +69,7 @@ func (s *Storage) RegisterFlagsWithPrefix(prefix string, f *flag.FlagSet) { s.GCS.RegisterFlagsWithPrefix(prefix+".gcs", f) s.Azure.RegisterFlagsWithPrefix(prefix+".azure", f) s.Swift.RegisterFlagsWithPrefix(prefix+".swift", f) + s.BOS.RegisterFlagsWithPrefix(prefix+".bos", f) s.FSConfig.RegisterFlagsWithPrefix(prefix+".filesystem", f) s.Hedging.RegisterFlagsWithPrefix(prefix, f) } diff --git a/pkg/loki/config_wrapper.go b/pkg/loki/config_wrapper.go index 2f1e19cf6fa98..239b6c97b2a7f 100644 --- a/pkg/loki/config_wrapper.go +++ b/pkg/loki/config_wrapper.go @@ -453,6 +453,16 @@ func applyStorageConfig(cfg, defaults *ConfigWrapper) error { } } + if !reflect.DeepEqual(cfg.Common.Storage.BOS, defaults.StorageConfig.BOSStorageConfig) { + configsFound++ + applyConfig = func(r *ConfigWrapper) { + r.Ruler.StoreConfig.Type = "bos" + r.Ruler.StoreConfig.BOS = r.Common.Storage.BOS + r.StorageConfig.BOSStorageConfig = r.Common.Storage.BOS + r.CompactorConfig.SharedStoreType = config.StorageTypeBOS + } + } + if !reflect.DeepEqual(cfg.Common.Storage.Swift, defaults.StorageConfig.Swift) { configsFound++ diff --git a/pkg/loki/config_wrapper_test.go b/pkg/loki/config_wrapper_test.go index d0150c0c6d8e9..49dbdbec65532 100644 --- a/pkg/loki/config_wrapper_test.go +++ b/pkg/loki/config_wrapper_test.go @@ -19,6 +19,7 @@ import ( "github.com/grafana/loki/pkg/storage/bucket/swift" "github.com/grafana/loki/pkg/storage/chunk/client/aws" "github.com/grafana/loki/pkg/storage/chunk/client/azure" + "github.com/grafana/loki/pkg/storage/chunk/client/baidubce" 
"github.com/grafana/loki/pkg/storage/chunk/client/gcp" "github.com/grafana/loki/pkg/storage/config" "github.com/grafana/loki/pkg/util" @@ -201,6 +202,7 @@ memberlist: // s3: aws.S3Config // swift: openstack.SwiftConfig // filesystem: Filesystem + // bos: baidubce.BOSStorageConfig t.Run("does not automatically configure cloud object storage", func(t *testing.T) { config, defaults := testContext(emptyConfigString, nil) @@ -211,12 +213,13 @@ memberlist: assert.EqualValues(t, defaults.Ruler.StoreConfig.S3, config.Ruler.StoreConfig.S3) assert.EqualValues(t, defaults.Ruler.StoreConfig.Swift, config.Ruler.StoreConfig.Swift) assert.EqualValues(t, defaults.Ruler.StoreConfig.Local, config.Ruler.StoreConfig.Local) - + assert.EqualValues(t, defaults.Ruler.StoreConfig.BOS, config.Ruler.StoreConfig.BOS) assert.EqualValues(t, defaults.StorageConfig.AWSStorageConfig, config.StorageConfig.AWSStorageConfig) assert.EqualValues(t, defaults.StorageConfig.AzureStorageConfig, config.StorageConfig.AzureStorageConfig) assert.EqualValues(t, defaults.StorageConfig.GCSConfig, config.StorageConfig.GCSConfig) assert.EqualValues(t, defaults.StorageConfig.Swift, config.StorageConfig.Swift) assert.EqualValues(t, defaults.StorageConfig.FSConfig, config.StorageConfig.FSConfig) + assert.EqualValues(t, defaults.StorageConfig.BOSStorageConfig, config.StorageConfig.BOSStorageConfig) }) t.Run("when multiple configs are provided, an error is returned", func(t *testing.T) { @@ -285,12 +288,13 @@ memberlist: assert.EqualValues(t, defaults.Ruler.StoreConfig.GCS, config.Ruler.StoreConfig.GCS) assert.EqualValues(t, defaults.Ruler.StoreConfig.Swift, config.Ruler.StoreConfig.Swift) assert.EqualValues(t, defaults.Ruler.StoreConfig.Local, config.Ruler.StoreConfig.Local) - + assert.EqualValues(t, defaults.Ruler.StoreConfig.BOS, config.Ruler.StoreConfig.BOS) // should remain empty assert.EqualValues(t, defaults.StorageConfig.AzureStorageConfig, config.StorageConfig.AzureStorageConfig) assert.EqualValues(t, defaults.StorageConfig.GCSConfig, config.StorageConfig.GCSConfig) assert.EqualValues(t, defaults.StorageConfig.Swift, config.StorageConfig.Swift) assert.EqualValues(t, defaults.StorageConfig.FSConfig, config.StorageConfig.FSConfig) + assert.EqualValues(t, defaults.StorageConfig.BOSStorageConfig, config.StorageConfig.BOSStorageConfig) }) t.Run("when common gcs storage config is provided, ruler and storage config are defaulted to use it", func(t *testing.T) { @@ -320,11 +324,13 @@ memberlist: assert.EqualValues(t, defaults.Ruler.StoreConfig.S3, config.Ruler.StoreConfig.S3) assert.EqualValues(t, defaults.Ruler.StoreConfig.Swift, config.Ruler.StoreConfig.Swift) assert.EqualValues(t, defaults.Ruler.StoreConfig.Local, config.Ruler.StoreConfig.Local) + assert.EqualValues(t, defaults.Ruler.StoreConfig.BOS, config.Ruler.StoreConfig.BOS) // should remain empty assert.EqualValues(t, defaults.StorageConfig.AzureStorageConfig, config.StorageConfig.AzureStorageConfig) assert.EqualValues(t, defaults.StorageConfig.AWSStorageConfig.S3Config, config.StorageConfig.AWSStorageConfig.S3Config) assert.EqualValues(t, defaults.StorageConfig.Swift, config.StorageConfig.Swift) assert.EqualValues(t, defaults.StorageConfig.FSConfig, config.StorageConfig.FSConfig) + assert.EqualValues(t, defaults.StorageConfig.BOSStorageConfig, config.StorageConfig.BOSStorageConfig) }) t.Run("when common azure storage config is provided, ruler and storage config are defaulted to use it", func(t *testing.T) { @@ -370,8 +376,48 @@ memberlist: assert.EqualValues(t, 
defaults.Ruler.StoreConfig.S3, config.Ruler.StoreConfig.S3) assert.EqualValues(t, defaults.Ruler.StoreConfig.Swift, config.Ruler.StoreConfig.Swift) assert.EqualValues(t, defaults.Ruler.StoreConfig.Local, config.Ruler.StoreConfig.Local) + assert.EqualValues(t, defaults.Ruler.StoreConfig.BOS, config.Ruler.StoreConfig.BOS) + + // should remain empty + assert.EqualValues(t, defaults.StorageConfig.GCSConfig, config.StorageConfig.GCSConfig) + assert.EqualValues(t, defaults.StorageConfig.AWSStorageConfig.S3Config, config.StorageConfig.AWSStorageConfig.S3Config) + assert.EqualValues(t, defaults.StorageConfig.Swift, config.StorageConfig.Swift) + assert.EqualValues(t, defaults.StorageConfig.FSConfig, config.StorageConfig.FSConfig) + assert.EqualValues(t, defaults.StorageConfig.BOSStorageConfig, config.StorageConfig.BOSStorageConfig) + }) + + t.Run("when common bos storage config is provided, ruler and storage config are defaulted to use it", func(t *testing.T) { + bosConfig := `common: + storage: + bos: + bucket_name: arcosx + endpoint: bj.bcebos.com + access_key_id: baidu + secret_access_key: bce` + + config, defaults := testContext(bosConfig, nil) + + assert.Equal(t, "bos", config.Ruler.StoreConfig.Type) + + for _, actual := range []baidubce.BOSStorageConfig{ + config.Ruler.StoreConfig.BOS, + config.StorageConfig.BOSStorageConfig, + } { + assert.Equal(t, "arcosx", actual.BucketName) + assert.Equal(t, "bj.bcebos.com", actual.Endpoint) + assert.Equal(t, "baidu", actual.AccessKeyID) + assert.Equal(t, "bce", actual.SecretAccessKey) + } + + // should remain empty + assert.EqualValues(t, defaults.Ruler.StoreConfig.Azure, config.Ruler.StoreConfig.Azure) + assert.EqualValues(t, defaults.Ruler.StoreConfig.GCS, config.Ruler.StoreConfig.GCS) + assert.EqualValues(t, defaults.Ruler.StoreConfig.S3, config.Ruler.StoreConfig.S3) + assert.EqualValues(t, defaults.Ruler.StoreConfig.Swift, config.Ruler.StoreConfig.Swift) + assert.EqualValues(t, defaults.Ruler.StoreConfig.Local, config.Ruler.StoreConfig.Local) // should remain empty + assert.EqualValues(t, defaults.StorageConfig.AzureStorageConfig, config.StorageConfig.AzureStorageConfig) assert.EqualValues(t, defaults.StorageConfig.GCSConfig, config.StorageConfig.GCSConfig) assert.EqualValues(t, defaults.StorageConfig.AWSStorageConfig.S3Config, config.StorageConfig.AWSStorageConfig.S3Config) assert.EqualValues(t, defaults.StorageConfig.Swift, config.StorageConfig.Swift) @@ -435,12 +481,13 @@ memberlist: assert.EqualValues(t, defaults.Ruler.StoreConfig.S3, config.Ruler.StoreConfig.S3) assert.EqualValues(t, defaults.Ruler.StoreConfig.Azure, config.Ruler.StoreConfig.Azure) assert.EqualValues(t, defaults.Ruler.StoreConfig.Local, config.Ruler.StoreConfig.Local) - + assert.EqualValues(t, defaults.Ruler.StoreConfig.BOS, config.Ruler.StoreConfig.BOS) // should remain empty assert.EqualValues(t, defaults.StorageConfig.GCSConfig, config.StorageConfig.GCSConfig) assert.EqualValues(t, defaults.StorageConfig.AWSStorageConfig.S3Config, config.StorageConfig.AWSStorageConfig.S3Config) assert.EqualValues(t, defaults.StorageConfig.AzureStorageConfig, config.StorageConfig.AzureStorageConfig) assert.EqualValues(t, defaults.StorageConfig.FSConfig, config.StorageConfig.FSConfig) + assert.EqualValues(t, defaults.StorageConfig.BOSStorageConfig, config.StorageConfig.BOSStorageConfig) }) t.Run("when common filesystem/local config is provided, ruler and storage config are defaulted to use it", func(t *testing.T) { @@ -462,12 +509,13 @@ memberlist: assert.EqualValues(t, 
defaults.Ruler.StoreConfig.S3, config.Ruler.StoreConfig.S3) assert.EqualValues(t, defaults.Ruler.StoreConfig.Azure, config.Ruler.StoreConfig.Azure) assert.EqualValues(t, defaults.Ruler.StoreConfig.Swift, config.Ruler.StoreConfig.Swift) - + assert.EqualValues(t, defaults.Ruler.StoreConfig.BOS, config.Ruler.StoreConfig.BOS) // should remain empty assert.EqualValues(t, defaults.StorageConfig.GCSConfig, config.StorageConfig.GCSConfig) assert.EqualValues(t, defaults.StorageConfig.AWSStorageConfig.S3Config, config.StorageConfig.AWSStorageConfig.S3Config) assert.EqualValues(t, defaults.StorageConfig.AzureStorageConfig, config.StorageConfig.AzureStorageConfig) assert.EqualValues(t, defaults.StorageConfig.Swift, config.StorageConfig.Swift) + assert.EqualValues(t, defaults.StorageConfig.BOSStorageConfig, config.StorageConfig.BOSStorageConfig) }) t.Run("explicit ruler storage object storage configuration provided via config file is preserved", func(t *testing.T) { diff --git a/pkg/ruler/base/storage.go b/pkg/ruler/base/storage.go index 9f1a4c4240ef1..aacea77c57d66 100644 --- a/pkg/ruler/base/storage.go +++ b/pkg/ruler/base/storage.go @@ -21,6 +21,7 @@ import ( "github.com/grafana/loki/pkg/storage/chunk/client" "github.com/grafana/loki/pkg/storage/chunk/client/aws" "github.com/grafana/loki/pkg/storage/chunk/client/azure" + "github.com/grafana/loki/pkg/storage/chunk/client/baidubce" "github.com/grafana/loki/pkg/storage/chunk/client/gcp" "github.com/grafana/loki/pkg/storage/chunk/client/hedging" "github.com/grafana/loki/pkg/storage/chunk/client/openstack" @@ -33,11 +34,12 @@ type RuleStoreConfig struct { ConfigDB configClient.Config `yaml:"configdb"` // Object Storage Configs - Azure azure.BlobStorageConfig `yaml:"azure"` - GCS gcp.GCSConfig `yaml:"gcs"` - S3 aws.S3Config `yaml:"s3"` - Swift openstack.SwiftConfig `yaml:"swift"` - Local local.Config `yaml:"local"` + Azure azure.BlobStorageConfig `yaml:"azure"` + GCS gcp.GCSConfig `yaml:"gcs"` + S3 aws.S3Config `yaml:"s3"` + BOS baidubce.BOSStorageConfig `yaml:"bos"` + Swift openstack.SwiftConfig `yaml:"swift"` + Local local.Config `yaml:"local"` mock rulestore.RuleStore `yaml:"-"` } @@ -50,7 +52,7 @@ func (cfg *RuleStoreConfig) RegisterFlags(f *flag.FlagSet) { cfg.S3.RegisterFlagsWithPrefix("ruler.storage.", f) cfg.Swift.RegisterFlagsWithPrefix("ruler.storage.", f) cfg.Local.RegisterFlagsWithPrefix("ruler.storage.", f) - + cfg.BOS.RegisterFlagsWithPrefix("ruler.storage.", f) f.StringVar(&cfg.Type, "ruler.storage.type", "configdb", "Method to use for backend rule storage (configdb, azure, gcs, s3, swift, local)") } @@ -101,6 +103,8 @@ func NewLegacyRuleStore(cfg RuleStoreConfig, hedgeCfg hedging.Config, clientMetr client, err = gcp.NewGCSObjectClient(context.Background(), cfg.GCS, hedgeCfg) case "s3": client, err = aws.NewS3ObjectClient(cfg.S3, hedgeCfg) + case "bos": + client, err = baidubce.NewBOSObjectStorage(&cfg.BOS) case "swift": client, err = openstack.NewSwiftObjectClient(cfg.Swift, hedgeCfg) case "local": diff --git a/pkg/storage/chunk/client/baidubce/bos_storage_client.go b/pkg/storage/chunk/client/baidubce/bos_storage_client.go new file mode 100644 index 0000000000000..4b79aaf2854ab --- /dev/null +++ b/pkg/storage/chunk/client/baidubce/bos_storage_client.go @@ -0,0 +1,171 @@ +package baidubce + +import ( + "context" + "flag" + + "io" + "time" + + "github.com/baidubce/bce-sdk-go/bce" + "github.com/baidubce/bce-sdk-go/services/bos" + "github.com/baidubce/bce-sdk-go/services/bos/api" + "github.com/pkg/errors" + 
"github.com/prometheus/client_golang/prometheus" + "github.com/weaveworks/common/instrument" + + "github.com/grafana/loki/pkg/storage/chunk/client" +) + +// NoSuchKeyErr The resource you requested does not exist. +// refer to: https://cloud.baidu.com/doc/BOS/s/Ajwvysfpl +// https://intl.cloud.baidu.com/doc/BOS/s/Ajwvysfpl-en +const NoSuchKeyErr = "NoSuchKey" + +const DefaultEndpoint = bos.DEFAULT_SERVICE_DOMAIN + +var bosRequestDuration = instrument.NewHistogramCollector(prometheus.NewHistogramVec(prometheus.HistogramOpts{ + Namespace: "loki", + Name: "bos_request_duration_seconds", + Help: "Time spent doing BOS requests.", + Buckets: prometheus.ExponentialBuckets(0.005, 4, 6), +}, []string{"operation", "status_code"})) + +func init() { + bosRequestDuration.Register() +} + +type BOSStorageConfig struct { + BucketName string `yaml:"bucket_name"` + Endpoint string `yaml:"endpoint"` + AccessKeyID string `yaml:"access_key_id"` + SecretAccessKey string `yaml:"secret_access_key"` +} + +// RegisterFlags adds the flags required to config this to the given FlagSet +func (cfg *BOSStorageConfig) RegisterFlags(f *flag.FlagSet) { + cfg.RegisterFlagsWithPrefix("", f) +} + +// RegisterFlagsWithPrefix adds the flags required to config this to the given FlagSet +func (cfg *BOSStorageConfig) RegisterFlagsWithPrefix(prefix string, f *flag.FlagSet) { + f.StringVar(&cfg.BucketName, prefix+"baidubce.bucket-name", "", "Name of BOS bucket.") + f.StringVar(&cfg.Endpoint, prefix+"baidubce.endpoint", DefaultEndpoint, "BOS endpoint to connect to.") + f.StringVar(&cfg.AccessKeyID, prefix+"baidubce.access-key-id", "", "Baidu Cloud Engine (BCE) Access Key ID.") + f.StringVar(&cfg.SecretAccessKey, prefix+"baidubce.secret-access-key", "", "Baidu Cloud Engine (BCE) Secret Access Key.") +} + +type BOSObjectStorage struct { + cfg *BOSStorageConfig + client *bos.Client +} + +func NewBOSObjectStorage(cfg *BOSStorageConfig) (*BOSObjectStorage, error) { + clientConfig := bos.BosClientConfiguration{ + Ak: cfg.AccessKeyID, + Sk: cfg.SecretAccessKey, + Endpoint: cfg.Endpoint, + RedirectDisabled: false, + } + bosClient, err := bos.NewClientWithConfig(&clientConfig) + if err != nil { + return nil, err + } + return &BOSObjectStorage{ + cfg: cfg, + client: bosClient, + }, nil +} + +func (b *BOSObjectStorage) PutObject(ctx context.Context, objectKey string, object io.ReadSeeker) error { + return instrument.CollectedRequest(ctx, "BOS.PutObject", bosRequestDuration, instrument.ErrorCode, func(ctx context.Context) error { + body, err := bce.NewBodyFromSizedReader(object, -1) + if err != nil { + return err + } + _, err = b.client.BasicPutObject(b.cfg.BucketName, objectKey, body) + return err + }) +} + +func (b *BOSObjectStorage) GetObject(ctx context.Context, objectKey string) (io.ReadCloser, int64, error) { + var res *api.GetObjectResult + err := instrument.CollectedRequest(ctx, "BOS.GetObject", bosRequestDuration, instrument.ErrorCode, func(ctx context.Context) error { + var requestErr error + res, requestErr = b.client.BasicGetObject(b.cfg.BucketName, objectKey) + return requestErr + }) + if err != nil { + return nil, 0, errors.Wrapf(err, "failed to get BOS object [ %s ]", objectKey) + } + size := res.ContentLength + return res.Body, size, nil +} + +func (b *BOSObjectStorage) List(ctx context.Context, prefix string, delimiter string) ([]client.StorageObject, []client.StorageCommonPrefix, error) { + var storageObjects []client.StorageObject + var commonPrefixes []client.StorageCommonPrefix + + err := instrument.CollectedRequest(ctx, 
"BOS.List", bosRequestDuration, instrument.ErrorCode, func(ctx context.Context) error { + args := new(api.ListObjectsArgs) + args.Prefix = prefix + args.Delimiter = delimiter + for { + listObjectResult, err := b.client.ListObjects(b.cfg.BucketName, args) + if err != nil { + return err + } + for _, content := range listObjectResult.Contents { + // LastModified format 2021-10-28T06:55:01Z + lastModifiedTime, err := time.Parse(time.RFC3339, content.LastModified) + if err != nil { + return err + } + storageObjects = append(storageObjects, client.StorageObject{ + Key: content.Key, + ModifiedAt: lastModifiedTime, + }) + } + for _, commonPrefix := range listObjectResult.CommonPrefixes { + commonPrefixes = append(commonPrefixes, client.StorageCommonPrefix(commonPrefix.Prefix)) + } + if !listObjectResult.IsTruncated { + break + } + args.Prefix = listObjectResult.Prefix + args.Marker = listObjectResult.NextMarker + } + return nil + }) + if err != nil { + return nil, nil, err + } + return storageObjects, commonPrefixes, nil +} + +func (b *BOSObjectStorage) DeleteObject(ctx context.Context, objectKey string) error { + return instrument.CollectedRequest(ctx, "BOS.DeleteObject", bosRequestDuration, instrument.ErrorCode, func(ctx context.Context) error { + err := b.client.DeleteObject(b.cfg.BucketName, objectKey) + return err + }) +} + +func (b *BOSObjectStorage) IsObjectNotFoundErr(err error) bool { + switch realErr := errors.Cause(err).(type) { + // Client exception indicates an exception encountered when the client attempts to send a request to the BOS and transmits data. + case *bce.BceClientError: + return false + // When an exception occurs on the BOS server, the BOS server returns the corresponding error message to the user to locate the problem. + // BceServiceError will return an error message string to contain the error code : + // https://github.com/baidubce/bce-sdk-go/blob/1e5bfbecf07c6ed5d97a0090a9faee7d89466239/bce/error.go#L47-L53 + case *bce.BceServiceError: + if realErr.Code == NoSuchKeyErr { + return true + } + default: + return false + } + return false +} + +func (b *BOSObjectStorage) Stop() {} diff --git a/pkg/storage/config/schema_config.go b/pkg/storage/config/schema_config.go index 18b48433b55dd..d7752e9fd0569 100644 --- a/pkg/storage/config/schema_config.go +++ b/pkg/storage/config/schema_config.go @@ -25,6 +25,7 @@ const ( StorageTypeAWS = "aws" StorageTypeAWSDynamo = "aws-dynamo" StorageTypeAzure = "azure" + StorageTypeBOS = "bos" StorageTypeBoltDB = "boltdb" StorageTypeCassandra = "cassandra" StorageTypeInMemory = "inmemory" diff --git a/pkg/storage/factory.go b/pkg/storage/factory.go index e800dbf7fa1e8..be2c9cb38d27e 100644 --- a/pkg/storage/factory.go +++ b/pkg/storage/factory.go @@ -15,6 +15,7 @@ import ( "github.com/grafana/loki/pkg/storage/chunk/client" "github.com/grafana/loki/pkg/storage/chunk/client/aws" "github.com/grafana/loki/pkg/storage/chunk/client/azure" + "github.com/grafana/loki/pkg/storage/chunk/client/baidubce" "github.com/grafana/loki/pkg/storage/chunk/client/cassandra" "github.com/grafana/loki/pkg/storage/chunk/client/gcp" "github.com/grafana/loki/pkg/storage/chunk/client/grpc" @@ -45,16 +46,17 @@ type StoreLimits interface { // Config chooses which storage client to use. 
type Config struct { - AWSStorageConfig aws.StorageConfig `yaml:"aws"` - AzureStorageConfig azure.BlobStorageConfig `yaml:"azure"` - GCPStorageConfig gcp.Config `yaml:"bigtable"` - GCSConfig gcp.GCSConfig `yaml:"gcs"` - CassandraStorageConfig cassandra.Config `yaml:"cassandra"` - BoltDBConfig local.BoltDBConfig `yaml:"boltdb"` - FSConfig local.FSConfig `yaml:"filesystem"` - Swift openstack.SwiftConfig `yaml:"swift"` - GrpcConfig grpc.Config `yaml:"grpc_store"` - Hedging hedging.Config `yaml:"hedging"` + AWSStorageConfig aws.StorageConfig `yaml:"aws"` + AzureStorageConfig azure.BlobStorageConfig `yaml:"azure"` + BOSStorageConfig baidubce.BOSStorageConfig `yaml:"bos"` + GCPStorageConfig gcp.Config `yaml:"bigtable"` + GCSConfig gcp.GCSConfig `yaml:"gcs"` + CassandraStorageConfig cassandra.Config `yaml:"cassandra"` + BoltDBConfig local.BoltDBConfig `yaml:"boltdb"` + FSConfig local.FSConfig `yaml:"filesystem"` + Swift openstack.SwiftConfig `yaml:"swift"` + GrpcConfig grpc.Config `yaml:"grpc_store"` + Hedging hedging.Config `yaml:"hedging"` IndexCacheValidity time.Duration `yaml:"index_cache_validity"` @@ -76,6 +78,7 @@ type Config struct { func (cfg *Config) RegisterFlags(f *flag.FlagSet) { cfg.AWSStorageConfig.RegisterFlags(f) cfg.AzureStorageConfig.RegisterFlags(f) + cfg.BOSStorageConfig.RegisterFlags(f) cfg.GCPStorageConfig.RegisterFlags(f) cfg.GCSConfig.RegisterFlags(f) cfg.CassandraStorageConfig.RegisterFlags(f) @@ -202,6 +205,12 @@ func NewChunkClient(name string, cfg Config, schemaCfg config.SchemaConfig, clie return nil, err } return client.NewClientWithMaxParallel(c, nil, cfg.MaxParallelGetChunk, schemaCfg), nil + case config.StorageTypeBOS: + c, err := baidubce.NewBOSObjectStorage(&cfg.BOSStorageConfig) + if err != nil { + return nil, err + } + return client.NewClientWithMaxParallel(c, nil, cfg.MaxChunkBatchSize, schemaCfg), nil case config.StorageTypeGCP: return gcp.NewBigtableObjectClient(context.Background(), cfg.GCPStorageConfig, schemaCfg) case config.StorageTypeGCPColumnKey, config.StorageTypeBigTable, config.StorageTypeBigTableHashed: @@ -314,6 +323,8 @@ func NewObjectClient(name string, cfg Config, clientMetrics ClientMetrics) (clie return testutils.NewMockStorage(), nil case config.StorageTypeFileSystem: return local.NewFSObjectClient(cfg.FSConfig) + case config.StorageTypeBOS: + return baidubce.NewBOSObjectStorage(&cfg.BOSStorageConfig) default: return nil, fmt.Errorf("Unrecognized storage client %v, choose one of: %v, %v, %v, %v, %v", name, config.StorageTypeAWS, config.StorageTypeS3, config.StorageTypeGCS, config.StorageTypeAzure, config.StorageTypeFileSystem) } diff --git a/vendor/github.com/baidubce/bce-sdk-go/LICENSE b/vendor/github.com/baidubce/bce-sdk-go/LICENSE new file mode 100644 index 0000000000000..f433b1a53f5b8 --- /dev/null +++ b/vendor/github.com/baidubce/bce-sdk-go/LICENSE @@ -0,0 +1,177 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS diff --git a/vendor/github.com/baidubce/bce-sdk-go/auth/credentials.go b/vendor/github.com/baidubce/bce-sdk-go/auth/credentials.go new file mode 100644 index 0000000000000..fb1e415fd5cda --- /dev/null +++ b/vendor/github.com/baidubce/bce-sdk-go/auth/credentials.go @@ -0,0 +1,62 @@ +/* + * Copyright 2017 Baidu, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file + * except in compliance with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the + * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, + * either express or implied. See the License for the specific language governing permissions + * and limitations under the License. + */ + +// credentials.go - the credentials data structure definition + +// Package auth implements the authorization functionality for BCE. +// It use the BCE access key ID and secret access key with the specific sign algorithm to generate +// the authorization string. It also supports the temporary authorization by the STS token. 
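
A quick sketch of the two credential constructors defined just below; keys and token are placeholders, and the session variant is what STS-issued temporary credentials use:

package main

import (
	"fmt"

	"github.com/baidubce/bce-sdk-go/auth"
)

func main() {
	// Long-lived credentials: both AK and SK must be non-empty.
	cred, err := auth.NewBceCredentials("my-ak", "my-sk")
	if err != nil {
		panic(err)
	}
	fmt.Println(cred) // "ak: my-ak, sk: my-sk"

	// Temporary credentials additionally carry a session token.
	sts, err := auth.NewSessionBceCredentials("my-ak", "my-sk", "my-token")
	if err != nil {
		panic(err)
	}
	fmt.Println(sts)
}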
+package auth + +import "errors" + +// BceCredentials define the data structure for authorization +type BceCredentials struct { + AccessKeyId string // access key id to the service + SecretAccessKey string // secret access key to the service + SessionToken string // session token generate by the STS service +} + +func (b *BceCredentials) String() string { + str := "ak: " + b.AccessKeyId + ", sk: " + b.SecretAccessKey + if len(b.SessionToken) != 0 { + return str + ", sessionToken: " + b.SessionToken + } + return str +} + +func NewBceCredentials(ak, sk string) (*BceCredentials, error) { + if len(ak) == 0 { + return nil, errors.New("accessKeyId should not be empty") + } + if len(sk) == 0 { + return nil, errors.New("secretKey should not be empty") + } + + return &BceCredentials{ak, sk, ""}, nil +} + +func NewSessionBceCredentials(ak, sk, token string) (*BceCredentials, error) { + if len(token) == 0 { + return nil, errors.New("sessionToken should not be empty") + } + + result, err := NewBceCredentials(ak, sk) + if err != nil { + return nil, err + } + result.SessionToken = token + + return result, nil +} diff --git a/vendor/github.com/baidubce/bce-sdk-go/auth/signer.go b/vendor/github.com/baidubce/bce-sdk-go/auth/signer.go new file mode 100644 index 0000000000000..573479806bc22 --- /dev/null +++ b/vendor/github.com/baidubce/bce-sdk-go/auth/signer.go @@ -0,0 +1,184 @@ +/* + * Copyright 2017 Baidu, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file + * except in compliance with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the + * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, + * either express or implied. See the License for the specific language governing permissions + * and limitations under the License. 
+ */ + +// signer.go - implement the specific sign algorithm of BCE V1 protocol + +package auth + +import ( + "fmt" + "sort" + "strings" + + "github.com/baidubce/bce-sdk-go/http" + "github.com/baidubce/bce-sdk-go/util" + "github.com/baidubce/bce-sdk-go/util/log" +) + +var ( + BCE_AUTH_VERSION = "bce-auth-v1" + SIGN_JOINER = "\n" + SIGN_HEADER_JOINER = ";" + DEFAULT_EXPIRE_SECONDS = 1800 + DEFAULT_HEADERS_TO_SIGN = map[string]struct{}{ + strings.ToLower(http.HOST): {}, + strings.ToLower(http.CONTENT_LENGTH): {}, + strings.ToLower(http.CONTENT_TYPE): {}, + strings.ToLower(http.CONTENT_MD5): {}, + } +) + +// Signer abstracts the entity that implements the `Sign` method +type Signer interface { + // Sign the given Request with the Credentials and SignOptions + Sign(*http.Request, *BceCredentials, *SignOptions) +} + +// SignOptions defines the data structure used by Signer +type SignOptions struct { + HeadersToSign map[string]struct{} + Timestamp int64 + ExpireSeconds int +} + +func (opt *SignOptions) String() string { + return fmt.Sprintf(`SignOptions [ + HeadersToSign=%s; + Timestamp=%d; + ExpireSeconds=%d + ]`, opt.HeadersToSign, opt.Timestamp, opt.ExpireSeconds) +} + +// BceV1Signer implements the v1 sign algorithm +type BceV1Signer struct{} + +// Sign - generate the authorization string from the BceCredentials and SignOptions +// +// PARAMS: +// - req: *http.Request for this sign +// - cred: *BceCredentials to access the serice +// - opt: *SignOptions for this sign algorithm +func (b *BceV1Signer) Sign(req *http.Request, cred *BceCredentials, opt *SignOptions) { + if req == nil { + log.Fatal("request should not be null for sign") + return + } + if cred == nil { + log.Fatal("credentials should not be null for sign") + return + } + + // Prepare parameters + accessKeyId := cred.AccessKeyId + secretAccessKey := cred.SecretAccessKey + signDate := util.FormatISO8601Date(util.NowUTCSeconds()) + // Modify the sign time if it is not the default value but specified by client + if opt.Timestamp != 0 { + signDate = util.FormatISO8601Date(opt.Timestamp) + } + + // Set security token if using session credentials + if len(cred.SessionToken) != 0 { + req.SetHeader(http.BCE_SECURITY_TOKEN, cred.SessionToken) + } + + // Prepare the canonical request components + signKeyInfo := fmt.Sprintf("%s/%s/%s/%d", + BCE_AUTH_VERSION, + accessKeyId, + signDate, + opt.ExpireSeconds) + signKey := util.HmacSha256Hex(secretAccessKey, signKeyInfo) + canonicalUri := getCanonicalURIPath(req.Uri()) + canonicalQueryString := getCanonicalQueryString(req.Params()) + canonicalHeaders, signedHeadersArr := getCanonicalHeaders(req.Headers(), opt.HeadersToSign) + + // Generate signed headers string + signedHeaders := "" + if len(signedHeadersArr) > 0 { + sort.Strings(signedHeadersArr) + signedHeaders = strings.Join(signedHeadersArr, SIGN_HEADER_JOINER) + } + + // Generate signature + canonicalParts := []string{req.Method(), canonicalUri, canonicalQueryString, canonicalHeaders} + canonicalReq := strings.Join(canonicalParts, SIGN_JOINER) + log.Debug("CanonicalRequest data:\n" + canonicalReq) + signature := util.HmacSha256Hex(signKey, canonicalReq) + + // Generate auth string and add to the reqeust header + authStr := signKeyInfo + "/" + signedHeaders + "/" + signature + log.Info("Authorization=" + authStr) + + req.SetHeader(http.AUTHORIZATION, authStr) +} + +func getCanonicalURIPath(path string) string { + if len(path) == 0 { + return "/" + } + canonical_path := path + if strings.HasPrefix(path, "/") { + canonical_path = path[1:] + } + 
canonical_path = util.UriEncode(canonical_path, false)
+	return "/" + canonical_path
+}
+
+func getCanonicalQueryString(params map[string]string) string {
+	if len(params) == 0 {
+		return ""
+	}
+
+	result := make([]string, 0, len(params))
+	for k, v := range params {
+		if strings.ToLower(k) == strings.ToLower(http.AUTHORIZATION) {
+			continue
+		}
+		item := ""
+		if len(v) == 0 {
+			item = fmt.Sprintf("%s=", util.UriEncode(k, true))
+		} else {
+			item = fmt.Sprintf("%s=%s", util.UriEncode(k, true), util.UriEncode(v, true))
+		}
+		result = append(result, item)
+	}
+	sort.Strings(result)
+	return strings.Join(result, "&")
+}
+
+func getCanonicalHeaders(headers map[string]string,
+	headersToSign map[string]struct{}) (string, []string) {
+	canonicalHeaders := make([]string, 0, len(headers))
+	signHeaders := make([]string, 0, len(headersToSign))
+	for k, v := range headers {
+		headKey := strings.ToLower(k)
+		if headKey == strings.ToLower(http.AUTHORIZATION) {
+			continue
+		}
+		_, headExists := headersToSign[headKey]
+		if headExists ||
+			(strings.HasPrefix(headKey, http.BCE_PREFIX) &&
+				(headKey != http.BCE_REQUEST_ID)) {
+
+			headVal := strings.TrimSpace(v)
+			encoded := util.UriEncode(headKey, true) + ":" + util.UriEncode(headVal, true)
+			canonicalHeaders = append(canonicalHeaders, encoded)
+			signHeaders = append(signHeaders, headKey)
+		}
+	}
+	sort.Strings(canonicalHeaders)
+	sort.Strings(signHeaders)
+	return strings.Join(canonicalHeaders, SIGN_JOINER), signHeaders
+}
diff --git a/vendor/github.com/baidubce/bce-sdk-go/bce/builder.go b/vendor/github.com/baidubce/bce-sdk-go/bce/builder.go
new file mode 100644
index 0000000000000..1b25504cb4ec5
--- /dev/null
+++ b/vendor/github.com/baidubce/bce-sdk-go/bce/builder.go
@@ -0,0 +1,189 @@
+/*
+ * Copyright 2017 Baidu, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
+ * except in compliance with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the
+ * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
+ * either express or implied. See the License for the specific language governing permissions
+ * and limitations under the License.
+ */
+
+// builder.go - defines the RequestBuilder structure for BCE services
+
+package bce
+
+import (
+	"encoding/json"
+	"fmt"
+)
+
+// RequestBuilder holds config data for a bce request.
+// Some of the fields are required and the others are optional.
+// The builder pattern can simplify the execution of requests.
+type RequestBuilder struct {
+	client Client
+
+	url         string            // required
+	method      string            // required
+	queryParams map[string]string // optional
+	headers     map[string]string // optional
+	body        interface{}       // optional
+	result      interface{}       // optional
+}
+
+// NewRequestBuilder creates a RequestBuilder with the client.
+func NewRequestBuilder(client Client) *RequestBuilder {
+	return &RequestBuilder{
+		client: client,
+	}
+}
+
+func (b *RequestBuilder) WithURL(url string) *RequestBuilder {
+	b.url = url
+	return b
+}
+
+func (b *RequestBuilder) WithMethod(method string) *RequestBuilder {
+	b.method = method
+	return b
+}
+
+// WithQueryParam sets a query param with the key/value directly.
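
Putting the signer pieces above together, a hedged sketch of signing a bare request with BceV1Signer; the request setters (SetMethod, SetUri, SetHeader) come from the vendored http package, and the host and URI values are placeholders:

package main

import (
	"fmt"

	"github.com/baidubce/bce-sdk-go/auth"
	"github.com/baidubce/bce-sdk-go/http"
)

func main() {
	cred, err := auth.NewBceCredentials("my-ak", "my-sk")
	if err != nil {
		panic(err)
	}
	opts := &auth.SignOptions{
		HeadersToSign: auth.DEFAULT_HEADERS_TO_SIGN,
		ExpireSeconds: auth.DEFAULT_EXPIRE_SECONDS,
	}

	req := &http.Request{}
	req.SetMethod(http.GET)
	req.SetUri("/v1/example-bucket/example-object")
	req.SetHeader(http.HOST, "bj.bcebos.com") // Host is in DEFAULT_HEADERS_TO_SIGN

	// Sign derives the sign key from the credentials and the key info string,
	// canonicalizes method/URI/query/headers, and writes the resulting
	// bce-auth-v1 string into the Authorization header.
	signer := &auth.BceV1Signer{}
	signer.Sign(req, cred, opts)
	fmt.Println(req.Headers()[http.AUTHORIZATION])
}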
+func (b *RequestBuilder) WithQueryParam(key, value string) *RequestBuilder { + if b.queryParams == nil { + b.queryParams = make(map[string]string) + } + b.queryParams[key] = value + return b +} + +// set query param with the key/value only when the value is not blank. +func (b *RequestBuilder) WithQueryParamFilter(key, value string) *RequestBuilder { + if len(value) == 0 { + return b + } + return b.WithQueryParam(key, value) +} + +func (b *RequestBuilder) WithQueryParams(params map[string]string) *RequestBuilder { + if b.queryParams == nil { + b.queryParams = params + } else { + for key, value := range params { + b.queryParams[key] = value + } + } + return b +} + +func (b *RequestBuilder) WithHeader(key, value string) *RequestBuilder { + if b.headers == nil { + b.headers = make(map[string]string) + } + b.headers[key] = value + return b +} + +func (b *RequestBuilder) WithHeaders(headers map[string]string) *RequestBuilder { + if b.headers == nil { + b.headers = headers + } else { + for key, value := range headers { + b.headers[key] = value + } + } + return b +} + +func (b *RequestBuilder) WithBody(body interface{}) *RequestBuilder { + b.body = body + return b +} + +func (b *RequestBuilder) WithResult(result interface{}) *RequestBuilder { + b.result = result + return b +} + +// Do will send request to bce and get result with the builder's parameters. +func (b *RequestBuilder) Do() error { + if err := b.validate(); err != nil { + return err + } + + // build BceRequest + req, err := b.buildBceRequest() + if err != nil { + return err + } + + // get result from BceResponse + if err := b.buildBceResponse(req); err != nil { + return err + } + + return nil +} + +// Validate if the required fields are providered. +func (b *RequestBuilder) validate() error { + if len(b.url) == 0 { + return fmt.Errorf("The url can't be null.") + } + if len(b.method) == 0 { + return fmt.Errorf("The method can't be null.") + } + if b.client == nil { + return fmt.Errorf("The client can't be null.") + } + return nil +} + +func (b *RequestBuilder) buildBceRequest() (*BceRequest, error) { + // Build the request + req := &BceRequest{} + req.SetUri(b.url) + req.SetMethod(b.method) + + if b.headers != nil { + req.SetHeaders(b.headers) + } + if b.queryParams != nil { + req.SetParams(b.queryParams) + } + if b.body != nil { + bodyBytes, err := json.Marshal(b.body) + if err != nil { + return nil, err + } + body, err := NewBodyFromBytes(bodyBytes) + if err != nil { + return nil, err + } + req.SetBody(body) + } + + return req, nil +} + +func (b *RequestBuilder) buildBceResponse(req *BceRequest) error { + // Send request and get response + resp := &BceResponse{} + if err := b.client.SendRequest(req, resp); err != nil { + return err + } + if resp.IsFail() { + return resp.ServiceError() + } + defer resp.Body().Close() + + if b.result == nil { + return nil + } + + return resp.ParseJsonBody(b.result) +} diff --git a/vendor/github.com/baidubce/bce-sdk-go/bce/client.go b/vendor/github.com/baidubce/bce-sdk-go/bce/client.go new file mode 100644 index 0000000000000..1600920b716b2 --- /dev/null +++ b/vendor/github.com/baidubce/bce-sdk-go/bce/client.go @@ -0,0 +1,267 @@ +/* + * Copyright 2017 Baidu, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file + * except in compliance with the License. 
You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the
+ * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
+ * either express or implied. See the License for the specific language governing permissions
+ * and limitations under the License.
+ */
+
+// client.go - definition of the BceClientConfiguration and BceClient structures
+
+// Package bce implements the infrastructure to access BCE services.
+//
+//     - BceClient:
+//     It is the general client of BCE to access all services. It builds http requests to access the
+//     services based on the given client configuration.
+//
+//     - BceClientConfiguration:
+//     The client configuration data structure which contains endpoint, region, credentials, retry
+//     policy, sign options and so on. It supports most of the default values and users can also
+//     access or change the defaults with its public fields.
+//
+//     - Error types:
+//     The error types raised when making requests to or receiving responses from the BCE services
+//     are of two kinds: the BceClientError when making a request to BCE services and the
+//     BceServiceError when receiving a response from them.
+//
+//     - BceRequest:
+//     The request instance stands for a request to access the BCE services.
+//
+//     - BceResponse:
+//     The response instance stands for a response from the BCE services.
package bce
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"time"
+
+	"github.com/baidubce/bce-sdk-go/auth"
+	"github.com/baidubce/bce-sdk-go/http"
+	"github.com/baidubce/bce-sdk-go/util"
+	"github.com/baidubce/bce-sdk-go/util/log"
+)
+
+// Client is the general interface which can perform sending request. Different services
+// will define their own clients in case of specific extension.
+type Client interface {
+	SendRequest(*BceRequest, *BceResponse) error
+	SendRequestFromBytes(*BceRequest, *BceResponse, []byte) error
+	GetBceClientConfig() *BceClientConfiguration
+}
+
+// BceClient defines the general client to access the BCE services.
+type BceClient struct {
+	Config *BceClientConfiguration
+	Signer auth.Signer // the sign algorithm
+}
+
+// buildHttpRequest - the helper method for the client to build the http request
+//
+// PARAMS:
+// - request: the input request object to be built
+func (c *BceClient) buildHttpRequest(request *BceRequest) {
+	// Construct the http request instance for the special fields
+	request.BuildHttpRequest()
+
+	// Set the client specific configurations
+	if request.Endpoint() == "" {
+		request.SetEndpoint(c.Config.Endpoint)
+	}
+	if request.Protocol() == "" {
+		request.SetProtocol(DEFAULT_PROTOCOL)
+	}
+	if len(c.Config.ProxyUrl) != 0 {
+		request.SetProxyUrl(c.Config.ProxyUrl)
+	}
+	request.SetTimeout(c.Config.ConnectionTimeoutInMillis / 1000)
+
+	// Set the BCE request headers
+	request.SetHeader(http.HOST, request.Host())
+	request.SetHeader(http.USER_AGENT, c.Config.UserAgent)
+	request.SetHeader(http.BCE_DATE, util.FormatISO8601Date(util.NowUTCSeconds()))
+
+	// Generate the auth string if needed
+	if c.Config.Credentials != nil {
+		c.Signer.Sign(&request.Request, c.Config.Credentials, c.Config.SignOption)
+	}
+}
+
+// SendRequest - the client sends the http request with the retry policy and receives the
+// response from the BCE services.
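
Returning to the RequestBuilder defined above, a sketch of its intended call pattern; the endpoint, path, and payload are placeholders, NewBceClientWithAkSk is defined later in this same file, and Do() would perform a live HTTP call:

package main

import (
	"fmt"

	"github.com/baidubce/bce-sdk-go/bce"
	"github.com/baidubce/bce-sdk-go/http"
)

func main() {
	client, err := bce.NewBceClientWithAkSk("my-ak", "my-sk", "example.baidubce.com")
	if err != nil {
		panic(err)
	}

	var result struct {
		Name string `json:"name"`
	}
	// Do() validates url/method/client, JSON-marshals the body, sends the
	// request, and decodes a successful JSON response into result.
	err = bce.NewRequestBuilder(client).
		WithMethod(http.POST).
		WithURL("/v1/example").
		WithQueryParamFilter("marker", ""). // blank value: silently dropped
		WithHeader(http.CONTENT_TYPE, bce.DEFAULT_CONTENT_TYPE).
		WithBody(map[string]string{"key": "value"}).
		WithResult(&result).
		Do()
	fmt.Println(result.Name, err)
}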
+// +// PARAMS: +// - req: the request object to be sent to the BCE service +// - resp: the response object to receive the content from BCE service +// RETURNS: +// - error: nil if ok otherwise the specific error +func (c *BceClient) SendRequest(req *BceRequest, resp *BceResponse) error { + // Return client error if it is not nil + if req.ClientError() != nil { + return req.ClientError() + } + + // Build the http request and prepare to send + c.buildHttpRequest(req) + log.Infof("send http request: %v", req) + + // Send request with the given retry policy + retries := 0 + if req.Body() != nil { + defer req.Body().Close() // Manually close the ReadCloser body for retry + } + for { + // The request body should be temporarily saved if retry to send the http request + var retryBuf bytes.Buffer + var teeReader io.Reader + if c.Config.Retry.ShouldRetry(nil, 0) && req.Body() != nil { + teeReader = io.TeeReader(req.Body(), &retryBuf) + req.Request.SetBody(ioutil.NopCloser(teeReader)) + } + httpResp, err := http.Execute(&req.Request) + + if err != nil { + if c.Config.Retry.ShouldRetry(err, retries) { + delay_in_mills := c.Config.Retry.GetDelayBeforeNextRetryInMillis(err, retries) + time.Sleep(delay_in_mills) + } else { + return &BceClientError{ + fmt.Sprintf("execute http request failed! Retried %d times, error: %v", + retries, err)} + } + retries++ + log.Warnf("send request failed: %v, retry for %d time(s)", err, retries) + if req.Body() != nil { + ioutil.ReadAll(teeReader) + req.Request.SetBody(ioutil.NopCloser(&retryBuf)) + } + continue + } + resp.SetHttpResponse(httpResp) + resp.ParseResponse() + + log.Infof("receive http response: status: %s, debugId: %s, requestId: %s, elapsed: %v", + resp.StatusText(), resp.DebugId(), resp.RequestId(), resp.ElapsedTime()) + for k, v := range resp.Headers() { + log.Debugf("%s=%s", k, v) + } + if resp.IsFail() { + err := resp.ServiceError() + if c.Config.Retry.ShouldRetry(err, retries) { + delay_in_mills := c.Config.Retry.GetDelayBeforeNextRetryInMillis(err, retries) + time.Sleep(delay_in_mills) + } else { + return err + } + retries++ + log.Warnf("send request failed, retry for %d time(s)", retries) + if req.Body() != nil { + ioutil.ReadAll(teeReader) + req.Request.SetBody(ioutil.NopCloser(&retryBuf)) + } + continue + } + return nil + } +} + +// SendRequestFromBytes - the client performs sending the http request with retry policy and receive the +// response from the BCE services. 
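
The body-replay trick in SendRequest above is worth spelling out: the first attempt reads the body through an io.TeeReader that mirrors every byte into a buffer, and a retry drains the remainder and replays the buffered copy. A standalone stdlib-only sketch of the same technique:

package main

import (
	"bytes"
	"fmt"
	"io"
	"io/ioutil"
	"strings"
)

func main() {
	original := ioutil.NopCloser(strings.NewReader("request body"))

	// The first attempt reads through a TeeReader, which mirrors every
	// byte into retryBuf as a side effect.
	var retryBuf bytes.Buffer
	tee := io.TeeReader(original, &retryBuf)
	firstAttempt, _ := ioutil.ReadAll(tee)
	fmt.Println(string(firstAttempt)) // "request body"

	// A retry replays the buffered copy instead of the consumed stream.
	retryBody := ioutil.NopCloser(&retryBuf)
	secondAttempt, _ := ioutil.ReadAll(retryBody)
	fmt.Println(string(secondAttempt)) // "request body"
}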
+// +// PARAMS: +// - req: the request object to be sent to the BCE service +// - resp: the response object to receive the content from BCE service +// - content: the content of body +// RETURNS: +// - error: nil if ok otherwise the specific error +func (c *BceClient) SendRequestFromBytes(req *BceRequest, resp *BceResponse, content []byte) error { + // Return client error if it is not nil + if req.ClientError() != nil { + return req.ClientError() + } + // Build the http request and prepare to send + c.buildHttpRequest(req) + log.Infof("send http request: %v", req) + // Send request with the given retry policy + retries := 0 + for { + // The request body should be temporarily saved if retry to send the http request + buf := bytes.NewBuffer(content) + req.Request.SetBody(ioutil.NopCloser(buf)) + defer req.Request.Body().Close() // Manually close the ReadCloser body for retry + httpResp, err := http.Execute(&req.Request) + if err != nil { + if c.Config.Retry.ShouldRetry(err, retries) { + delay_in_mills := c.Config.Retry.GetDelayBeforeNextRetryInMillis(err, retries) + time.Sleep(delay_in_mills) + } else { + return &BceClientError{ + fmt.Sprintf("execute http request failed! Retried %d times, error: %v", + retries, err)} + } + retries++ + log.Warnf("send request failed: %v, retry for %d time(s)", err, retries) + continue + } + resp.SetHttpResponse(httpResp) + resp.ParseResponse() + log.Infof("receive http response: status: %s, debugId: %s, requestId: %s, elapsed: %v", + resp.StatusText(), resp.DebugId(), resp.RequestId(), resp.ElapsedTime()) + for k, v := range resp.Headers() { + log.Debugf("%s=%s", k, v) + } + if resp.IsFail() { + err := resp.ServiceError() + if c.Config.Retry.ShouldRetry(err, retries) { + delay_in_mills := c.Config.Retry.GetDelayBeforeNextRetryInMillis(err, retries) + time.Sleep(delay_in_mills) + } else { + return err + } + retries++ + log.Warnf("send request failed, retry for %d time(s)", retries) + continue + } + return nil + } +} + +func (c *BceClient) GetBceClientConfig() *BceClientConfiguration { + return c.Config +} + +func NewBceClient(conf *BceClientConfiguration, sign auth.Signer) *BceClient { + clientConfig := http.ClientConfig{RedirectDisabled: conf.RedirectDisabled} + http.InitClient(clientConfig) + return &BceClient{conf, sign} +} + +func NewBceClientWithAkSk(ak, sk, endPoint string) (*BceClient, error) { + credentials, err := auth.NewBceCredentials(ak, sk) + if err != nil { + return nil, err + } + defaultSignOptions := &auth.SignOptions{ + HeadersToSign: auth.DEFAULT_HEADERS_TO_SIGN, + ExpireSeconds: auth.DEFAULT_EXPIRE_SECONDS} + defaultConf := &BceClientConfiguration{ + Endpoint: endPoint, + Region: DEFAULT_REGION, + UserAgent: DEFAULT_USER_AGENT, + Credentials: credentials, + SignOption: defaultSignOptions, + Retry: DEFAULT_RETRY_POLICY, + ConnectionTimeoutInMillis: DEFAULT_CONNECTION_TIMEOUT_IN_MILLIS, + RedirectDisabled: false} + v1Signer := &auth.BceV1Signer{} + + return NewBceClient(defaultConf, v1Signer), nil +} diff --git a/vendor/github.com/baidubce/bce-sdk-go/bce/config.go b/vendor/github.com/baidubce/bce-sdk-go/bce/config.go new file mode 100644 index 0000000000000..037e2e890021c --- /dev/null +++ b/vendor/github.com/baidubce/bce-sdk-go/bce/config.go @@ -0,0 +1,80 @@ +/* + * Copyright 2017 Baidu, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file + * except in compliance with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the + * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, + * either express or implied. See the License for the specific language governing permissions + * and limitations under the License. + */ + +// config.go - define the client configuration for BCE + +package bce + +import ( + "fmt" + "reflect" + "runtime" + + "github.com/baidubce/bce-sdk-go/auth" +) + +// Constants and default values for the package bce +const ( + SDK_VERSION = "0.9.74" + URI_PREFIX = "/" // now support uri without prefix "v1" so just set root path + DEFAULT_DOMAIN = "baidubce.com" + DEFAULT_PROTOCOL = "http" + DEFAULT_REGION = "bj" + DEFAULT_CONTENT_TYPE = "application/json;charset=utf-8" + DEFAULT_CONNECTION_TIMEOUT_IN_MILLIS = 1200 * 1000 +) + +var ( + DEFAULT_USER_AGENT string + DEFAULT_RETRY_POLICY = NewBackOffRetryPolicy(3, 20000, 300) +) + +func init() { + DEFAULT_USER_AGENT = "bce-sdk-go" + DEFAULT_USER_AGENT += "/" + SDK_VERSION + DEFAULT_USER_AGENT += "/" + runtime.Version() + DEFAULT_USER_AGENT += "/" + runtime.GOOS + DEFAULT_USER_AGENT += "/" + runtime.GOARCH +} + +// BceClientConfiguration defines the config components structure. +type BceClientConfiguration struct { + Endpoint string + ProxyUrl string + Region string + UserAgent string + Credentials *auth.BceCredentials + SignOption *auth.SignOptions + Retry RetryPolicy + ConnectionTimeoutInMillis int + // CnameEnabled should be true when use custom domain as endpoint to visit bos resource + CnameEnabled bool + BackupEndpoint string + RedirectDisabled bool +} + +func (c *BceClientConfiguration) String() string { + return fmt.Sprintf(`BceClientConfiguration [ + Endpoint=%s; + ProxyUrl=%s; + Region=%s; + UserAgent=%s; + Credentials=%v; + SignOption=%v; + RetryPolicy=%v; + ConnectionTimeoutInMillis=%v; + RedirectDisabled=%v + ]`, c.Endpoint, c.ProxyUrl, c.Region, c.UserAgent, c.Credentials, + c.SignOption, reflect.TypeOf(c.Retry).Name(), c.ConnectionTimeoutInMillis, c.RedirectDisabled) +} diff --git a/vendor/github.com/baidubce/bce-sdk-go/bce/error.go b/vendor/github.com/baidubce/bce-sdk-go/bce/error.go new file mode 100644 index 0000000000000..e62565129819b --- /dev/null +++ b/vendor/github.com/baidubce/bce-sdk-go/bce/error.go @@ -0,0 +1,64 @@ +/* + * Copyright 2017 Baidu, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file + * except in compliance with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the + * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, + * either express or implied. See the License for the specific language governing permissions + * and limitations under the License. 
+ */
+
+// error.go - define the error types for BCE
+
+package bce
+
+const (
+	EACCESS_DENIED            = "AccessDenied"
+	EINAPPROPRIATE_JSON       = "InappropriateJSON"
+	EINTERNAL_ERROR           = "InternalError"
+	EINVALID_ACCESS_KEY_ID    = "InvalidAccessKeyId"
+	EINVALID_HTTP_AUTH_HEADER = "InvalidHTTPAuthHeader"
+	EINVALID_HTTP_REQUEST     = "InvalidHTTPRequest"
+	EINVALID_URI              = "InvalidURI"
+	EMALFORMED_JSON           = "MalformedJSON"
+	EINVALID_VERSION          = "InvalidVersion"
+	EOPT_IN_REQUIRED          = "OptInRequired"
+	EPRECONDITION_FAILED      = "PreconditionFailed"
+	EREQUEST_EXPIRED          = "RequestExpired"
+	ESIGNATURE_DOES_NOT_MATCH = "SignatureDoesNotMatch"
+)
+
+// BceError abstracts the error for BCE
+type BceError interface {
+	error
+}
+
+// BceClientError defines the error struct for the client when making a request
+type BceClientError struct{ Message string }
+
+func (b *BceClientError) Error() string { return b.Message }
+
+func NewBceClientError(msg string) *BceClientError { return &BceClientError{msg} }
+
+// BceServiceError defines the error struct for the BCE service when receiving a response
+type BceServiceError struct {
+	Code       string
+	Message    string
+	RequestId  string
+	StatusCode int
+}
+
+func (b *BceServiceError) Error() string {
+	ret := "[Code: " + b.Code
+	ret += "; Message: " + b.Message
+	ret += "; RequestId: " + b.RequestId + "]"
+	return ret
+}
+
+func NewBceServiceError(code, msg, reqId string, status int) *BceServiceError {
+	return &BceServiceError{code, msg, reqId, status}
+}
diff --git a/vendor/github.com/baidubce/bce-sdk-go/bce/request.go b/vendor/github.com/baidubce/bce-sdk-go/bce/request.go
new file mode 100644
index 0000000000000..ca8409e1dad3d
--- /dev/null
+++ b/vendor/github.com/baidubce/bce-sdk-go/bce/request.go
@@ -0,0 +1,219 @@
+/*
+ * Copyright 2017 Baidu, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
+ * except in compliance with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the
+ * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
+ * either express or implied. See the License for the specific language governing permissions
+ * and limitations under the License.
+ */
+
+// request.go - defines the BCE services request
+
+package bce
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"os"
+
+	"github.com/baidubce/bce-sdk-go/http"
+	"github.com/baidubce/bce-sdk-go/util"
+)
+
+// Body defines the data structure used in BCE request.
+// Every BCE request that sets the body field must set its content-length and content-md5 headers
+// to guarantee the correctness of the body content, and users can also set the content-sha256
+// header to strengthen the correctness with the "SetHeader" method.
+type Body struct {
+	stream     io.ReadCloser
+	size       int64
+	contentMD5 string
+}
+
+func (b *Body) Stream() io.ReadCloser { return b.stream }
+
+func (b *Body) SetStream(stream io.ReadCloser) { b.stream = stream }
+
+func (b *Body) Size() int64 { return b.size }
+
+func (b *Body) ContentMD5() string { return b.contentMD5 }
+
+// NewBodyFromBytes - build a Body object from the byte stream to be used in the http request, it
+// calculates the content-md5 of the byte stream and stores the size as well as the stream.
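
For callers, the practical distinction is BceClientError (the request never completed) versus BceServiceError (the service answered with an error document). A sketch of the kind of check the new BOS client's IsObjectNotFoundErr performs; Loki's version additionally unwraps with errors.Cause, which is omitted here:

package main

import (
	"fmt"

	"github.com/baidubce/bce-sdk-go/bce"
)

// isNotFound reports whether err is a "NoSuchKey" service error, mirroring
// the type switch used by IsObjectNotFoundErr earlier in this patch.
func isNotFound(err error) bool {
	if svcErr, ok := err.(*bce.BceServiceError); ok {
		return svcErr.Code == "NoSuchKey"
	}
	return false
}

func main() {
	err := bce.NewBceServiceError("NoSuchKey", "The resource you requested does not exist", "req-123", 404)
	fmt.Println(isNotFound(err)) // true
	fmt.Println(err)             // [Code: NoSuchKey; Message: ...; RequestId: req-123]
}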
+// +// PARAMS: +// - stream: byte stream +// RETURNS: +// - *Body: the return Body object +// - error: error if any specific error occurs +func NewBodyFromBytes(stream []byte) (*Body, error) { + buf := bytes.NewBuffer(stream) + size := int64(buf.Len()) + contentMD5, err := util.CalculateContentMD5(buf, size) + if err != nil { + return nil, err + } + buf = bytes.NewBuffer(stream) + return &Body{ioutil.NopCloser(buf), size, contentMD5}, nil +} + +// NewBodyFromString - build a Body object from the string to be used in the http request, it +// calculates the content-md5 of the byte stream and store the size as well as the stream. +// +// PARAMS: +// - str: the input string +// RETURNS: +// - *Body: the return Body object +// - error: error if any specific error occurs +func NewBodyFromString(str string) (*Body, error) { + buf := bytes.NewBufferString(str) + size := int64(len(str)) + contentMD5, err := util.CalculateContentMD5(buf, size) + if err != nil { + return nil, err + } + buf = bytes.NewBufferString(str) + return &Body{ioutil.NopCloser(buf), size, contentMD5}, nil +} + +// NewBodyFromFile - build a Body object from the given file name to be used in the http request, +// it calculates the content-md5 of the byte stream and store the size as well as the stream. +// +// PARAMS: +// - fname: the given file name +// RETURNS: +// - *Body: the return Body object +// - error: error if any specific error occurs +func NewBodyFromFile(fname string) (*Body, error) { + file, err := os.Open(fname) + if err != nil { + return nil, err + } + fileInfo, infoErr := file.Stat() + if infoErr != nil { + return nil, infoErr + } + contentMD5, md5Err := util.CalculateContentMD5(file, fileInfo.Size()) + if md5Err != nil { + return nil, md5Err + } + if _, err = file.Seek(0, 0); err != nil { + return nil, err + } + return &Body{file, fileInfo.Size(), contentMD5}, nil +} + +// NewBodyFromSectionFile - build a Body object from the given file pointer with offset and size. +// It calculates the content-md5 of the given content and store the size as well as the stream. +// +// PARAMS: +// - file: the input file pointer +// - off: offset of current section body +// - size: current section body size +// RETURNS: +// - *Body: the return Body object +// - error: error if any specific error occurs +func NewBodyFromSectionFile(file *os.File, off, size int64) (*Body, error) { + if _, err := file.Seek(off, 0); err != nil { + return nil, err + } + contentMD5, md5Err := util.CalculateContentMD5(file, size) + if md5Err != nil { + return nil, md5Err + } + if _, err := file.Seek(0, 0); err != nil { + return nil, err + } + section := io.NewSectionReader(file, off, size) + return &Body{ioutil.NopCloser(section), size, contentMD5}, nil +} + +// NewBodyFromSizedReader - build a Body object from the given reader with size. +// It calculates the content-md5 of the given content and store the size as well as the stream. 
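
A quick sketch of the simplest constructor above; the digest encoding is an assumption (Content-Md5 headers conventionally carry base64-encoded MD5):

package main

import (
	"fmt"

	"github.com/baidubce/bce-sdk-go/bce"
)

func main() {
	// Each constructor pre-computes the checksum and size, so both are
	// available as header values before the stream is ever consumed.
	body, err := bce.NewBodyFromBytes([]byte("hello"))
	if err != nil {
		panic(err)
	}
	fmt.Println(body.Size())       // 5
	fmt.Println(body.ContentMD5()) // MD5 digest of "hello"
}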
+// +// PARAMS: +// - r: the input reader +// - size: the size to be read, -1 is read all +// RETURNS: +// - *Body: the return Body object +// - error: error if any specific error occurs +func NewBodyFromSizedReader(r io.Reader, size int64) (*Body, error) { + var buffer bytes.Buffer + var rlen int64 + var err error + if size >= 0 { + rlen, err = io.CopyN(&buffer, r, size) + } else { + rlen, err = io.Copy(&buffer, r) + } + if err != nil { + return nil, err + } + if rlen != int64(buffer.Len()) { // must be equal + return nil, NewBceClientError("unexpected reader") + } + if size >= 0 { + if rlen < size { + return nil, NewBceClientError("size is great than reader actual size") + } + } + contentMD5, err := util.CalculateContentMD5(bytes.NewBuffer(buffer.Bytes()), rlen) + if err != nil { + return nil, err + } + body := &Body{ + stream: ioutil.NopCloser(&buffer), + size: rlen, + contentMD5: contentMD5, + } + return body, nil +} + +// BceRequest defines the request structure for accessing BCE services +type BceRequest struct { + http.Request + requestId string + clientError *BceClientError +} + +func (b *BceRequest) RequestId() string { return b.requestId } + +func (b *BceRequest) SetRequestId(val string) { b.requestId = val } + +func (b *BceRequest) ClientError() *BceClientError { return b.clientError } + +func (b *BceRequest) SetClientError(err *BceClientError) { b.clientError = err } + +func (b *BceRequest) SetBody(body *Body) { // override SetBody derived from http.Request + b.Request.SetBody(body.Stream()) + b.SetLength(body.Size()) // set field of "net/http.Request.ContentLength" + if body.Size() > 0 { + b.SetHeader(http.CONTENT_MD5, body.ContentMD5()) + b.SetHeader(http.CONTENT_LENGTH, fmt.Sprintf("%d", body.Size())) + } +} + +func (b *BceRequest) BuildHttpRequest() { + // Only need to build the specific `requestId` field for BCE, other fields are same as the + // `http.Request` as well as its methods. + if len(b.requestId) == 0 { + // Construct the request ID with UUID + b.requestId = util.NewRequestId() + } + b.SetHeader(http.BCE_REQUEST_ID, b.requestId) +} + +func (b *BceRequest) String() string { + requestIdStr := "requestId=" + b.requestId + if b.clientError != nil { + return requestIdStr + ", client error: " + b.ClientError().Error() + } + return requestIdStr + "\n" + b.Request.String() +} diff --git a/vendor/github.com/baidubce/bce-sdk-go/bce/response.go b/vendor/github.com/baidubce/bce-sdk-go/bce/response.go new file mode 100644 index 0000000000000..94f2ae4b8c080 --- /dev/null +++ b/vendor/github.com/baidubce/bce-sdk-go/bce/response.go @@ -0,0 +1,128 @@ +/* + * Copyright 2017 Baidu, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file + * except in compliance with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the + * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, + * either express or implied. See the License for the specific language governing permissions + * and limitations under the License. + */ + +// response.go - defines the common BCE services response + +package bce + +import ( + "bytes" + "encoding/json" + "io" + "io/ioutil" + "strings" + "time" + + "github.com/baidubce/bce-sdk-go/http" +) + +// BceResponse defines the response structure for receiving BCE services response. 
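
The SetBody override above is what ties Body to the wire format. A sketch, under the assumption (suggested by the builder's use of a zero-value BceRequest) that SetHeader lazily initializes the header map:

package main

import (
	"fmt"

	"github.com/baidubce/bce-sdk-go/bce"
	"github.com/baidubce/bce-sdk-go/http"
)

func main() {
	body, err := bce.NewBodyFromString("hello")
	if err != nil {
		panic(err)
	}
	req := &bce.BceRequest{}
	// SetBody wires the Body's size and checksum into the Content-Length
	// and Content-Md5 headers automatically.
	req.SetBody(body)
	fmt.Println(req.Headers()[http.CONTENT_LENGTH], req.Headers()[http.CONTENT_MD5])
}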
+type BceResponse struct { + statusCode int + statusText string + requestId string + debugId string + response *http.Response + serviceError *BceServiceError +} + +func (r *BceResponse) IsFail() bool { + return r.response.StatusCode() >= 400 +} + +func (r *BceResponse) StatusCode() int { + return r.statusCode +} + +func (r *BceResponse) StatusText() string { + return r.statusText +} + +func (r *BceResponse) RequestId() string { + return r.requestId +} + +func (r *BceResponse) DebugId() string { + return r.debugId +} + +func (r *BceResponse) Header(key string) string { + return r.response.GetHeader(key) +} + +func (r *BceResponse) Headers() map[string]string { + return r.response.GetHeaders() +} + +func (r *BceResponse) Body() io.ReadCloser { + return r.response.Body() +} + +func (r *BceResponse) SetHttpResponse(response *http.Response) { + r.response = response +} + +func (r *BceResponse) ElapsedTime() time.Duration { + return r.response.ElapsedTime() +} + +func (r *BceResponse) ServiceError() *BceServiceError { + return r.serviceError +} + +func (r *BceResponse) ParseResponse() { + r.statusCode = r.response.StatusCode() + r.statusText = r.response.StatusText() + r.requestId = r.response.GetHeader(http.BCE_REQUEST_ID) + r.debugId = r.response.GetHeader(http.BCE_DEBUG_ID) + if r.IsFail() { + r.serviceError = NewBceServiceError("", r.statusText, r.requestId, r.statusCode) + + // First try to read the error `Code' and `Message' from body + rawBody, _ := ioutil.ReadAll(r.Body()) + defer r.Body().Close() + if len(rawBody) != 0 { + jsonDecoder := json.NewDecoder(bytes.NewBuffer(rawBody)) + if err := jsonDecoder.Decode(r.serviceError); err != nil { + r.serviceError = NewBceServiceError( + EMALFORMED_JSON, + "Service json error message decode failed", + r.requestId, + r.statusCode) + } + return + } + + // Then guess the `Message' from by the return status code + switch r.statusCode { + case 400: + r.serviceError.Code = EINVALID_HTTP_REQUEST + case 403: + r.serviceError.Code = EACCESS_DENIED + case 412: + r.serviceError.Code = EPRECONDITION_FAILED + case 500: + r.serviceError.Code = EINTERNAL_ERROR + default: + words := strings.Split(r.statusText, " ") + r.serviceError.Code = strings.Join(words[1:], "") + } + } +} + +func (r *BceResponse) ParseJsonBody(result interface{}) error { + defer r.Body().Close() + jsonDecoder := json.NewDecoder(r.Body()) + return jsonDecoder.Decode(result) +} diff --git a/vendor/github.com/baidubce/bce-sdk-go/bce/retry.go b/vendor/github.com/baidubce/bce-sdk-go/bce/retry.go new file mode 100644 index 0000000000000..19675a3e42587 --- /dev/null +++ b/vendor/github.com/baidubce/bce-sdk-go/bce/retry.go @@ -0,0 +1,118 @@ +/* + * Copyright 2017 Baidu, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file + * except in compliance with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the + * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, + * either express or implied. See the License for the specific language governing permissions + * and limitations under the License. + */ + +// retry.go - define the retry policy when making requests to BCE services + +package bce + +import ( + "net" + "net/http" + "time" + + "github.com/baidubce/bce-sdk-go/util/log" +) + +// RetryPolicy defines the two methods to retry for sending request. 
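
The error path of ParseResponse above relies on encoding/json matching exported struct fields to lowercase keys case-insensitively. A minimal reproduction with a hypothetical error document:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/baidubce/bce-sdk-go/bce"
)

func main() {
	// "code", "message" and "requestId" map onto the exported
	// Code/Message/RequestId fields of BceServiceError.
	raw := []byte(`{"code":"NoSuchKey","message":"The specified key does not exist.","requestId":"req-0000"}`)
	svcErr := bce.NewBceServiceError("", "", "", 404)
	if err := json.Unmarshal(raw, svcErr); err != nil {
		panic(err)
	}
	fmt.Println(svcErr.Error())
}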
+type RetryPolicy interface { + ShouldRetry(BceError, int) bool + GetDelayBeforeNextRetryInMillis(BceError, int) time.Duration +} + +// NoRetryPolicy just does not retry. +type NoRetryPolicy struct{} + +func (_ *NoRetryPolicy) ShouldRetry(err BceError, attempts int) bool { + return false +} + +func (_ *NoRetryPolicy) GetDelayBeforeNextRetryInMillis( + err BceError, attempts int) time.Duration { + return 0 * time.Millisecond +} + +func NewNoRetryPolicy() *NoRetryPolicy { + return &NoRetryPolicy{} +} + +// BackOffRetryPolicy implements a policy that retries with exponential back-off strategy. +// This policy will keep retrying until the maximum number of retries is reached. The delay time +// will be a fixed interval for the first time then 2 * interval for the second, 4 * internal for +// the third, and so on. +// In general, the delay time will be 2^number_of_retries_attempted*interval. When a maximum of +// delay time is specified, the delay time will never exceed this limit. +type BackOffRetryPolicy struct { + maxErrorRetry int + maxDelayInMillis int64 + baseIntervalInMillis int64 +} + +func (b *BackOffRetryPolicy) ShouldRetry(err BceError, attempts int) bool { + // Do not retry any more when retry the max times + if attempts >= b.maxErrorRetry { + return false + } + + if err == nil { + return true + } + + // Always retry on IO error + if _, ok := err.(net.Error); ok { + return true + } + + // Only retry on a service error + if realErr, ok := err.(*BceServiceError); ok { + switch realErr.StatusCode { + case http.StatusInternalServerError: + log.Warn("retry for internal server error(500)") + return true + case http.StatusBadGateway: + log.Warn("retry for bad gateway(502)") + return true + case http.StatusServiceUnavailable: + log.Warn("retry for service unavailable(503)") + return true + case http.StatusBadRequest: + if realErr.Code != "Http400" { + return false + } + log.Warn("retry for bad request(400)") + return true + } + + if realErr.Code == EREQUEST_EXPIRED { + log.Warn("retry for request expired") + return true + } + } + return false +} + +func (b *BackOffRetryPolicy) GetDelayBeforeNextRetryInMillis( + err BceError, attempts int) time.Duration { + if attempts < 0 { + return 0 * time.Millisecond + } + delayInMillis := (1 << uint64(attempts)) * b.baseIntervalInMillis + if delayInMillis > b.maxDelayInMillis { + return time.Duration(b.maxDelayInMillis) * time.Millisecond + } + return time.Duration(delayInMillis) * time.Millisecond +} + +func NewBackOffRetryPolicy(maxRetry int, maxDelay, base int64) *BackOffRetryPolicy { + return &BackOffRetryPolicy{maxRetry, maxDelay, base} +} diff --git a/vendor/github.com/baidubce/bce-sdk-go/http/client.go b/vendor/github.com/baidubce/bce-sdk-go/http/client.go new file mode 100644 index 0000000000000..b036a40350f87 --- /dev/null +++ b/vendor/github.com/baidubce/bce-sdk-go/http/client.go @@ -0,0 +1,174 @@ +/* + * Copyright 2017 Baidu, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file + * except in compliance with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the + * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, + * either express or implied. See the License for the specific language governing permissions + * and limitations under the License. 
+ */
+
+// client.go - define the execute function to send http request and get response
+
+// Package http defines the structure of request and response which are used to access the BCE
+// services as well as the http constant headers and methods. It finally implements the `Execute`
+// function to do the work.
+package http
+
+import (
+	"net"
+	"net/http"
+	"net/url"
+	"sync"
+	"time"
+)
+
+const (
+	defaultMaxIdleConnsPerHost   = 500
+	defaultResponseHeaderTimeout = 60 * time.Second
+	defaultDialTimeout           = 30 * time.Second
+	defaultSmallInterval         = 600 * time.Second
+	defaultLargeInterval         = 1200 * time.Second
+)
+
+// The httpClient is the global variable to send the request and get response
+// for reuse; the Client provided by the Go standard library is thread safe.
+var (
+	httpClient *http.Client
+	transport  *http.Transport
+)
+
+type timeoutConn struct {
+	conn          net.Conn
+	smallInterval time.Duration
+	largeInterval time.Duration
+}
+
+func (c *timeoutConn) Read(b []byte) (n int, err error) {
+	c.SetReadDeadline(time.Now().Add(c.smallInterval))
+	n, err = c.conn.Read(b)
+	c.SetReadDeadline(time.Now().Add(c.largeInterval))
+	return n, err
+}
+func (c *timeoutConn) Write(b []byte) (n int, err error) {
+	c.SetWriteDeadline(time.Now().Add(c.smallInterval))
+	n, err = c.conn.Write(b)
+	c.SetWriteDeadline(time.Now().Add(c.largeInterval))
+	return n, err
+}
+func (c *timeoutConn) Close() error                       { return c.conn.Close() }
+func (c *timeoutConn) LocalAddr() net.Addr                { return c.conn.LocalAddr() }
+func (c *timeoutConn) RemoteAddr() net.Addr               { return c.conn.RemoteAddr() }
+func (c *timeoutConn) SetDeadline(t time.Time) error      { return c.conn.SetDeadline(t) }
+func (c *timeoutConn) SetReadDeadline(t time.Time) error  { return c.conn.SetReadDeadline(t) }
+func (c *timeoutConn) SetWriteDeadline(t time.Time) error { return c.conn.SetWriteDeadline(t) }
+
+type ClientConfig struct {
+	RedirectDisabled bool
+}
+
+var customizeInit sync.Once
+
+func InitClient(config ClientConfig) {
+	customizeInit.Do(func() {
+		httpClient = &http.Client{}
+		transport = &http.Transport{
+			MaxIdleConnsPerHost:   defaultMaxIdleConnsPerHost,
+			ResponseHeaderTimeout: defaultResponseHeaderTimeout,
+			Dial: func(network, address string) (net.Conn, error) {
+				conn, err := net.DialTimeout(network, address, defaultDialTimeout)
+				if err != nil {
+					return nil, err
+				}
+				tc := &timeoutConn{conn, defaultSmallInterval, defaultLargeInterval}
+				tc.SetReadDeadline(time.Now().Add(defaultLargeInterval))
+				return tc, nil
+			},
+		}
+		httpClient.Transport = transport
+		if config.RedirectDisabled {
+			httpClient.CheckRedirect = func(req *http.Request, via []*http.Request) error {
+				return http.ErrUseLastResponse
+			}
+		}
+	})
+}
+
+// Execute - do the http request and get the response
+//
+// PARAMS:
+// - request: the http request instance to be sent
+// RETURNS:
+// - response: the http response returned from the server
+// - error: nil if ok otherwise the specific error
+func Execute(request *Request) (*Response, error) {
+	// Build the request object for the current requesting
+	httpRequest := &http.Request{
+		Proto:      "HTTP/1.1",
+		ProtoMajor: 1,
+		ProtoMinor: 1,
+	}
+
+	// Set the connection timeout for current request
+	httpClient.Timeout = time.Duration(request.Timeout()) * time.Second
+
+	// Set the request method
+	httpRequest.Method = request.Method()
+
+	// Set the request url
+	internalUrl := &url.URL{
+		Scheme:   request.Protocol(),
+		Host:     request.Host(),
+		Path:     request.Uri(),
+		RawQuery: request.QueryString()}
+	httpRequest.URL =
internalUrl + + // Set the request headers + internalHeader := make(http.Header) + for k, v := range request.Headers() { + val := make([]string, 0, 1) + val = append(val, v) + internalHeader[k] = val + } + httpRequest.Header = internalHeader + + if request.Body() != nil { + if request.Length() > 0 { + httpRequest.ContentLength = request.Length() + httpRequest.Body = request.Body() + } else if request.Length() < 0 { + // if set body and ContentLength <= 0, will be chunked + httpRequest.Body = request.Body() + } // else {} body == nil and ContentLength == 0 + } + + // Set the proxy setting if needed + if len(request.ProxyUrl()) != 0 { + transport.Proxy = func(_ *http.Request) (*url.URL, error) { + return url.Parse(request.ProxyUrl()) + } + } + + // Perform the http request and get response + // It needs to explicitly close the keep-alive connections when error occurs for the request + // that may continue sending request's data subsequently. + start := time.Now() + + httpResponse, err := httpClient.Do(httpRequest) + + end := time.Now() + if err != nil { + transport.CloseIdleConnections() + return nil, err + } + if httpResponse.StatusCode >= 400 && + (httpRequest.Method == PUT || httpRequest.Method == POST) { + transport.CloseIdleConnections() + } + response := &Response{httpResponse, end.Sub(start)} + return response, nil +} diff --git a/vendor/github.com/baidubce/bce-sdk-go/http/constants.go b/vendor/github.com/baidubce/bce-sdk-go/http/constants.go new file mode 100644 index 0000000000000..69c5e426d9353 --- /dev/null +++ b/vendor/github.com/baidubce/bce-sdk-go/http/constants.go @@ -0,0 +1,83 @@ +/* + * Copyright 2017 Baidu, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file + * except in compliance with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the + * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, + * either express or implied. See the License for the specific language governing permissions + * and limitations under the License. 
+ */
+
+// constants.go - defines constants of the BCE http package including headers and methods
+
+package http
+
+// Constants of the supported HTTP methods for BCE
+const (
+	GET     = "GET"
+	PUT     = "PUT"
+	POST    = "POST"
+	DELETE  = "DELETE"
+	HEAD    = "HEAD"
+	OPTIONS = "OPTIONS"
+	PATCH   = "PATCH"
+)
+
+// Constants of the HTTP headers for BCE
+const (
+	// Standard HTTP Headers
+	AUTHORIZATION       = "Authorization"
+	CACHE_CONTROL       = "Cache-Control"
+	CONTENT_DISPOSITION = "Content-Disposition"
+	CONTENT_ENCODING    = "Content-Encoding"
+	CONTENT_LANGUAGE    = "Content-Language"
+	CONTENT_LENGTH      = "Content-Length"
+	CONTENT_MD5         = "Content-Md5"
+	CONTENT_RANGE       = "Content-Range"
+	CONTENT_TYPE        = "Content-Type"
+	DATE                = "Date"
+	ETAG                = "Etag"
+	EXPIRES             = "Expires"
+	HOST                = "Host"
+	LAST_MODIFIED       = "Last-Modified"
+	LOCATION            = "Location"
+	RANGE               = "Range"
+	SERVER              = "Server"
+	TRANSFER_ENCODING   = "Transfer-Encoding"
+	USER_AGENT          = "User-Agent"
+
+	// BCE Common HTTP Headers
+	BCE_PREFIX               = "x-bce-"
+	BCE_ACL                  = "x-bce-acl"
+	BCE_GRANT_READ           = "x-bce-grant-read"
+	BCE_GRANT_FULL_CONTROL   = "x-bce-grant-full-control"
+	BCE_CONTENT_SHA256       = "x-bce-content-sha256"
+	BCE_CONTENT_CRC32        = "x-bce-content-crc32"
+	BCE_REQUEST_ID           = "x-bce-request-id"
+	BCE_USER_METADATA_PREFIX = "x-bce-meta-"
+	BCE_SECURITY_TOKEN       = "x-bce-security-token"
+	BCE_DATE                 = "x-bce-date"
+
+	// BOS HTTP Headers
+	BCE_COPY_METADATA_DIRECTIVE         = "x-bce-metadata-directive"
+	BCE_COPY_SOURCE                     = "x-bce-copy-source"
+	BCE_COPY_SOURCE_IF_MATCH            = "x-bce-copy-source-if-match"
+	BCE_COPY_SOURCE_IF_MODIFIED_SINCE   = "x-bce-copy-source-if-modified-since"
+	BCE_COPY_SOURCE_IF_NONE_MATCH       = "x-bce-copy-source-if-none-match"
+	BCE_COPY_SOURCE_IF_UNMODIFIED_SINCE = "x-bce-copy-source-if-unmodified-since"
+	BCE_COPY_SOURCE_RANGE               = "x-bce-copy-source-range"
+	BCE_DEBUG_ID                        = "x-bce-debug-id"
+	BCE_OBJECT_TYPE                     = "x-bce-object-type"
+	BCE_NEXT_APPEND_OFFSET              = "x-bce-next-append-offset"
+	BCE_STORAGE_CLASS                   = "x-bce-storage-class"
+	BCE_PROCESS                         = "x-bce-process"
+	BCE_RESTORE_TIER                    = "x-bce-restore-tier"
+	BCE_RESTORE_DAYS                    = "x-bce-restore-days"
+	BCE_RESTORE                         = "x-bce-restore"
+	BCE_FORBID_OVERWRITE                = "x-bce-forbid-overwrite"
+	BCE_SYMLINK_TARGET                  = "x-bce-symlink-target"
+)
diff --git a/vendor/github.com/baidubce/bce-sdk-go/http/request.go b/vendor/github.com/baidubce/bce-sdk-go/http/request.go
new file mode 100644
index 0000000000000..7fecfb99a460e
--- /dev/null
+++ b/vendor/github.com/baidubce/bce-sdk-go/http/request.go
@@ -0,0 +1,221 @@
+/*
+ * Copyright 2017 Baidu, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
+ * except in compliance with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the
+ * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
+ * either express or implied. See the License for the specific language governing permissions
+ * and limitations under the License.
+ */
+
+// request.go - the custom HTTP request for BCE
+
+package http
+
+import (
+	"fmt"
+	"io"
+	"strconv"
+	"strings"
+
+	"github.com/baidubce/bce-sdk-go/util"
+)
+
+// Request stands for the general http request structure to make requests to the BCE services.
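+//
+// A minimal usage sketch (the endpoint and values below are illustrative, not
+// part of the upstream SDK):
+//
+//	req := &Request{}
+//	req.SetEndpoint("https://bj.bcebos.com") // fills protocol, host and default port
+//	req.SetMethod(GET)
+//	req.SetUri("/example-bucket")
+//	req.SetParam("maxKeys", "100")
+//	req.SetHeader(USER_AGENT, "example-agent")
+//	u := req.GenerateUrl(false) // "https://bj.bcebos.com/example-bucket?maxKeys=100"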
+type Request struct { + protocol string + host string + port int + method string + uri string + proxyUrl string + timeout int + headers map[string]string + params map[string]string + + // Optional body and length fields to set the body stream and content length + body io.ReadCloser + length int64 +} + +func (r *Request) Protocol() string { + return r.protocol +} + +func (r *Request) SetProtocol(protocol string) { + r.protocol = protocol +} + +func (r *Request) Endpoint() string { + if r.host == "" { + return "" + } + return r.protocol + "://" + r.host +} + +func (r *Request) SetEndpoint(endpoint string) { + pos := strings.Index(endpoint, "://") + rest := endpoint + if pos != -1 { + r.protocol = endpoint[0:pos] + rest = endpoint[pos+3:] + } else { + r.protocol = "http" + } + + r.SetHost(rest) +} + +func (r *Request) Host() string { + return r.host +} + +func (r *Request) SetHost(host string) { + r.host = host + pos := strings.Index(host, ":") + if pos != -1 { + p, e := strconv.Atoi(host[pos+1:]) + if e == nil { + r.port = p + } + } + + if r.port == 0 { + if r.protocol == "http" { + r.port = 80 + } else if r.protocol == "https" { + r.port = 443 + } + } +} + +func (r *Request) Port() int { + return r.port +} + +func (r *Request) SetPort(port int) { + // Port can be set by the endpoint or host, this method is rarely used. + r.port = port +} + +func (r *Request) Headers() map[string]string { + return r.headers +} + +func (r *Request) SetHeaders(headers map[string]string) { + r.headers = headers +} + +func (r *Request) Header(key string) string { + if v, ok := r.headers[key]; ok { + return v + } + return "" +} + +func (r *Request) SetHeader(key, value string) { + if r.headers == nil { + r.headers = make(map[string]string) + } + r.headers[key] = value +} + +func (r *Request) Params() map[string]string { + return r.params +} + +func (r *Request) SetParams(params map[string]string) { + r.params = params +} + +func (r *Request) Param(key string) string { + if v, ok := r.params[key]; ok { + return v + } + return "" +} + +func (r *Request) SetParam(key, value string) { + if r.params == nil { + r.params = make(map[string]string) + } + r.params[key] = value +} + +func (r *Request) QueryString() string { + buf := make([]string, 0, len(r.params)) + for k, v := range r.params { + buf = append(buf, util.UriEncode(k, true)+"="+util.UriEncode(v, true)) + } + return strings.Join(buf, "&") +} + +func (r *Request) Method() string { + return r.method +} + +func (r *Request) SetMethod(method string) { + r.method = method +} + +func (r *Request) Uri() string { + return r.uri +} + +func (r *Request) SetUri(uri string) { + r.uri = uri +} + +func (r *Request) ProxyUrl() string { + return r.proxyUrl +} + +func (r *Request) SetProxyUrl(url string) { + r.proxyUrl = url +} + +func (r *Request) Timeout() int { + return r.timeout +} + +func (r *Request) SetTimeout(timeout int) { + r.timeout = timeout +} + +func (r *Request) Body() io.ReadCloser { + return r.body +} + +func (r *Request) SetBody(stream io.ReadCloser) { + r.body = stream +} + +func (r *Request) Length() int64 { + return r.length +} + +func (r *Request) SetLength(l int64) { + r.length = l +} + +func (r *Request) GenerateUrl(addPort bool) string { + if addPort { + return fmt.Sprintf("%s://%s:%d%s?%s", + r.protocol, r.host, r.port, r.uri, r.QueryString()) + } else { + return fmt.Sprintf("%s://%s%s?%s", r.protocol, r.host, r.uri, r.QueryString()) + } +} + +func (r *Request) String() string { + header := make([]string, 0, len(r.headers)) + for k, v := range 
r.headers { + header = append(header, "\t"+k+"="+v) + } + return fmt.Sprintf("\t%s %s\n%v", + r.method, r.GenerateUrl(false), strings.Join(header, "\n")) +} diff --git a/vendor/github.com/baidubce/bce-sdk-go/http/response.go b/vendor/github.com/baidubce/bce-sdk-go/http/response.go new file mode 100644 index 0000000000000..98632fc9fd08b --- /dev/null +++ b/vendor/github.com/baidubce/bce-sdk-go/http/response.go @@ -0,0 +1,74 @@ +/* + * Copyright 2017 Baidu, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file + * except in compliance with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the + * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, + * either express or implied. See the License for the specific language governing permissions + * and limitations under the License. + */ + +// response.go - the custom HTTP response for BCE + +package http + +import ( + "io" + "net/http" + "time" +) + +// Response defines the general http response structure for accessing the BCE services. +type Response struct { + httpResponse *http.Response // the standard libaray http.Response object + elapsedTime time.Duration // elapsed time just from sending request to receiving response +} + +func (r *Response) StatusText() string { + return r.httpResponse.Status +} + +func (r *Response) StatusCode() int { + return r.httpResponse.StatusCode +} + +func (r *Response) Protocol() string { + return r.httpResponse.Proto +} + +func (r *Response) HttpResponse() *http.Response { + return r.httpResponse +} + +func (r *Response) SetHttpResponse(response *http.Response) { + r.httpResponse = response +} + +func (r *Response) ElapsedTime() time.Duration { + return r.elapsedTime +} + +func (r *Response) GetHeader(name string) string { + return r.httpResponse.Header.Get(name) +} + +func (r *Response) GetHeaders() map[string]string { + header := r.httpResponse.Header + ret := make(map[string]string, len(header)) + for k, v := range header { + ret[k] = v[0] + } + return ret +} + +func (r *Response) ContentLength() int64 { + return r.httpResponse.ContentLength +} + +func (r *Response) Body() io.ReadCloser { + return r.httpResponse.Body +} diff --git a/vendor/github.com/baidubce/bce-sdk-go/services/bos/api/bucket.go b/vendor/github.com/baidubce/bce-sdk-go/services/bos/api/bucket.go new file mode 100644 index 0000000000000..177467d84c1d7 --- /dev/null +++ b/vendor/github.com/baidubce/bce-sdk-go/services/bos/api/bucket.go @@ -0,0 +1,1090 @@ +/* + * Copyright 2017 Baidu, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file + * except in compliance with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the + * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, + * either express or implied. See the License for the specific language governing permissions + * and limitations under the License. + */ + +// bucket.go - the bucket APIs definition supported by the BOS service + +// Package api defines all APIs supported by the BOS service of BCE. 
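+//
+// A minimal usage sketch, assuming a bce.Client has already been constructed
+// elsewhere; the bucket name and prefix are illustrative:
+//
+//	args := &ListObjectsArgs{Prefix: "logs/", MaxKeys: 100}
+//	res, err := ListObjects(cli, "example-bucket", args)
+//	if err != nil {
+//		return err
+//	}
+//	for _, obj := range res.Contents {
+//		fmt.Println(obj.Key, obj.Size)
+//	}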
+package api + +import ( + "encoding/json" + "strconv" + + "github.com/baidubce/bce-sdk-go/bce" + "github.com/baidubce/bce-sdk-go/http" +) + +// ListBuckets - list all buckets of the account +// +// PARAMS: +// - cli: the client agent which can perform sending request +// RETURNS: +// - *ListBucketsResult: the result bucket list structure +// - error: nil if ok otherwise the specific error +func ListBuckets(cli bce.Client) (*ListBucketsResult, error) { + req := &bce.BceRequest{} + req.SetMethod(http.GET) + resp := &bce.BceResponse{} + if err := SendRequest(cli, req, resp); err != nil { + return nil, err + } + if resp.IsFail() { + return nil, resp.ServiceError() + } + result := &ListBucketsResult{} + if err := resp.ParseJsonBody(result); err != nil { + return nil, err + } + return result, nil +} + +// ListObjects - list all objects of the given bucket +// +// PARAMS: +// - cli: the client agent which can perform sending request +// - bucket: the bucket name +// - args: the optional arguments to list objects +// RETURNS: +// - *ListObjectsResult: the result object list structure +// - error: nil if ok otherwise the specific error +func ListObjects(cli bce.Client, bucket string, + args *ListObjectsArgs) (*ListObjectsResult, error) { + req := &bce.BceRequest{} + req.SetUri(getBucketUri(bucket)) + req.SetMethod(http.GET) + + // Optional arguments settings + if args != nil { + if len(args.Delimiter) != 0 { + req.SetParam("delimiter", args.Delimiter) + } + if len(args.Marker) != 0 { + req.SetParam("marker", args.Marker) + } + if args.MaxKeys != 0 { + req.SetParam("maxKeys", strconv.Itoa(args.MaxKeys)) + } + if len(args.Prefix) != 0 { + req.SetParam("prefix", args.Prefix) + } + } + if args == nil || args.MaxKeys == 0 { + req.SetParam("maxKeys", "1000") + } + + // Send the request and get result + resp := &bce.BceResponse{} + if err := SendRequest(cli, req, resp); err != nil { + return nil, err + } + if resp.IsFail() { + return nil, resp.ServiceError() + } + result := &ListObjectsResult{} + if err := resp.ParseJsonBody(result); err != nil { + return nil, err + } + return result, nil +} + +// HeadBucket - test the given bucket existed and access authority +// +// PARAMS: +// - cli: the client agent which can perform sending request +// - bucket: the bucket name +// RETURNS: +// - error: nil if exists and have authority otherwise the specific error +func HeadBucket(cli bce.Client, bucket string) error { + req := &bce.BceRequest{} + req.SetUri(getBucketUri(bucket)) + req.SetMethod(http.HEAD) + resp := &bce.BceResponse{} + if err := SendRequest(cli, req, resp); err != nil { + return err + } + if resp.IsFail() { + return resp.ServiceError() + } + defer func() { resp.Body().Close() }() + return nil +} + +// PutBucket - create a new bucket with the given name +// +// PARAMS: +// - cli: the client agent which can perform sending request +// - bucket: the new bucket name +// RETURNS: +// - string: the location of the new bucket if create success +// - error: nil if create success otherwise the specific error +func PutBucket(cli bce.Client, bucket string) (string, error) { + req := &bce.BceRequest{} + req.SetUri(getBucketUri(bucket)) + req.SetMethod(http.PUT) + resp := &bce.BceResponse{} + if err := SendRequest(cli, req, resp); err != nil { + return "", err + } + if resp.IsFail() { + return "", resp.ServiceError() + } + defer func() { resp.Body().Close() }() + return resp.Header(http.LOCATION), nil +} + +// DeleteBucket - delete an empty bucket by given name +// +// PARAMS: +// - cli: the client agent which 
can perform sending request +// - bucket: the bucket name to be deleted +// RETURNS: +// - error: nil if delete success otherwise the specific error +func DeleteBucket(cli bce.Client, bucket string) error { + req := &bce.BceRequest{} + req.SetUri(getBucketUri(bucket)) + req.SetMethod(http.DELETE) + resp := &bce.BceResponse{} + if err := SendRequest(cli, req, resp); err != nil { + return err + } + if resp.IsFail() { + return resp.ServiceError() + } + defer func() { resp.Body().Close() }() + return nil +} + +// GetBucketLocation - get the location of the given bucket +// +// PARAMS: +// - cli: the client agent which can perform sending request +// - bucket: the bucket name +// RETURNS: +// - string: the location of the bucket +// - error: nil if delete success otherwise the specific error +func GetBucketLocation(cli bce.Client, bucket string) (string, error) { + req := &bce.BceRequest{} + req.SetUri(getBucketUri(bucket)) + req.SetMethod(http.GET) + req.SetParam("location", "") + resp := &bce.BceResponse{} + if err := SendRequest(cli, req, resp); err != nil { + return "", err + } + if resp.IsFail() { + return "", resp.ServiceError() + } + result := &LocationType{} + if err := resp.ParseJsonBody(result); err != nil { + return "", err + } + return result.LocationConstraint, nil +} + +// PutBucketAcl - set the acl of the given bucket +// +// PARAMS: +// - cli: the client agent which can perform sending request +// - bucket: the bucket name +// - cannedAcl: support private, public-read, public-read-write +// - aclBody: the acl file body +// RETURNS: +// - error: nil if delete success otherwise the specific error +func PutBucketAcl(cli bce.Client, bucket, cannedAcl string, aclBody *bce.Body) error { + req := &bce.BceRequest{} + req.SetUri(getBucketUri(bucket)) + req.SetMethod(http.PUT) + req.SetParam("acl", "") + + // The acl setting + if len(cannedAcl) != 0 && aclBody != nil { + return bce.NewBceClientError("BOS does not support cannedAcl and acl file at the same time") + } + if validCannedAcl(cannedAcl) { + req.SetHeader(http.BCE_ACL, cannedAcl) + } + if aclBody != nil { + req.SetHeader(http.CONTENT_TYPE, bce.DEFAULT_CONTENT_TYPE) + req.SetBody(aclBody) + } + + resp := &bce.BceResponse{} + if err := SendRequest(cli, req, resp); err != nil { + return err + } + if resp.IsFail() { + return resp.ServiceError() + } + defer func() { resp.Body().Close() }() + return nil +} + +// GetBucketAcl - get the acl of the given bucket +// +// PARAMS: +// - cli: the client agent which can perform sending request +// - bucket: the bucket name +// RETURNS: +// - *GetBucketAclResult: the result of the bucket acl +// - error: nil if success otherwise the specific error +func GetBucketAcl(cli bce.Client, bucket string) (*GetBucketAclResult, error) { + req := &bce.BceRequest{} + req.SetUri(getBucketUri(bucket)) + req.SetMethod(http.GET) + req.SetParam("acl", "") + + resp := &bce.BceResponse{} + if err := SendRequest(cli, req, resp); err != nil { + return nil, err + } + if resp.IsFail() { + return nil, resp.ServiceError() + } + result := &GetBucketAclResult{} + if err := resp.ParseJsonBody(result); err != nil { + return nil, err + } + return result, nil +} + +// PutBucketLogging - set the logging prefix of the given bucket +// +// PARAMS: +// - cli: the client agent which can perform sending request +// - bucket: the bucket name +// - logging: the logging prefix json string body +// RETURNS: +// - error: nil if success otherwise the specific error +func PutBucketLogging(cli bce.Client, bucket string, logging *bce.Body) 
error { + req := &bce.BceRequest{} + req.SetUri(getBucketUri(bucket)) + req.SetMethod(http.PUT) + req.SetParam("logging", "") + req.SetBody(logging) + resp := &bce.BceResponse{} + if err := SendRequest(cli, req, resp); err != nil { + return err + } + if resp.IsFail() { + return resp.ServiceError() + } + defer func() { resp.Body().Close() }() + return nil +} + +// GetBucketLogging - get the logging config of the given bucket +// +// PARAMS: +// - cli: the client agent which can perform sending request +// - bucket: the bucket name +// RETURNS: +// - *GetBucketLoggingResult: the logging setting of the bucket +// - error: nil if success otherwise the specific error +func GetBucketLogging(cli bce.Client, bucket string) (*GetBucketLoggingResult, error) { + req := &bce.BceRequest{} + req.SetUri(getBucketUri(bucket)) + req.SetMethod(http.GET) + req.SetParam("logging", "") + + resp := &bce.BceResponse{} + if err := SendRequest(cli, req, resp); err != nil { + return nil, err + } + if resp.IsFail() { + return nil, resp.ServiceError() + } + result := &GetBucketLoggingResult{} + if err := resp.ParseJsonBody(result); err != nil { + return nil, err + } + return result, nil +} + +// DeleteBucketLogging - delete the logging setting of the given bucket +// +// PARAMS: +// - cli: the client agent which can perform sending request +// - bucket: the bucket name +// RETURNS: +// - error: nil if success otherwise the specific error +func DeleteBucketLogging(cli bce.Client, bucket string) error { + req := &bce.BceRequest{} + req.SetUri(getBucketUri(bucket)) + req.SetMethod(http.DELETE) + req.SetParam("logging", "") + resp := &bce.BceResponse{} + if err := SendRequest(cli, req, resp); err != nil { + return err + } + if resp.IsFail() { + return resp.ServiceError() + } + defer func() { resp.Body().Close() }() + return nil +} + +// PutBucketLifecycle - set the lifecycle rule of the given bucket +// +// PARAMS: +// - cli: the client agent which can perform sending request +// - bucket: the bucket name +// - lifecycle: the lifecycle rule json string body +// RETURNS: +// - error: nil if success otherwise the specific error +func PutBucketLifecycle(cli bce.Client, bucket string, lifecycle *bce.Body) error { + req := &bce.BceRequest{} + req.SetUri(getBucketUri(bucket)) + req.SetMethod(http.PUT) + req.SetParam("lifecycle", "") + req.SetBody(lifecycle) + resp := &bce.BceResponse{} + if err := SendRequest(cli, req, resp); err != nil { + return err + } + if resp.IsFail() { + return resp.ServiceError() + } + defer func() { resp.Body().Close() }() + return nil +} + +// GetBucketLifecycle - get the lifecycle rule of the given bucket +// +// PARAMS: +// - cli: the client agent which can perform sending request +// - bucket: the bucket name +// RETURNS: +// - *GetBucketLifecycleResult: the lifecycle rule of the bucket +// - error: nil if success otherwise the specific error +func GetBucketLifecycle(cli bce.Client, bucket string) (*GetBucketLifecycleResult, error) { + req := &bce.BceRequest{} + req.SetUri(getBucketUri(bucket)) + req.SetMethod(http.GET) + req.SetParam("lifecycle", "") + + resp := &bce.BceResponse{} + if err := SendRequest(cli, req, resp); err != nil { + return nil, err + } + if resp.IsFail() { + return nil, resp.ServiceError() + } + result := &GetBucketLifecycleResult{} + if err := resp.ParseJsonBody(result); err != nil { + return nil, err + } + return result, nil +} + +// DeleteBucketLifecycle - delete the lifecycle rule of the given bucket +// +// PARAMS: +// - cli: the client agent which can perform sending 
request +// - bucket: the bucket name +// RETURNS: +// - error: nil if success otherwise the specific error +func DeleteBucketLifecycle(cli bce.Client, bucket string) error { + req := &bce.BceRequest{} + req.SetUri(getBucketUri(bucket)) + req.SetMethod(http.DELETE) + req.SetParam("lifecycle", "") + resp := &bce.BceResponse{} + if err := SendRequest(cli, req, resp); err != nil { + return err + } + if resp.IsFail() { + return resp.ServiceError() + } + defer func() { resp.Body().Close() }() + return nil +} + +// PutBucketStorageclass - set the storage class of the given bucket +// +// PARAMS: +// - cli: the client agent which can perform sending request +// - bucket: the bucket name +// - storageClass: the storage class string +// RETURNS: +// - error: nil if success otherwise the specific error +func PutBucketStorageclass(cli bce.Client, bucket, storageClass string) error { + req := &bce.BceRequest{} + req.SetUri(getBucketUri(bucket)) + req.SetMethod(http.PUT) + req.SetParam("storageClass", "") + + obj := &StorageClassType{storageClass} + jsonBytes, jsonErr := json.Marshal(obj) + if jsonErr != nil { + return jsonErr + } + body, err := bce.NewBodyFromBytes(jsonBytes) + if err != nil { + return err + } + req.SetBody(body) + + resp := &bce.BceResponse{} + if err := SendRequest(cli, req, resp); err != nil { + return err + } + if resp.IsFail() { + return resp.ServiceError() + } + defer func() { resp.Body().Close() }() + return nil +} + +// GetBucketStorageclass - get the storage class of the given bucket +// +// PARAMS: +// - cli: the client agent which can perform sending request +// - bucket: the bucket name +// RETURNS: +// - string: the storage class of the bucket +// - error: nil if success otherwise the specific error +func GetBucketStorageclass(cli bce.Client, bucket string) (string, error) { + req := &bce.BceRequest{} + req.SetUri(getBucketUri(bucket)) + req.SetMethod(http.GET) + req.SetParam("storageClass", "") + + resp := &bce.BceResponse{} + if err := SendRequest(cli, req, resp); err != nil { + return "", err + } + if resp.IsFail() { + return "", resp.ServiceError() + } + result := &StorageClassType{} + if err := resp.ParseJsonBody(result); err != nil { + return "", err + } + return result.StorageClass, nil +} + +// PutBucketReplication - set the bucket replication of different region +// +// PARAMS: +// - cli: the client agent which can perform sending request +// - bucket: the bucket name +// - replicationConf: the replication config body stream +// - replicationRuleId: the replication rule id composed of [0-9 A-Z a-z _ -] +// RETURNS: +// - error: nil if success otherwise the specific error +func PutBucketReplication(cli bce.Client, bucket string, replicationConf *bce.Body, replicationRuleId string) error { + req := &bce.BceRequest{} + req.SetUri(getBucketUri(bucket)) + req.SetMethod(http.PUT) + req.SetParam("replication", "") + if len(replicationRuleId) > 0 { + req.SetParam("id", replicationRuleId) + } + + if replicationConf != nil { + req.SetHeader(http.CONTENT_TYPE, bce.DEFAULT_CONTENT_TYPE) + req.SetBody(replicationConf) + } + + resp := &bce.BceResponse{} + if err := SendRequest(cli, req, resp); err != nil { + return err + } + if resp.IsFail() { + return resp.ServiceError() + } + defer func() { resp.Body().Close() }() + return nil +} + +// GetBucketReplication - get the bucket replication config of the given bucket +// +// PARAMS: +// - cli: the client agent which can perform sending request +// - bucket: the bucket name +// - replicationRuleId: the replication rule id composed 
of [0-9 A-Z a-z _ -] +// RETURNS: +// - *GetBucketReplicationResult: the result of the bucket replication config +// - error: nil if success otherwise the specific error +func GetBucketReplication(cli bce.Client, bucket string, replicationRuleId string) (*GetBucketReplicationResult, error) { + req := &bce.BceRequest{} + req.SetUri(getBucketUri(bucket)) + req.SetMethod(http.GET) + req.SetParam("replication", "") + if len(replicationRuleId) > 0 { + req.SetParam("id", replicationRuleId) + } + + resp := &bce.BceResponse{} + if err := SendRequest(cli, req, resp); err != nil { + return nil, err + } + if resp.IsFail() { + return nil, resp.ServiceError() + } + result := &GetBucketReplicationResult{} + if err := resp.ParseJsonBody(result); err != nil { + return nil, err + } + return result, nil +} + +// ListBucketReplication - list all replication config of the given bucket +// +// PARAMS: +// - cli: the client agent which can perform sending request +// - bucket: the bucket name +// RETURNS: +// - error: nil if success otherwise the specific error +func ListBucketReplication(cli bce.Client, bucket string) (*ListBucketReplicationResult, error) { + req := &bce.BceRequest{} + req.SetUri(getBucketUri(bucket)) + req.SetMethod(http.GET) + req.SetParam("replication", "") + req.SetParam("list", "") + resp := &bce.BceResponse{} + if err := cli.SendRequest(req, resp); err != nil { + return nil, err + } + if resp.IsFail() { + return nil, resp.ServiceError() + } + result := &ListBucketReplicationResult{} + if err := resp.ParseJsonBody(result); err != nil { + return nil, err + } + return result, nil +} + +// DeleteBucketReplication - delete the bucket replication config of the given bucket +// +// PARAMS: +// - cli: the client agent which can perform sending request +// - bucket: the bucket name +// - replicationRuleId: the replication rule id composed of [0-9 A-Z a-z _ -] +// RETURNS: +// - error: nil if success otherwise the specific error +func DeleteBucketReplication(cli bce.Client, bucket string, replicationRuleId string) error { + req := &bce.BceRequest{} + req.SetUri(getBucketUri(bucket)) + req.SetMethod(http.DELETE) + req.SetParam("replication", "") + if len(replicationRuleId) > 0 { + req.SetParam("id", replicationRuleId) + } + resp := &bce.BceResponse{} + if err := SendRequest(cli, req, resp); err != nil { + return err + } + if resp.IsFail() { + return resp.ServiceError() + } + defer func() { resp.Body().Close() }() + return nil +} + +// GetBucketReplicationProgress - get the bucket replication process of the given bucket +// +// PARAMS: +// - cli: the client agent which can perform sending request +// - bucket: the bucket name +// - replicationRuleId: the replication rule id composed of [0-9 A-Z a-z _ -] +// RETURNS: +// - *GetBucketReplicationProgressResult: the result of the bucket replication process +// - error: nil if success otherwise the specific error +func GetBucketReplicationProgress(cli bce.Client, bucket string, replicationRuleId string) ( + *GetBucketReplicationProgressResult, error) { + req := &bce.BceRequest{} + req.SetUri(getBucketUri(bucket)) + req.SetMethod(http.GET) + req.SetParam("replicationProgress", "") + if len(replicationRuleId) > 0 { + req.SetParam("id", replicationRuleId) + } + + resp := &bce.BceResponse{} + if err := SendRequest(cli, req, resp); err != nil { + return nil, err + } + if resp.IsFail() { + return nil, resp.ServiceError() + } + result := &GetBucketReplicationProgressResult{} + if err := resp.ParseJsonBody(result); err != nil { + return nil, err + } + return 
result, nil +} + +// PutBucketEncryption - set the bucket encrpytion config +// +// PARAMS: +// - cli: the client agent which can perform sending request +// - bucket: the bucket name +// - algorithm: the encryption algorithm +// RETURNS: +// - error: nil if success otherwise the specific error +func PutBucketEncryption(cli bce.Client, bucket, algorithm string) error { + req := &bce.BceRequest{} + req.SetUri(getBucketUri(bucket)) + req.SetMethod(http.PUT) + req.SetParam("encryption", "") + + obj := &BucketEncryptionType{algorithm} + jsonBytes, jsonErr := json.Marshal(obj) + if jsonErr != nil { + return jsonErr + } + body, err := bce.NewBodyFromBytes(jsonBytes) + if err != nil { + return err + } + req.SetHeader(http.CONTENT_TYPE, bce.DEFAULT_CONTENT_TYPE) + req.SetBody(body) + + resp := &bce.BceResponse{} + if err := SendRequest(cli, req, resp); err != nil { + return err + } + if resp.IsFail() { + return resp.ServiceError() + } + defer func() { resp.Body().Close() }() + return nil +} + +// GetBucketEncryption - get the encryption config of the given bucket +// +// PARAMS: +// - cli: the client agent which can perform sending request +// - bucket: the bucket name +// RETURNS: +// - algorithm: the bucket encryption algorithm +// - error: nil if success otherwise the specific error +func GetBucketEncryption(cli bce.Client, bucket string) (string, error) { + req := &bce.BceRequest{} + req.SetUri(getBucketUri(bucket)) + req.SetMethod(http.GET) + req.SetParam("encryption", "") + + resp := &bce.BceResponse{} + if err := SendRequest(cli, req, resp); err != nil { + return "", err + } + if resp.IsFail() { + return "", resp.ServiceError() + } + result := &BucketEncryptionType{} + if err := resp.ParseJsonBody(result); err != nil { + return "", err + } + return result.EncryptionAlgorithm, nil +} + +// DeleteBucketEncryption - delete the encryption config of the given bucket +// +// PARAMS: +// - cli: the client agent which can perform sending request +// - bucket: the bucket name +// RETURNS: +// - error: nil if success otherwise the specific error +func DeleteBucketEncryption(cli bce.Client, bucket string) error { + req := &bce.BceRequest{} + req.SetUri(getBucketUri(bucket)) + req.SetMethod(http.DELETE) + req.SetParam("encryption", "") + resp := &bce.BceResponse{} + if err := SendRequest(cli, req, resp); err != nil { + return err + } + if resp.IsFail() { + return resp.ServiceError() + } + defer func() { resp.Body().Close() }() + return nil +} + +// PutBucketStaticWebsite - set the bucket static website config +// +// PARAMS: +// - cli: the client agent which can perform sending request +// - bucket: the bucket name +// - confBody: the static website config body stream +// RETURNS: +// - error: nil if success otherwise the specific error +func PutBucketStaticWebsite(cli bce.Client, bucket string, confBody *bce.Body) error { + req := &bce.BceRequest{} + req.SetUri(getBucketUri(bucket)) + req.SetMethod(http.PUT) + req.SetParam("website", "") + if confBody != nil { + req.SetHeader(http.CONTENT_TYPE, bce.DEFAULT_CONTENT_TYPE) + req.SetBody(confBody) + } + + resp := &bce.BceResponse{} + if err := SendRequest(cli, req, resp); err != nil { + return err + } + if resp.IsFail() { + return resp.ServiceError() + } + defer func() { resp.Body().Close() }() + return nil +} + +// GetBucketStaticWebsite - get the static website config of the given bucket +// +// PARAMS: +// - cli: the client agent which can perform sending request +// - bucket: the bucket name +// RETURNS: +// - result: the bucket static website config 
result object +// - error: nil if success otherwise the specific error +func GetBucketStaticWebsite(cli bce.Client, bucket string) ( + *GetBucketStaticWebsiteResult, error) { + req := &bce.BceRequest{} + req.SetUri(getBucketUri(bucket)) + req.SetMethod(http.GET) + req.SetParam("website", "") + + resp := &bce.BceResponse{} + if err := SendRequest(cli, req, resp); err != nil { + return nil, err + } + if resp.IsFail() { + return nil, resp.ServiceError() + } + result := &GetBucketStaticWebsiteResult{} + if err := resp.ParseJsonBody(result); err != nil { + return nil, err + } + return result, nil +} + +// DeleteBucketStaticWebsite - delete the static website config of the given bucket +// +// PARAMS: +// - cli: the client agent which can perform sending request +// - bucket: the bucket name +// RETURNS: +// - error: nil if success otherwise the specific error +func DeleteBucketStaticWebsite(cli bce.Client, bucket string) error { + req := &bce.BceRequest{} + req.SetUri(getBucketUri(bucket)) + req.SetMethod(http.DELETE) + req.SetParam("website", "") + resp := &bce.BceResponse{} + if err := SendRequest(cli, req, resp); err != nil { + return err + } + if resp.IsFail() { + return resp.ServiceError() + } + defer func() { resp.Body().Close() }() + return nil +} + +// PutBucketCors - set the bucket CORS config +// +// PARAMS: +// - cli: the client agent which can perform sending request +// - bucket: the bucket name +// - confBody: the CORS config body stream +// RETURNS: +// - error: nil if success otherwise the specific error +func PutBucketCors(cli bce.Client, bucket string, confBody *bce.Body) error { + req := &bce.BceRequest{} + req.SetUri(getBucketUri(bucket)) + req.SetMethod(http.PUT) + req.SetParam("cors", "") + if confBody != nil { + req.SetHeader(http.CONTENT_TYPE, bce.DEFAULT_CONTENT_TYPE) + req.SetBody(confBody) + } + + resp := &bce.BceResponse{} + if err := SendRequest(cli, req, resp); err != nil { + return err + } + if resp.IsFail() { + return resp.ServiceError() + } + defer func() { resp.Body().Close() }() + return nil +} + +// GetBucketCors - get the CORS config of the given bucket +// +// PARAMS: +// - cli: the client agent which can perform sending request +// - bucket: the bucket name +// RETURNS: +// - result: the bucket CORS config result object +// - error: nil if success otherwise the specific error +func GetBucketCors(cli bce.Client, bucket string) ( + *GetBucketCorsResult, error) { + req := &bce.BceRequest{} + req.SetUri(getBucketUri(bucket)) + req.SetMethod(http.GET) + req.SetParam("cors", "") + + resp := &bce.BceResponse{} + if err := SendRequest(cli, req, resp); err != nil { + return nil, err + } + if resp.IsFail() { + return nil, resp.ServiceError() + } + result := &GetBucketCorsResult{} + if err := resp.ParseJsonBody(result); err != nil { + return nil, err + } + return result, nil +} + +// DeleteBucketCors - delete the CORS config of the given bucket +// +// PARAMS: +// - cli: the client agent which can perform sending request +// - bucket: the bucket name +// RETURNS: +// - error: nil if success otherwise the specific error +func DeleteBucketCors(cli bce.Client, bucket string) error { + req := &bce.BceRequest{} + req.SetUri(getBucketUri(bucket)) + req.SetMethod(http.DELETE) + req.SetParam("cors", "") + resp := &bce.BceResponse{} + if err := SendRequest(cli, req, resp); err != nil { + return err + } + if resp.IsFail() { + return resp.ServiceError() + } + defer func() { resp.Body().Close() }() + return nil +} + +// PutBucketCopyrightProtection - set the copyright protection 
config of the given bucket +// +// PARAMS: +// - cli: the client agent which can perform sending request +// - bucket: the bucket name +// - resources: the resource items in the bucket to be protected +// RETURNS: +// - error: nil if success otherwise the specific error +func PutBucketCopyrightProtection(cli bce.Client, bucket string, resources ...string) error { + req := &bce.BceRequest{} + req.SetUri(getBucketUri(bucket)) + req.SetMethod(http.PUT) + req.SetParam("copyrightProtection", "") + if len(resources) == 0 { + return bce.NewBceClientError("the resource to set copyright protection is empty") + } + arg := &CopyrightProtectionType{resources} + jsonBytes, jsonErr := json.Marshal(arg) + if jsonErr != nil { + return jsonErr + } + body, err := bce.NewBodyFromBytes(jsonBytes) + if err != nil { + return err + } + req.SetHeader(http.CONTENT_TYPE, bce.DEFAULT_CONTENT_TYPE) + req.SetBody(body) + + resp := &bce.BceResponse{} + if err := SendRequest(cli, req, resp); err != nil { + return err + } + if resp.IsFail() { + return resp.ServiceError() + } + defer func() { resp.Body().Close() }() + return nil +} + +// GetBucketCopyrightProtection - get the copyright protection config of the given bucket +// +// PARAMS: +// - cli: the client agent which can perform sending request +// - bucket: the bucket name +// RETURNS: +// - result: the bucket copyright protection resources array +// - error: nil if success otherwise the specific error +func GetBucketCopyrightProtection(cli bce.Client, bucket string) ([]string, error) { + req := &bce.BceRequest{} + req.SetUri(getBucketUri(bucket)) + req.SetMethod(http.GET) + req.SetParam("copyrightProtection", "") + + resp := &bce.BceResponse{} + if err := SendRequest(cli, req, resp); err != nil { + return nil, err + } + if resp.IsFail() { + return nil, resp.ServiceError() + } + result := &CopyrightProtectionType{} + if err := resp.ParseJsonBody(result); err != nil { + return nil, err + } + return result.Resource, nil +} + +// DeleteBucketCopyrightProtection - delete the copyright protection config of the given bucket +// +// PARAMS: +// - cli: the client agent which can perform sending request +// - bucket: the bucket name +// RETURNS: +// - error: nil if success otherwise the specific error +func DeleteBucketCopyrightProtection(cli bce.Client, bucket string) error { + req := &bce.BceRequest{} + req.SetUri(getBucketUri(bucket)) + req.SetMethod(http.DELETE) + req.SetParam("copyrightProtection", "") + resp := &bce.BceResponse{} + if err := SendRequest(cli, req, resp); err != nil { + return err + } + if resp.IsFail() { + return resp.ServiceError() + } + defer func() { resp.Body().Close() }() + return nil +} + +// PutBucketTrash - put the trash setting of the given bucket +// +// PARAMS: +// - cli: the client agent which can perform sending request +// - bucket: the bucket name +// - trashDir: the trash dir name +// RETURNS: +// - error: nil if success otherwise the specific error +func PutBucketTrash(cli bce.Client, bucket string, trashReq PutBucketTrashReq) error { + req := &bce.BceRequest{} + req.SetUri(getBucketUri(bucket)) + req.SetMethod(http.PUT) + req.SetParam("trash", "") + reqByte, _ := json.Marshal(trashReq) + body, err := bce.NewBodyFromString(string(reqByte)) + if err != nil { + return err + } + req.SetBody(body) + resp := &bce.BceResponse{} + if err := SendRequest(cli, req, resp); err != nil { + return err + } + if resp.IsFail() { + return resp.ServiceError() + } + defer func() { resp.Body().Close() }() + return nil +} + +func GetBucketTrash(cli 
bce.Client, bucket string) (*GetBucketTrashResult, error) { + req := &bce.BceRequest{} + req.SetUri(getBucketUri(bucket)) + req.SetMethod(http.GET) + req.SetParam("trash", "") + resp := &bce.BceResponse{} + if err := SendRequest(cli, req, resp); err != nil { + return nil, err + } + if resp.IsFail() { + return nil, resp.ServiceError() + } + result := &GetBucketTrashResult{} + if err := resp.ParseJsonBody(result); err != nil { + return nil, err + } + return result, nil +} + +func DeleteBucketTrash(cli bce.Client, bucket string) error { + req := &bce.BceRequest{} + req.SetUri(getBucketUri(bucket)) + req.SetMethod(http.DELETE) + req.SetParam("trash", "") + resp := &bce.BceResponse{} + if err := SendRequest(cli, req, resp); err != nil { + return err + } + if resp.IsFail() { + return resp.ServiceError() + } + defer func() { resp.Body().Close() }() + return nil +} + +func PutBucketNotification(cli bce.Client, bucket string, putBucketNotificationReq PutBucketNotificationReq) error { + req := &bce.BceRequest{} + req.SetUri(getBucketUri(bucket)) + req.SetMethod(http.PUT) + req.SetParam("notification", "") + reqByte, _ := json.Marshal(putBucketNotificationReq) + body, err := bce.NewBodyFromString(string(reqByte)) + if err != nil { + return err + } + req.SetBody(body) + resp := &bce.BceResponse{} + if err := SendRequest(cli, req, resp); err != nil { + return err + } + if resp.IsFail() { + return resp.ServiceError() + } + defer func() { resp.Body().Close() }() + return nil +} + +func GetBucketNotification(cli bce.Client, bucket string) (*PutBucketNotificationReq, error) { + req := &bce.BceRequest{} + req.SetUri(getBucketUri(bucket)) + req.SetMethod(http.GET) + req.SetParam("notification", "") + resp := &bce.BceResponse{} + if err := SendRequest(cli, req, resp); err != nil { + return nil, err + } + if resp.IsFail() { + return nil, resp.ServiceError() + } + result := &PutBucketNotificationReq{} + if err := resp.ParseJsonBody(result); err != nil { + return nil, err + } + return result, nil +} + +func DeleteBucketNotification(cli bce.Client, bucket string) error { + req := &bce.BceRequest{} + req.SetUri(getBucketUri(bucket)) + req.SetMethod(http.DELETE) + req.SetParam("notification", "") + resp := &bce.BceResponse{} + if err := SendRequest(cli, req, resp); err != nil { + return err + } + if resp.IsFail() { + return resp.ServiceError() + } + defer func() { resp.Body().Close() }() + return nil +} diff --git a/vendor/github.com/baidubce/bce-sdk-go/services/bos/api/model.go b/vendor/github.com/baidubce/bce-sdk-go/services/bos/api/model.go new file mode 100644 index 0000000000000..f22028f62a93b --- /dev/null +++ b/vendor/github.com/baidubce/bce-sdk-go/services/bos/api/model.go @@ -0,0 +1,579 @@ +/* + * Copyright 2017 Baidu, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file + * except in compliance with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the + * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, + * either express or implied. See the License for the specific language governing permissions + * and limitations under the License. 
+ */ + +// model.go - definitions of the request arguments and results data structure model + +package api + +import ( + "io" +) + +type OwnerType struct { + Id string `json:"id"` + DisplayName string `json:"displayName"` +} + +type BucketSummaryType struct { + Name string `json:"name"` + Location string `json:"location"` + CreationDate string `json:"creationDate"` +} + +// ListBucketsResult defines the result structure of ListBuckets api. +type ListBucketsResult struct { + Owner OwnerType `json:"owner"` + Buckets []BucketSummaryType `json:"buckets"` +} + +// ListObjectsArgs defines the optional arguments for ListObjects api. +type ListObjectsArgs struct { + Delimiter string `json:"delimiter"` + Marker string `json:"marker"` + MaxKeys int `json:"maxKeys"` + Prefix string `json:"prefix"` +} + +type ObjectSummaryType struct { + Key string `json:"key"` + LastModified string `json:"lastModified"` + ETag string `json:"eTag"` + Size int `json:"size"` + StorageClass string `json:"storageClass"` + Owner OwnerType `json:"owner"` +} + +type PrefixType struct { + Prefix string `json:"prefix"` +} + +// ListObjectsResult defines the result structure of ListObjects api. +type ListObjectsResult struct { + Name string `json:"name"` + Prefix string `json:"prefix"` + Delimiter string `json:"delimiter"` + Marker string `json:"marker"` + NextMarker string `json:"nextMarker,omitempty"` + MaxKeys int `json:"maxKeys"` + IsTruncated bool `json:"isTruncated"` + Contents []ObjectSummaryType `json:"contents"` + CommonPrefixes []PrefixType `json:"commonPrefixes"` +} + +type LocationType struct { + LocationConstraint string `json:"locationConstraint"` +} + +// AclOwnerType defines the owner struct in ACL setting +type AclOwnerType struct { + Id string `json:"id"` +} + +// GranteeType defines the grantee struct in ACL setting +type GranteeType struct { + Id string `json:"id"` +} + +type AclRefererType struct { + StringLike []string `json:"stringLike"` + StringEquals []string `json:"stringEquals"` +} + +type AclCondType struct { + IpAddress []string `json:"ipAddress"` + Referer AclRefererType `json:"referer"` +} + +// GrantType defines the grant struct in ACL setting +type GrantType struct { + Grantee []GranteeType `json:"grantee"` + Permission []string `json:"permission"` + Resource []string `json:"resource,omitempty"` + NotResource []string `json:"notResource,omitempty"` + Condition AclCondType `json:"condition,omitempty"` +} + +// PutBucketAclArgs defines the input args structure for putting bucket acl. +type PutBucketAclArgs struct { + AccessControlList []GrantType `json:"accessControlList"` +} + +// GetBucketAclResult defines the result structure of getting bucket acl. +type GetBucketAclResult struct { + AccessControlList []GrantType `json:"accessControlList"` + Owner AclOwnerType `json:"owner"` +} + +// PutBucketLoggingArgs defines the input args structure for putting bucket logging. +type PutBucketLoggingArgs struct { + TargetBucket string `json:"targetBucket"` + TargetPrefix string `json:"targetPrefix"` +} + +// GetBucketLoggingResult defines the result structure for getting bucket logging. 
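+//
+// With the struct tags below, the logging configuration travels as JSON along
+// these lines (field values are illustrative):
+//
+//	{"status":"enabled","targetBucket":"example-bucket","targetPrefix":"access_log/"}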
+type GetBucketLoggingResult struct {
+	Status       string `json:"status"`
+	TargetBucket string `json:"targetBucket,omitempty"`
+	TargetPrefix string `json:"targetPrefix,omitempty"`
+}
+
+// LifecycleConditionTimeType defines the structure of time condition
+type LifecycleConditionTimeType struct {
+	DateGreaterThan string `json:"dateGreaterThan"`
+}
+
+// LifecycleConditionType defines the structure of condition
+type LifecycleConditionType struct {
+	Time LifecycleConditionTimeType `json:"time"`
+}
+
+// LifecycleActionType defines the structure of lifecycle action
+type LifecycleActionType struct {
+	Name         string `json:"name"`
+	StorageClass string `json:"storageClass,omitempty"`
+}
+
+// LifecycleRuleType defines the structure of a single lifecycle rule
+type LifecycleRuleType struct {
+	Id        string                 `json:"id"`
+	Status    string                 `json:"status"`
+	Resource  []string               `json:"resource"`
+	Condition LifecycleConditionType `json:"condition"`
+	Action    LifecycleActionType    `json:"action"`
+}
+
+// PutBucketLifecycleArgs defines the lifecycle argument structure for putting
+type PutBucketLifecycleArgs struct {
+	Rule []LifecycleRuleType `json:"rule"`
+}
+
+// GetBucketLifecycleResult defines the lifecycle result structure for getting
+type GetBucketLifecycleResult struct {
+	Rule []LifecycleRuleType `json:"rule"`
+}
+
+type StorageClassType struct {
+	StorageClass string `json:"storageClass"`
+}
+
+// BucketReplicationDescriptor defines the description data structure
+type BucketReplicationDescriptor struct {
+	Bucket       string `json:"bucket,omitempty"`
+	StorageClass string `json:"storageClass,omitempty"`
+}
+
+// BucketReplicationType defines the data structure for Put and Get of bucket replication
+type BucketReplicationType struct {
+	Id               string                       `json:"id"`
+	Status           string                       `json:"status"`
+	Resource         []string                     `json:"resource"`
+	ReplicateDeletes string                       `json:"replicateDeletes"`
+	Destination      *BucketReplicationDescriptor `json:"destination,omitempty"`
+	ReplicateHistory *BucketReplicationDescriptor `json:"replicateHistory,omitempty"`
+	CreateTime       int64                        `json:"createTime"`
+	DestRegion       string                       `json:"destRegion"`
+}
+
+type PutBucketReplicationArgs BucketReplicationType
+type GetBucketReplicationResult BucketReplicationType
+
+// ListBucketReplicationResult defines output result for replication conf list
+type ListBucketReplicationResult struct {
+	Rules []BucketReplicationType `json:"rules"`
+}
+
+// GetBucketReplicationProgressResult defines output result for replication process
+type GetBucketReplicationProgressResult struct {
+	Status                    string  `json:"status"`
+	HistoryReplicationPercent float64 `json:"historyReplicationPercent"`
+	LatestReplicationTime     string  `json:"latestReplicationTime"`
+}
+
+// BucketEncryptionType defines the data structure for Put and Get of bucket encryption
+type BucketEncryptionType struct {
+	EncryptionAlgorithm string `json:"encryptionAlgorithm"`
+}
+
+// BucketStaticWebsiteType defines the data structure for Put and Get of bucket static website
+type BucketStaticWebsiteType struct {
+	Index    string `json:"index"`
+	NotFound string `json:"notFound"`
+}
+
+type PutBucketStaticWebsiteArgs BucketStaticWebsiteType
+type GetBucketStaticWebsiteResult BucketStaticWebsiteType
+
+type BucketCORSType struct {
+	AllowedOrigins       []string `json:"allowedOrigins"`
+	AllowedMethods       []string `json:"allowedMethods"`
+	AllowedHeaders       []string `json:"allowedHeaders,omitempty"`
+	AllowedExposeHeaders []string `json:"allowedExposeHeaders,omitempty"`
+	MaxAgeSeconds        int64    `json:"maxAgeSeconds,omitempty"`
+}
+
+// PutBucketCorsArgs defines the request argument for bucket CORS setting
+type PutBucketCorsArgs struct {
+	CorsConfiguration []BucketCORSType `json:"corsConfiguration"`
+}
+
+// GetBucketCorsResult defines the data structure of getting bucket CORS result
+type GetBucketCorsResult struct {
+	CorsConfiguration []BucketCORSType `json:"corsConfiguration"`
+}
+
+// CopyrightProtectionType defines the data structure for Put and Get copyright protection API
+type CopyrightProtectionType struct {
+	Resource []string `json:"resource"`
+}
+
+// ObjectAclType defines the data structure for Put and Get object acl API
+type ObjectAclType struct {
+	AccessControlList []GrantType `json:"accessControlList"`
+}
+
+type PutObjectAclArgs ObjectAclType
+type GetObjectAclResult ObjectAclType
+
+// PutObjectArgs defines the optional args structure for the put object api.
+type PutObjectArgs struct {
+	CacheControl       string
+	ContentDisposition string
+	ContentMD5         string
+	ContentType        string
+	ContentLength      int64
+	Expires            string
+	UserMeta           map[string]string
+	ContentSha256      string
+	ContentCrc32       string
+	StorageClass       string
+	Process            string
+}
+
+// CopyObjectArgs defines the optional args structure for the copy object api.
+type CopyObjectArgs struct {
+	ObjectMeta
+	MetadataDirective string
+	IfMatch           string
+	IfNoneMatch       string
+	IfModifiedSince   string
+	IfUnmodifiedSince string
+}
+
+type MultiCopyObjectArgs struct {
+	StorageClass string
+}
+
+// CopyObjectResult defines the result json structure for the copy object api.
+type CopyObjectResult struct {
+	LastModified string `json:"lastModified"`
+	ETag         string `json:"eTag"`
+}
+
+type ObjectMeta struct {
+	CacheControl       string
+	ContentDisposition string
+	ContentEncoding    string
+	ContentLength      int64
+	ContentRange       string
+	ContentType        string
+	ContentMD5         string
+	ContentSha256      string
+	ContentCrc32       string
+	Expires            string
+	LastModified       string
+	ETag               string
+	UserMeta           map[string]string
+	StorageClass       string
+	NextAppendOffset   string
+	ObjectType         string
+	BceRestore         string
+	BceObjectType      string
+}
+
+// GetObjectResult defines the result data of the get object api.
+type GetObjectResult struct {
+	ObjectMeta
+	ContentLanguage string
+	Body            io.ReadCloser
+}
+
+// GetObjectMetaResult defines the result data of the get object meta api.
+type GetObjectMetaResult struct {
+	ObjectMeta
+}
+
+// SelectObjectResult defines the result data of the select object api.
+type SelectObjectResult struct {
+	Body io.ReadCloser
+}
+
+// SelectObjectArgs defines the request args of the select object api.
+type SelectObjectArgs struct {
+	SelectType    string               `json:"-"`
+	SelectRequest *SelectObjectRequest `json:"selectRequest"`
+}
+
+type SelectObjectRequest struct {
+	Expression          string                `json:"expression"`
+	ExpressionType      string                `json:"expressionType"` // SQL
+	InputSerialization  *SelectObjectInput    `json:"inputSerialization"`
+	OutputSerialization *SelectObjectOutput   `json:"outputSerialization"`
+	RequestProgress     *SelectObjectProgress `json:"requestProgress"`
+}
+
+type SelectObjectInput struct {
+	CompressionType string            `json:"compressionType"`
+	CsvParams       map[string]string `json:"csv"`
+	JsonParams      map[string]string `json:"json"`
+}
+type SelectObjectOutput struct {
+	OutputHeader bool              `json:"outputHeader"`
+	CsvParams    map[string]string `json:"csv"`
+	JsonParams   map[string]string `json:"json"`
+}
+type SelectObjectProgress struct {
+	Enabled bool `json:"enabled"`
+}
+
+type Prelude struct {
+	TotalLen   uint32
+	HeadersLen uint32
+}
+
+// selectObject response messages
+type CommonMessage struct {
+	Prelude
+	Headers map[string]string // message-type, content-type, etc.
+	Crc32   uint32            // crc32 of RecordsMessage
+}
+type RecordsMessage struct {
+	CommonMessage
+	Records []string // csv/json selected data, one or more records
+}
+type ContinuationMessage struct {
+	CommonMessage
+	BytesScanned  uint64
+	BytesReturned uint64
+}
+type EndMessage struct {
+	CommonMessage
+}
+
+// FetchObjectArgs defines the optional arguments structure for the fetch object api.
+type FetchObjectArgs struct {
+	FetchMode    string
+	StorageClass string
+}
+
+// FetchObjectResult defines the result json structure for the fetch object api.
+type FetchObjectResult struct {
+	Code      string `json:"code"`
+	Message   string `json:"message"`
+	RequestId string `json:"requestId"`
+	JobId     string `json:"jobId"`
+}
+
+// AppendObjectArgs defines the optional arguments structure for appending object.
+type AppendObjectArgs struct {
+	Offset             int64
+	CacheControl       string
+	ContentDisposition string
+	ContentMD5         string
+	ContentType        string
+	Expires            string
+	UserMeta           map[string]string
+	ContentSha256      string
+	ContentCrc32       string
+	StorageClass       string
+}
+
+// AppendObjectResult defines the result data structure for appending object.
+type AppendObjectResult struct {
+	ContentMD5       string
+	NextAppendOffset int64
+	ContentCrc32     string
+	ETag             string
+}
+
+// DeleteObjectArgs defines the input args structure for a single object.
+type DeleteObjectArgs struct {
+	Key string `json:"key"`
+}
+
+// DeleteMultipleObjectsArgs defines the input args structure for deleting multiple objects.
+type DeleteMultipleObjectsArgs struct {
+	Objects []DeleteObjectArgs `json:"objects"`
+}
+
+// DeleteObjectResult defines the result structure for deleting a single object.
+type DeleteObjectResult struct {
+	Key     string `json:"key"`
+	Code    string `json:"code"`
+	Message string `json:"message"`
+}
+
+// DeleteMultipleObjectsResult defines the result structure for deleting multiple objects.
+type DeleteMultipleObjectsResult struct {
+	Errors []DeleteObjectResult `json:"errors"`
+}
+
+// InitiateMultipartUploadArgs defines the input arguments to initiate a multipart upload.
+type InitiateMultipartUploadArgs struct {
+	CacheControl       string
+	ContentDisposition string
+	Expires            string
+	StorageClass       string
+}
+
+// InitiateMultipartUploadResult defines the result structure to initiate a multipart upload.
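+//
+// The UploadId returned here threads through the rest of the multipart flow; a
+// sketch (error handling elided, names illustrative):
+//
+//	init, _ := InitiateMultipartUpload(cli, bucket, object, "", nil)
+//	etag, _ := UploadPart(cli, bucket, object, init.UploadId, 1, partBody, nil)
+//	completeArgs := &CompleteMultipartUploadArgs{
+//		Parts: []UploadInfoType{{PartNumber: 1, ETag: etag}},
+//	}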
+type InitiateMultipartUploadResult struct {
+	Bucket   string `json:"bucket"`
+	Key      string `json:"key"`
+	UploadId string `json:"uploadId"`
+}
+
+// UploadPartArgs defines the optional arguments for uploading a part.
+type UploadPartArgs struct {
+	ContentMD5    string
+	ContentSha256 string
+	ContentCrc32  string
+}
+
+// UploadPartCopyArgs defines the optional arguments of UploadPartCopy.
+type UploadPartCopyArgs struct {
+	SourceRange       string
+	IfMatch           string
+	IfNoneMatch       string
+	IfModifiedSince   string
+	IfUnmodifiedSince string
+}
+
+type PutSymlinkArgs struct {
+	ForbidOverwrite string
+	StorageClass    string
+	UserMeta        map[string]string
+}
+
+// UploadInfoType defines an uploaded part info structure.
+type UploadInfoType struct {
+	PartNumber int    `json:"partNumber"`
+	ETag       string `json:"eTag"`
+}
+
+// CompleteMultipartUploadArgs defines the input arguments structure of CompleteMultipartUpload.
+type CompleteMultipartUploadArgs struct {
+	Parts        []UploadInfoType  `json:"parts"`
+	UserMeta     map[string]string `json:"-"`
+	Process      string            `json:"-"`
+	ContentCrc32 string            `json:"-"`
+}
+
+// CompleteMultipartUploadResult defines the result structure of CompleteMultipartUpload.
+type CompleteMultipartUploadResult struct {
+	Location     string `json:"location"`
+	Bucket       string `json:"bucket"`
+	Key          string `json:"key"`
+	ETag         string `json:"eTag"`
+	ContentCrc32 string `json:"-"`
+}
+
+// ListPartsArgs defines the input optional arguments of listing parts information.
+type ListPartsArgs struct {
+	MaxParts         int
+	PartNumberMarker string
+}
+
+type ListPartType struct {
+	PartNumber   int    `json:"partNumber"`
+	LastModified string `json:"lastModified"`
+	ETag         string `json:"eTag"`
+	Size         int    `json:"size"`
+}
+
+// ListPartsResult defines the parts info result from ListParts.
+type ListPartsResult struct {
+	Bucket               string         `json:"bucket"`
+	Key                  string         `json:"key"`
+	UploadId             string         `json:"uploadId"`
+	Initiated            string         `json:"initiated"`
+	Owner                OwnerType      `json:"owner"`
+	StorageClass         string         `json:"storageClass"`
+	PartNumberMarker     int            `json:"partNumberMarker"`
+	NextPartNumberMarker int            `json:"nextPartNumberMarker"`
+	MaxParts             int            `json:"maxParts"`
+	IsTruncated          bool           `json:"isTruncated"`
+	Parts                []ListPartType `json:"parts"`
+}
+
+// ListMultipartUploadsArgs defines the optional arguments for ListMultipartUploads.
+type ListMultipartUploadsArgs struct {
+	Delimiter  string
+	KeyMarker  string
+	MaxUploads int
+	Prefix     string
+}
+
+type ListMultipartUploadsType struct {
+	Key          string    `json:"key"`
+	UploadId     string    `json:"uploadId"`
+	Owner        OwnerType `json:"owner"`
+	Initiated    string    `json:"initiated"`
+	StorageClass string    `json:"storageClass,omitempty"`
+}
+
+// ListMultipartUploadsResult defines the multipart uploads result structure.
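+//
+// Listings are paged: when IsTruncated is true, NextKeyMarker becomes the
+// KeyMarker of the next request. A sketch of draining all pages, where listPage
+// is a hypothetical helper wrapping the ListMultipartUploads API call:
+//
+//	args := &ListMultipartUploadsArgs{MaxUploads: 1000}
+//	for {
+//		res, err := listPage(args)
+//		if err != nil || !res.IsTruncated {
+//			break
+//		}
+//		args.KeyMarker = res.NextKeyMarker
+//	}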
+type ListMultipartUploadsResult struct {
+	Bucket         string                     `json:"bucket"`
+	CommonPrefixes []PrefixType               `json:"commonPrefixes"`
+	Delimiter      string                     `json:"delimiter"`
+	Prefix         string                     `json:"prefix"`
+	IsTruncated    bool                       `json:"isTruncated"`
+	KeyMarker      string                     `json:"keyMarker"`
+	MaxUploads     int                        `json:"maxUploads"`
+	NextKeyMarker  string                     `json:"nextKeyMarker"`
+	Uploads        []ListMultipartUploadsType `json:"uploads"`
+}
+
+type ArchiveRestoreArgs struct {
+	RestoreTier string
+	RestoreDays int
+}
+
+type GetBucketTrashResult struct {
+	TrashDir string `json:"trashDir"`
+}
+
+type PutBucketTrashReq struct {
+	TrashDir string `json:"trashDir"`
+}
+
+type PutBucketNotificationReq struct {
+	Notifications []PutBucketNotificationSt `json:"notifications"`
+}
+
+type PutBucketNotificationSt struct {
+	Id        string                        `json:"id"`
+	Name      string                        `json:"name"`
+	AppId     string                        `json:"appId"`
+	Status    string                        `json:"status"`
+	Resources []string                      `json:"resources"`
+	Events    []string                      `json:"events"`
+	Apps      []PutBucketNotificationAppsSt `json:"apps"`
+}
+
+type PutBucketNotificationAppsSt struct {
+	Id       string `json:"id"`
+	EventUrl string `json:"eventUrl"`
+	XVars    string `json:"xVars"`
+}
diff --git a/vendor/github.com/baidubce/bce-sdk-go/services/bos/api/multipart.go b/vendor/github.com/baidubce/bce-sdk-go/services/bos/api/multipart.go
new file mode 100644
index 0000000000000..7a4b813643f2d
--- /dev/null
+++ b/vendor/github.com/baidubce/bce-sdk-go/services/bos/api/multipart.go
@@ -0,0 +1,420 @@
+/*
+ * Copyright 2017 Baidu, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
+ * except in compliance with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the
+ * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
+ * either express or implied. See the License for the specific language governing permissions
+ * and limitations under the License.
+ */ + +// multipart.go - the multipart-related APIs definition supported by the BOS service + +package api + +import ( + "bytes" + "fmt" + "strings" + + "github.com/baidubce/bce-sdk-go/bce" + "github.com/baidubce/bce-sdk-go/http" + "github.com/baidubce/bce-sdk-go/util" +) + +// InitiateMultipartUpload - initiate a multipart upload to get a upload ID +// +// PARAMS: +// - cli: the client agent which can perform sending request +// - bucket: the bucket name +// - object: the object name +// - contentType: the content type of the object to be uploaded which should be specified, +// otherwise use the default(application/octet-stream) +// - args: the optional arguments +// RETURNS: +// - *InitiateMultipartUploadResult: the result data structure +// - error: nil if ok otherwise the specific error +func InitiateMultipartUpload(cli bce.Client, bucket, object, contentType string, + args *InitiateMultipartUploadArgs) (*InitiateMultipartUploadResult, error) { + req := &bce.BceRequest{} + req.SetUri(getObjectUri(bucket, object)) + req.SetMethod(http.POST) + req.SetParam("uploads", "") + if len(contentType) == 0 { + contentType = RAW_CONTENT_TYPE + } + req.SetHeader(http.CONTENT_TYPE, contentType) + + // Optional arguments settings + if args != nil { + setOptionalNullHeaders(req, map[string]string{ + http.CACHE_CONTROL: args.CacheControl, + http.CONTENT_DISPOSITION: args.ContentDisposition, + http.EXPIRES: args.Expires, + }) + + if validStorageClass(args.StorageClass) { + req.SetHeader(http.BCE_STORAGE_CLASS, args.StorageClass) + } else { + if len(args.StorageClass) != 0 { + return nil, bce.NewBceClientError("invalid storage class value: " + + args.StorageClass) + } + } + } + + // Send request and get the result + resp := &bce.BceResponse{} + if err := SendRequest(cli, req, resp); err != nil { + return nil, err + } + if resp.IsFail() { + return nil, resp.ServiceError() + } + result := &InitiateMultipartUploadResult{} + if err := resp.ParseJsonBody(result); err != nil { + return nil, err + } + return result, nil +} + +// UploadPart - upload the single part in the multipart upload process +// +// PARAMS: +// - cli: the client agent which can perform sending request +// - bucket: the bucket name +// - object: the object name +// - uploadId: the multipart upload id +// - partNumber: the current part number +// - content: the uploaded part content +// - args: the optional arguments +// RETURNS: +// - string: the etag of the uploaded part +// - error: nil if ok otherwise the specific error +func UploadPart(cli bce.Client, bucket, object, uploadId string, partNumber int, + content *bce.Body, args *UploadPartArgs) (string, error) { + req := &bce.BceRequest{} + req.SetUri(getObjectUri(bucket, object)) + req.SetMethod(http.PUT) + req.SetParam("uploadId", uploadId) + req.SetParam("partNumber", fmt.Sprintf("%d", partNumber)) + if content == nil { + return "", bce.NewBceClientError("upload part content should not be empty") + } + if content.Size() >= THRESHOLD_100_CONTINUE { + req.SetHeader("Expect", "100-continue") + } + req.SetBody(content) + + // Optional arguments settings + if args != nil { + setOptionalNullHeaders(req, map[string]string{ + http.CONTENT_MD5: args.ContentMD5, + http.BCE_CONTENT_SHA256: args.ContentSha256, + http.BCE_CONTENT_CRC32: args.ContentCrc32, + }) + } + + // Send request and get the result + resp := &bce.BceResponse{} + if err := SendRequest(cli, req, resp); err != nil { + return "", err + } + if resp.IsFail() { + return "", resp.ServiceError() + } + defer func() { resp.Body().Close() 
}() + return strings.Trim(resp.Header(http.ETAG), "\""), nil +} + +// UploadPartFromBytes - upload the single part in the multipart upload process +// +// PARAMS: +// - cli: the client agent which can perform sending request +// - bucket: the bucket name +// - object: the object name +// - uploadId: the multipart upload id +// - partNumber: the current part number +// - content: the uploaded part content +// - args: the optional arguments +// RETURNS: +// - string: the etag of the uploaded part +// - error: nil if ok otherwise the specific error +func UploadPartFromBytes(cli bce.Client, bucket, object, uploadId string, partNumber int, + content []byte, args *UploadPartArgs) (string, error) { + req := &bce.BceRequest{} + req.SetUri(getObjectUri(bucket, object)) + req.SetMethod(http.PUT) + req.SetParam("uploadId", uploadId) + req.SetParam("partNumber", fmt.Sprintf("%d", partNumber)) + if content == nil { + return "", bce.NewBceClientError("upload part content should not be empty") + } + size := len(content) + if size >= THRESHOLD_100_CONTINUE { + req.SetHeader("Expect", "100-continue") + } + // set md5 and content-length + req.SetLength(int64(size)) + if size > 0 { + // calc md5 + if args == nil || args.ContentMD5 == "" { + buf := bytes.NewBuffer(content) + contentMD5, err := util.CalculateContentMD5(buf, int64(size)) + if err != nil { + return "", err + } + req.SetHeader(http.CONTENT_MD5, contentMD5) + } + req.SetHeader(http.CONTENT_LENGTH, fmt.Sprintf("%d", size)) + } + // Optional arguments settings + if args != nil { + setOptionalNullHeaders(req, map[string]string{ + http.CONTENT_MD5: args.ContentMD5, + http.BCE_CONTENT_SHA256: args.ContentSha256, + http.BCE_CONTENT_CRC32: args.ContentCrc32, + }) + } + // Send request and get the result + resp := &bce.BceResponse{} + if err := cli.SendRequestFromBytes(req, resp, content); err != nil { + return "", err + } + if resp.IsFail() { + return "", resp.ServiceError() + } + defer func() { resp.Body().Close() }() + return strings.Trim(resp.Header(http.ETAG), "\""), nil +} + +// UploadPartCopy - copy the multipart data +// +// PARAMS: +// - cli: the client agent which can perform sending request +// - bucket: the destination bucket name +// - object: the destination object name +// - source: the copy source uri +// - uploadId: the multipart upload id +// - partNumber: the current part number +// - args: the optional arguments +// RETURNS: +// - *CopyObjectResult: the lastModified and eTag of the part +// - error: nil if ok otherwise the specific error +func UploadPartCopy(cli bce.Client, bucket, object, source, uploadId string, partNumber int, + args *UploadPartCopyArgs) (*CopyObjectResult, error) { + req := &bce.BceRequest{} + req.SetUri(getObjectUri(bucket, object)) + req.SetMethod(http.PUT) + req.SetParam("uploadId", uploadId) + req.SetParam("partNumber", fmt.Sprintf("%d", partNumber)) + if len(source) == 0 { + return nil, bce.NewBceClientError("upload part copy source should not be empty") + } + req.SetHeader(http.BCE_COPY_SOURCE, util.UriEncode(source, false)) + + // Optional arguments settings + if args != nil { + setOptionalNullHeaders(req, map[string]string{ + http.BCE_COPY_SOURCE_RANGE: args.SourceRange, + http.BCE_COPY_SOURCE_IF_MATCH: args.IfMatch, + http.BCE_COPY_SOURCE_IF_NONE_MATCH: args.IfNoneMatch, + http.BCE_COPY_SOURCE_IF_MODIFIED_SINCE: args.IfModifiedSince, + http.BCE_COPY_SOURCE_IF_UNMODIFIED_SINCE: args.IfUnmodifiedSince, + }) + } + + // Send request and get the result + resp := &bce.BceResponse{} + if err := SendRequest(cli, 
req, resp); err != nil { + return nil, err + } + if resp.IsFail() { + return nil, resp.ServiceError() + } + result := &CopyObjectResult{} + if err := resp.ParseJsonBody(result); err != nil { + return nil, err + } + return result, nil +} + +// CompleteMultipartUpload - finish a multipart upload operation +// +// PARAMS: +// - cli: the client agent which can perform sending request +// - bucket: the destination bucket name +// - object: the destination object name +// - uploadId: the multipart upload id +// - parts: all parts info stream +// - meta: user defined meta data +// RETURNS: +// - *CompleteMultipartUploadResult: the result data +// - error: nil if ok otherwise the specific error +func CompleteMultipartUpload(cli bce.Client, bucket, object, uploadId string, + body *bce.Body, args *CompleteMultipartUploadArgs) (*CompleteMultipartUploadResult, error) { + req := &bce.BceRequest{} + req.SetUri(getObjectUri(bucket, object)) + req.SetMethod(http.POST) + req.SetParam("uploadId", uploadId) + if body == nil { + return nil, bce.NewBceClientError("upload body info should not be emtpy") + } + if body.Size() >= THRESHOLD_100_CONTINUE { + req.SetHeader("Expect", "100-continue") + } + req.SetBody(body) + + // Optional arguments settings + if args.UserMeta != nil { + if err := setUserMetadata(req, args.UserMeta); err != nil { + return nil, err + } + } + if len(args.Process) != 0 { + req.SetHeader(http.BCE_PROCESS, args.Process) + } + if len(args.ContentCrc32) != 0 { + req.SetHeader(http.BCE_CONTENT_CRC32, args.ContentCrc32) + } + + // Send request and get the result + resp := &bce.BceResponse{} + if err := SendRequest(cli, req, resp); err != nil { + return nil, err + } + if resp.IsFail() { + return nil, resp.ServiceError() + } + result := &CompleteMultipartUploadResult{} + if err := resp.ParseJsonBody(result); err != nil { + return nil, err + } + headers := resp.Headers() + if val, ok := headers[toHttpHeaderKey(http.BCE_CONTENT_CRC32)]; ok { + result.ContentCrc32 = val + } + return result, nil +} + +// AbortMultipartUpload - abort a multipart upload operation +// +// PARAMS: +// - cli: the client agent which can perform sending request +// - bucket: the destination bucket name +// - object: the destination object name +// - uploadId: the multipart upload id +// RETURNS: +// - error: nil if ok otherwise the specific error +func AbortMultipartUpload(cli bce.Client, bucket, object, uploadId string) error { + req := &bce.BceRequest{} + req.SetUri(getObjectUri(bucket, object)) + req.SetMethod(http.DELETE) + req.SetParam("uploadId", uploadId) + + resp := &bce.BceResponse{} + if err := SendRequest(cli, req, resp); err != nil { + return err + } + if resp.IsFail() { + return resp.ServiceError() + } + defer func() { resp.Body().Close() }() + return nil +} + +// ListParts - list the successfully uploaded parts info by upload id +// +// PARAMS: +// - cli: the client agent which can perform sending request +// - bucket: the destination bucket name +// - object: the destination object name +// - uploadId: the multipart upload id +// - args: the optional arguments +// partNumberMarker: return parts after this marker +// maxParts: the max number of return parts, default and maximum is 1000 +// RETURNS: +// - *ListPartsResult: the uploaded parts info result +// - error: nil if ok otherwise the specific error +func ListParts(cli bce.Client, bucket, object, uploadId string, + args *ListPartsArgs) (*ListPartsResult, error) { + req := &bce.BceRequest{} + req.SetUri(getObjectUri(bucket, object)) + 
req.SetMethod(http.GET) + req.SetParam("uploadId", uploadId) + + // Optional arguments settings + if args != nil { + if len(args.PartNumberMarker) > 0 { + req.SetParam("partNumberMarker", args.PartNumberMarker) + } + if args.MaxParts > 0 { + req.SetParam("maxParts", fmt.Sprintf("%d", args.MaxParts)) + } + } + + // Send request and get the result + resp := &bce.BceResponse{} + if err := SendRequest(cli, req, resp); err != nil { + return nil, err + } + if resp.IsFail() { + return nil, resp.ServiceError() + } + result := &ListPartsResult{} + if err := resp.ParseJsonBody(result); err != nil { + return nil, err + } + return result, nil +} + +// ListMultipartUploads - list the unfinished uploaded parts of the given bucket +// +// PARAMS: +// - cli: the client agent which can perform sending request +// - bucket: the destination bucket name +// - args: the optional arguments +// RETURNS: +// - *ListMultipartUploadsResult: the unfinished uploaded parts info result +// - error: nil if ok otherwise the specific error +func ListMultipartUploads(cli bce.Client, bucket string, + args *ListMultipartUploadsArgs) (*ListMultipartUploadsResult, error) { + req := &bce.BceRequest{} + req.SetUri(getBucketUri(bucket)) + req.SetMethod(http.GET) + req.SetParam("uploads", "") + + // Optional arguments settings + if args != nil { + if len(args.Delimiter) > 0 { + req.SetParam("delimiter", args.Delimiter) + } + if len(args.KeyMarker) > 0 { + req.SetParam("keyMarker", args.KeyMarker) + } + if args.MaxUploads > 0 { + req.SetParam("maxUploads", fmt.Sprintf("%d", args.MaxUploads)) + } + if len(args.Prefix) > 0 { + req.SetParam("prefix", args.Prefix) + } + } + + // Send request and get the result + resp := &bce.BceResponse{} + if err := SendRequest(cli, req, resp); err != nil { + return nil, err + } + if resp.IsFail() { + return nil, resp.ServiceError() + } + result := &ListMultipartUploadsResult{} + if err := resp.ParseJsonBody(result); err != nil { + return nil, err + } + return result, nil +} diff --git a/vendor/github.com/baidubce/bce-sdk-go/services/bos/api/object.go b/vendor/github.com/baidubce/bce-sdk-go/services/bos/api/object.go new file mode 100644 index 0000000000000..ecebf69565b58 --- /dev/null +++ b/vendor/github.com/baidubce/bce-sdk-go/services/bos/api/object.go @@ -0,0 +1,931 @@ +/* + * Copyright 2017 Baidu, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file + * except in compliance with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the + * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, + * either express or implied. See the License for the specific language governing permissions + * and limitations under the License. 
+ */ + +// object.go - the object APIs definition supported by the BOS service + +package api + +import ( + "encoding/json" + "fmt" + "net" + "strconv" + "strings" + + "github.com/baidubce/bce-sdk-go/auth" + "github.com/baidubce/bce-sdk-go/bce" + "github.com/baidubce/bce-sdk-go/http" + "github.com/baidubce/bce-sdk-go/util" +) + +// PutObject - put the object from the string or the stream +// +// PARAMS: +// - cli: the client agent which can perform sending request +// - bucket: the bucket name of the object +// - object: the name of the object +// - body: the input content of the object +// - args: the optional arguments of this api +// RETURNS: +// - string: the etag of the object +// - error: nil if ok otherwise the specific error +func PutObject(cli bce.Client, bucket, object string, body *bce.Body, + args *PutObjectArgs) (string, error) { + req := &bce.BceRequest{} + req.SetUri(getObjectUri(bucket, object)) + req.SetMethod(http.PUT) + if body == nil { + return "", bce.NewBceClientError("PutObject body should not be emtpy") + } + if body.Size() >= THRESHOLD_100_CONTINUE { + req.SetHeader("Expect", "100-continue") + } + req.SetBody(body) + + // Optional arguments settings + if args != nil { + setOptionalNullHeaders(req, map[string]string{ + http.CACHE_CONTROL: args.CacheControl, + http.CONTENT_DISPOSITION: args.ContentDisposition, + http.CONTENT_TYPE: args.ContentType, + http.EXPIRES: args.Expires, + http.BCE_CONTENT_SHA256: args.ContentSha256, + http.BCE_CONTENT_CRC32: args.ContentCrc32, + }) + if args.ContentLength > 0 { + // User specified Content-Length can be smaller than the body size, so the body should + // be reset. The `net/http.Client' does not support the Content-Length bigger than the + // body size. + if args.ContentLength > body.Size() { + return "", bce.NewBceClientError(fmt.Sprintf("ContentLength %d is bigger than body size %d", args.ContentLength, body.Size())) + } + body, err := bce.NewBodyFromSizedReader(body.Stream(), args.ContentLength) + if err != nil { + return "", bce.NewBceClientError(err.Error()) + } + req.SetHeader(http.CONTENT_LENGTH, fmt.Sprintf("%d", args.ContentLength)) + req.SetBody(body) // re-assign body + } + + // Reset the contentMD5 if set by user + if len(args.ContentMD5) != 0 { + req.SetHeader(http.CONTENT_MD5, args.ContentMD5) + } + + if validStorageClass(args.StorageClass) { + req.SetHeader(http.BCE_STORAGE_CLASS, args.StorageClass) + } else { + if len(args.StorageClass) != 0 { + return "", bce.NewBceClientError("invalid storage class value: " + + args.StorageClass) + } + } + + if err := setUserMetadata(req, args.UserMeta); err != nil { + return "", err + } + + if len(args.Process) != 0 { + req.SetHeader(http.BCE_PROCESS, args.Process) + } + } + + resp := &bce.BceResponse{} + if err := SendRequest(cli, req, resp); err != nil { + return "", err + } + if resp.IsFail() { + return "", resp.ServiceError() + } + defer func() { resp.Body().Close() }() + return strings.Trim(resp.Header(http.ETAG), "\""), nil +} + +// CopyObject - copy one object to a new object with new bucket and/or name. It can alse set the +// metadata of the object with the same source and target. 
+// +// PARAMS: +// - cli: the client object which can perform sending request +// - bucket: the bucket name of the target object +// - object: the name of the target object +// - source: the source object uri +// - *CopyObjectArgs: the optional input args for copying object +// RETURNS: +// - *CopyObjectResult: the result object which contains etag and lastmodified +// - error: nil if ok otherwise the specific error +func CopyObject(cli bce.Client, bucket, object, source string, + args *CopyObjectArgs) (*CopyObjectResult, error) { + req := &bce.BceRequest{} + req.SetUri(getObjectUri(bucket, object)) + req.SetMethod(http.PUT) + if len(source) == 0 { + return nil, bce.NewBceClientError("copy source should not be null") + } + req.SetHeader(http.BCE_COPY_SOURCE, util.UriEncode(source, false)) + + // Optional arguments settings + if args != nil { + setOptionalNullHeaders(req, map[string]string{ + http.CACHE_CONTROL: args.CacheControl, + http.CONTENT_DISPOSITION: args.ContentDisposition, + http.CONTENT_ENCODING: args.ContentEncoding, + http.CONTENT_RANGE: args.ContentRange, + http.CONTENT_TYPE: args.ContentType, + http.EXPIRES: args.Expires, + http.LAST_MODIFIED: args.LastModified, + http.ETAG: args.ETag, + http.CONTENT_MD5: args.ContentMD5, + http.BCE_CONTENT_SHA256: args.ContentSha256, + http.BCE_OBJECT_TYPE: args.ObjectType, + http.BCE_NEXT_APPEND_OFFSET: args.NextAppendOffset, + http.BCE_COPY_SOURCE_IF_MATCH: args.IfMatch, + http.BCE_COPY_SOURCE_IF_NONE_MATCH: args.IfNoneMatch, + http.BCE_COPY_SOURCE_IF_MODIFIED_SINCE: args.IfModifiedSince, + http.BCE_COPY_SOURCE_IF_UNMODIFIED_SINCE: args.IfUnmodifiedSince, + }) + if args.ContentLength != 0 { + req.SetHeader(http.CONTENT_LENGTH, fmt.Sprintf("%d", args.ContentLength)) + } + if validMetadataDirective(args.MetadataDirective) { + req.SetHeader(http.BCE_COPY_METADATA_DIRECTIVE, args.MetadataDirective) + } else { + if len(args.MetadataDirective) != 0 { + return nil, bce.NewBceClientError( + "invalid metadata directive value: " + args.MetadataDirective) + } + } + if validStorageClass(args.StorageClass) { + req.SetHeader(http.BCE_STORAGE_CLASS, args.StorageClass) + } else { + if len(args.StorageClass) != 0 { + return nil, bce.NewBceClientError("invalid storage class value: " + + args.StorageClass) + } + } + if err := setUserMetadata(req, args.UserMeta); err != nil { + return nil, err + } + } + + // Send request and get the result + resp := &bce.BceResponse{} + if err := SendRequest(cli, req, resp); err != nil { + return nil, err + } + if resp.IsFail() { + return nil, resp.ServiceError() + } + jsonBody := &CopyObjectResult{} + if err := resp.ParseJsonBody(jsonBody); err != nil { + return nil, err + } + return jsonBody, nil +} + +// GetObject - get the object content with range and response-headers-specified support +// +// PARAMS: +// - cli: the client agent which can perform sending request +// - bucket: the bucket name of the object +// - object: the name of the object +// - responseHeaders: the optional response headers to get the given object +// - ranges: the optional range start and end to get the given object +// RETURNS: +// - *GetObjectResult: the output content result of the object +// - error: nil if ok otherwise the specific error +func GetObject(cli bce.Client, bucket, object string, responseHeaders map[string]string, + ranges ...int64) (*GetObjectResult, error) { + req := &bce.BceRequest{} + req.SetUri(getObjectUri(bucket, object)) + req.SetMethod(http.GET) + + // Optional arguments settings + if responseHeaders != nil { + for k, v := 
range responseHeaders { + if _, ok := GET_OBJECT_ALLOWED_RESPONSE_HEADERS[k]; ok { + req.SetParam("response"+k, v) + } + } + } + if len(ranges) != 0 { + rangeStr := "bytes=" + if len(ranges) == 1 { + rangeStr += fmt.Sprintf("%d", ranges[0]) + "-" + } else { + rangeStr += fmt.Sprintf("%d", ranges[0]) + "-" + fmt.Sprintf("%d", ranges[1]) + } + req.SetHeader("Range", rangeStr) + } + + // Send request and get the result + resp := &bce.BceResponse{} + if err := SendRequest(cli, req, resp); err != nil { + return nil, err + } + if resp.IsFail() { + return nil, resp.ServiceError() + } + headers := resp.Headers() + result := &GetObjectResult{} + if val, ok := headers[http.CACHE_CONTROL]; ok { + result.CacheControl = val + } + if val, ok := headers[http.CONTENT_DISPOSITION]; ok { + result.ContentDisposition = val + } + if val, ok := headers[http.CONTENT_LENGTH]; ok { + if length, err := strconv.ParseInt(val, 10, 64); err == nil { + result.ContentLength = length + } + } + if val, ok := headers[http.CONTENT_RANGE]; ok { + result.ContentRange = val + } + if val, ok := headers[http.CONTENT_TYPE]; ok { + result.ContentType = val + } + if val, ok := headers[http.CONTENT_MD5]; ok { + result.ContentMD5 = val + } + if val, ok := headers[http.EXPIRES]; ok { + result.Expires = val + } + if val, ok := headers[http.LAST_MODIFIED]; ok { + result.LastModified = val + } + if val, ok := headers[http.ETAG]; ok { + result.ETag = strings.Trim(val, "\"") + } + if val, ok := headers[http.CONTENT_LANGUAGE]; ok { + result.ContentLanguage = val + } + if val, ok := headers[http.CONTENT_ENCODING]; ok { + result.ContentEncoding = val + } + if val, ok := headers[toHttpHeaderKey(http.BCE_CONTENT_SHA256)]; ok { + result.ContentSha256 = val + } + if val, ok := headers[toHttpHeaderKey(http.BCE_CONTENT_CRC32)]; ok { + result.ContentCrc32 = val + } + if val, ok := headers[toHttpHeaderKey(http.BCE_STORAGE_CLASS)]; ok { + result.StorageClass = val + } + bcePrefix := toHttpHeaderKey(http.BCE_USER_METADATA_PREFIX) + for k, v := range headers { + if strings.Index(k, bcePrefix) == 0 { + if result.UserMeta == nil { + result.UserMeta = make(map[string]string) + } + result.UserMeta[k[len(bcePrefix):]] = v + } + } + if val, ok := headers[toHttpHeaderKey(http.BCE_OBJECT_TYPE)]; ok { + result.ObjectType = val + } + if val, ok := headers[toHttpHeaderKey(http.BCE_NEXT_APPEND_OFFSET)]; ok { + result.NextAppendOffset = val + } + result.Body = resp.Body() + return result, nil +} + +// GetObjectMeta - get the meta data of the given object +// +// PARAMS: +// - cli: the client agent which can perform sending request +// - bucket: the bucket name of the object +// - object: the name of the object +// RETURNS: +// - *GetObjectMetaResult: the result of this api +// - error: nil if ok otherwise the specific error +func GetObjectMeta(cli bce.Client, bucket, object string) (*GetObjectMetaResult, error) { + req := &bce.BceRequest{} + req.SetUri(getObjectUri(bucket, object)) + req.SetMethod(http.HEAD) + + // Send request and get the result + resp := &bce.BceResponse{} + if err := SendRequest(cli, req, resp); err != nil { + return nil, err + } + if resp.IsFail() { + return nil, resp.ServiceError() + } + headers := resp.Headers() + result := &GetObjectMetaResult{} + if val, ok := headers[http.CACHE_CONTROL]; ok { + result.CacheControl = val + } + if val, ok := headers[http.CONTENT_DISPOSITION]; ok { + result.ContentDisposition = val + } + if val, ok := headers[http.CONTENT_LENGTH]; ok { + if length, err := strconv.ParseInt(val, 10, 64); err == nil { + 
result.ContentLength = length + } + } + if val, ok := headers[http.CONTENT_RANGE]; ok { + result.ContentRange = val + } + if val, ok := headers[http.CONTENT_TYPE]; ok { + result.ContentType = val + } + if val, ok := headers[http.CONTENT_MD5]; ok { + result.ContentMD5 = val + } + if val, ok := headers[http.EXPIRES]; ok { + result.Expires = val + } + if val, ok := headers[http.LAST_MODIFIED]; ok { + result.LastModified = val + } + if val, ok := headers[http.ETAG]; ok { + result.ETag = strings.Trim(val, "\"") + } + if val, ok := headers[http.CONTENT_ENCODING]; ok { + result.ContentEncoding = val + } + if val, ok := headers[toHttpHeaderKey(http.BCE_CONTENT_SHA256)]; ok { + result.ContentSha256 = val + } + if val, ok := headers[toHttpHeaderKey(http.BCE_CONTENT_CRC32)]; ok { + result.ContentCrc32 = val + } + if val, ok := headers[toHttpHeaderKey(http.BCE_STORAGE_CLASS)]; ok { + result.StorageClass = val + } + if val, ok := headers[toHttpHeaderKey(http.BCE_RESTORE)]; ok { + result.BceRestore = val + } + if val, ok := headers[http.BCE_OBJECT_TYPE]; ok { + result.BceObjectType = val + } + bcePrefix := toHttpHeaderKey(http.BCE_USER_METADATA_PREFIX) + for k, v := range headers { + if strings.Index(k, bcePrefix) == 0 { + if result.UserMeta == nil { + result.UserMeta = make(map[string]string) + } + result.UserMeta[k[len(bcePrefix):]] = v + } + } + if val, ok := headers[toHttpHeaderKey(http.BCE_OBJECT_TYPE)]; ok { + result.ObjectType = val + } + if val, ok := headers[toHttpHeaderKey(http.BCE_NEXT_APPEND_OFFSET)]; ok { + result.NextAppendOffset = val + } + defer func() { resp.Body().Close() }() + return result, nil +} + +// SelectObject - select the object content +// +// PARAMS: +// - cli: the client agent which can perform sending request +// - bucket: the bucket name of the object +// - object: the name of the object +// - args: the optional arguments to perform the select operation +// RETURNS: +// - *SelectObjectResult: the output select content result of the object +// - error: nil if ok otherwise the specific error +func SelectObject(cli bce.Client, bucket, object string, args *SelectObjectArgs) (*SelectObjectResult, error) { + req := &bce.BceRequest{} + req.SetUri(getObjectUri(bucket, object)) + req.SetMethod(http.POST) + req.SetParam("select", "") + req.SetParam("type", args.SelectType) + + jsonBytes, jsonErr := json.Marshal(args) + if jsonErr != nil { + return nil, jsonErr + } + body, err := bce.NewBodyFromBytes(jsonBytes) + if err != nil { + return nil, err + } + req.SetBody(body) + + // Send request and get the result + resp := &bce.BceResponse{} + if err := SendRequest(cli, req, resp); err != nil { + return nil, err + } + if resp.IsFail() { + return nil, resp.ServiceError() + } + + result := &SelectObjectResult{} + + result.Body = resp.Body() + return result, nil +} + +// FetchObject - fetch the object by the given url and store it to a bucket +// +// PARAMS: +// - cli: the client agent which can perform sending request +// - bucket: the bucket name to store the object +// - object: the name of the object to be stored +// - source: the source url to fetch +// - args: the optional arguments to perform the fetch operation +// RETURNS: +// - *FetchObjectArgs: the result of this api +// - error: nil if ok otherwise the specific error +func FetchObject(cli bce.Client, bucket, object, source string, + args *FetchObjectArgs) (*FetchObjectResult, error) { + req := &bce.BceRequest{} + req.SetUri(getObjectUri(bucket, object)) + req.SetMethod(http.POST) + req.SetParam("fetch", "") + if len(source) == 0 
{ + return nil, bce.NewBceClientError("invalid fetch source value: " + source) + } + req.SetHeader(http.BCE_PREFIX+"fetch-source", source) + + // Optional arguments settings + if args != nil { + if validFetchMode(args.FetchMode) { + req.SetHeader(http.BCE_PREFIX+"fetch-mode", args.FetchMode) + } else { + if len(args.FetchMode) != 0 { + return nil, bce.NewBceClientError("invalid fetch mode value: " + args.FetchMode) + } + } + if validStorageClass(args.StorageClass) { + req.SetHeader(http.BCE_STORAGE_CLASS, args.StorageClass) + } else { + if len(args.StorageClass) != 0 { + return nil, bce.NewBceClientError("invalid storage class value: " + + args.StorageClass) + } + } + } + + // Send request and get the result + resp := &bce.BceResponse{} + if err := SendRequest(cli, req, resp); err != nil { + return nil, err + } + if resp.IsFail() { + return nil, resp.ServiceError() + } + jsonBody := &FetchObjectResult{} + if err := resp.ParseJsonBody(jsonBody); err != nil { + return nil, err + } + return jsonBody, nil +} + +// AppendObject - append the given content to a new or existed object which is appendable +// +// PARAMS: +// - cli: the client agent which can perform sending request +// - bucket: the bucket name of the object +// - object: the name of the object +// - content: the content to be appended +// - args: the optional arguments to perform the append operation +// RETURNS: +// - *AppendObjectResult: the result status for this api +// - error: nil if ok otherwise the specific error +func AppendObject(cli bce.Client, bucket, object string, content *bce.Body, + args *AppendObjectArgs) (*AppendObjectResult, error) { + req := &bce.BceRequest{} + req.SetUri(getObjectUri(bucket, object)) + req.SetMethod(http.POST) + req.SetParam("append", "") + if content == nil { + return nil, bce.NewBceClientError("AppendObject body should not be emtpy") + } + if content.Size() >= THRESHOLD_100_CONTINUE { + req.SetHeader("Expect", "100-continue") + } + req.SetBody(content) + + // Optional arguments settings + if args != nil { + if args.Offset < 0 { + return nil, bce.NewBceClientError( + fmt.Sprintf("invalid append offset value: %d", args.Offset)) + } + if args.Offset > 0 { + req.SetParam("offset", fmt.Sprintf("%d", args.Offset)) + } + setOptionalNullHeaders(req, map[string]string{ + http.CACHE_CONTROL: args.CacheControl, + http.CONTENT_DISPOSITION: args.ContentDisposition, + http.CONTENT_MD5: args.ContentMD5, + http.CONTENT_TYPE: args.ContentType, + http.EXPIRES: args.Expires, + http.BCE_CONTENT_SHA256: args.ContentSha256, + http.BCE_CONTENT_CRC32: args.ContentCrc32, + }) + + if validStorageClass(args.StorageClass) { + req.SetHeader(http.BCE_STORAGE_CLASS, args.StorageClass) + } else { + if len(args.StorageClass) != 0 { + return nil, bce.NewBceClientError("invalid storage class value: " + + args.StorageClass) + } + } + if err := setUserMetadata(req, args.UserMeta); err != nil { + return nil, err + } + } + + // Send request and get the result + resp := &bce.BceResponse{} + if err := SendRequest(cli, req, resp); err != nil { + return nil, err + } + if resp.IsFail() { + return nil, resp.ServiceError() + } + headers := resp.Headers() + result := &AppendObjectResult{} + if val, ok := headers[http.CONTENT_MD5]; ok { + result.ContentMD5 = val + } + if val, ok := headers[toHttpHeaderKey(http.BCE_NEXT_APPEND_OFFSET)]; ok { + nextOffset, offsetErr := strconv.ParseInt(val, 10, 64) + if offsetErr != nil { + nextOffset = content.Size() + } + result.NextAppendOffset = nextOffset + } else { + result.NextAppendOffset = 
content.Size() + } + if val, ok := headers[toHttpHeaderKey(http.BCE_CONTENT_CRC32)]; ok { + result.ContentCrc32 = val + } + if val, ok := headers[http.ETAG]; ok { + result.ETag = strings.Trim(val, "\"") + } + defer func() { resp.Body().Close() }() + return result, nil +} + +// DeleteObject - delete the given object +// +// PARAMS: +// - cli: the client agent which can perform sending request +// - bucket: the bucket name of the object to be deleted +// - object: the name of the object +// RETURNS: +// - error: nil if ok otherwise the specific error +func DeleteObject(cli bce.Client, bucket, object string) error { + req := &bce.BceRequest{} + req.SetUri(getObjectUri(bucket, object)) + req.SetMethod(http.DELETE) + + resp := &bce.BceResponse{} + if err := SendRequest(cli, req, resp); err != nil { + return err + } + if resp.IsFail() { + return resp.ServiceError() + } + defer func() { resp.Body().Close() }() + return nil +} + +// DeleteMultipleObjects - delete the given objects within a single http request +// +// PARAMS: +// - cli: the client agent which can perform sending request +// - bucket: the bucket name of the objects to be deleted +// - objectListStream: the objects list to be delete with json format +// RETURNS: +// - *DeleteMultipleObjectsResult: the objects failed to delete +// - error: nil if ok otherwise the specific error +func DeleteMultipleObjects(cli bce.Client, bucket string, + objectListStream *bce.Body) (*DeleteMultipleObjectsResult, error) { + req := &bce.BceRequest{} + req.SetUri(getBucketUri(bucket)) + req.SetMethod(http.POST) + req.SetParam("delete", "") + req.SetHeader(http.CONTENT_TYPE, "application/json; charset=utf-8") + if objectListStream == nil { + return nil, bce.NewBceClientError("DeleteMultipleObjects body should not be emtpy") + } + if objectListStream.Size() >= THRESHOLD_100_CONTINUE { + req.SetHeader("Expect", "100-continue") + } + req.SetBody(objectListStream) + + resp := &bce.BceResponse{} + if err := SendRequest(cli, req, resp); err != nil { + return nil, err + } + if resp.IsFail() { + return nil, resp.ServiceError() + } + jsonBody := &DeleteMultipleObjectsResult{} + if err := resp.ParseJsonBody(jsonBody); err != nil { + return nil, err + } + return jsonBody, nil +} + +// GeneratePresignedUrl - generate an authorization url with expire time and optional arguments +// +// PARAMS: +// - conf: the client configuration +// - signer: the client signer object to generate the authorization string +// - bucket: the target bucket name +// - object: the target object name +// - expire: expire time in seconds +// - method: optional sign method, default is GET +// - headers: optional sign headers, default just set the Host +// - params: optional sign params, default is empty +// RETURNS: +// - string: the presigned url with authorization string +func GeneratePresignedUrl(conf *bce.BceClientConfiguration, signer auth.Signer, bucket, + object string, expire int, method string, headers, params map[string]string) string { + req := &bce.BceRequest{} + + // Set basic arguments + if len(method) == 0 { + method = http.GET + } + req.SetMethod(method) + req.SetEndpoint(conf.Endpoint) + if req.Protocol() == "" { + req.SetProtocol(bce.DEFAULT_PROTOCOL) + } + domain := req.Host() + if pos := strings.Index(domain, ":"); pos != -1 { + domain = domain[:pos] + } + if len(bucket) != 0 && net.ParseIP(domain) == nil { // not use an IP as the endpoint by client + req.SetUri(bce.URI_PREFIX + object) + if !conf.CnameEnabled && !isCnameLikeHost(conf.Endpoint) { + req.SetHost(bucket + "." 
+ req.Host()) + } + } else { + req.SetUri(getObjectUri(bucket, object)) + if conf.CnameEnabled || isCnameLikeHost(conf.Endpoint) { + req.SetUri(getCnameUri(req.Uri())) + } + } + + // Set headers and params if given. + req.SetHeader(http.HOST, req.Host()) + if headers != nil { + for k, v := range headers { + req.SetHeader(k, v) + } + } + if params != nil { + for k, v := range params { + req.SetParam(k, v) + } + } + + // Copy one SignOptions object to rewrite it. + option := *conf.SignOption + if expire != 0 { + option.ExpireSeconds = expire + } + + // Generate the authorization string and return the signed url. + signer.Sign(&req.Request, conf.Credentials, &option) + req.SetParam("authorization", req.Header(http.AUTHORIZATION)) + return fmt.Sprintf("%s://%s%s?%s", req.Protocol(), req.Host(), + util.UriEncode(req.Uri(), false), req.QueryString()) +} + +// PutObjectAcl - set the ACL of the given object +// +// PARAMS: +// - cli: the client agent which can perform sending request +// - bucket: the bucket name +// - object: the object name +// - cannedAcl: support private and public-read +// - grantRead: user id list +// - grantFullControl: user id list +// - aclBody: the acl file body +// RETURNS: +// - error: nil if success otherwise the specific error +func PutObjectAcl(cli bce.Client, bucket, object, cannedAcl string, + grantRead, grantFullControl []string, aclBody *bce.Body) error { + req := &bce.BceRequest{} + req.SetUri(getObjectUri(bucket, object)) + req.SetMethod(http.PUT) + req.SetParam("acl", "") + + // Joiner for generate the user id list string for grant acl header + joiner := func(ids []string) string { + for i := range ids { + ids[i] = "id=\"" + ids[i] + "\"" + } + return strings.Join(ids, ",") + } + + // Choose a acl setting method + methods := 0 + if len(cannedAcl) != 0 { + methods += 1 + if validCannedAcl(cannedAcl) { + req.SetHeader(http.BCE_ACL, cannedAcl) + } + } + if len(grantRead) != 0 { + methods += 1 + req.SetHeader(http.BCE_GRANT_READ, joiner(grantRead)) + } + if len(grantFullControl) != 0 { + methods += 1 + req.SetHeader(http.BCE_GRANT_FULL_CONTROL, joiner(grantFullControl)) + } + if aclBody != nil { + methods += 1 + req.SetHeader(http.CONTENT_TYPE, bce.DEFAULT_CONTENT_TYPE) + req.SetBody(aclBody) + } + if methods != 1 { + return bce.NewBceClientError("BOS only support one acl setting method at the same time") + } + + // Do sending request + resp := &bce.BceResponse{} + if err := SendRequest(cli, req, resp); err != nil { + return err + } + if resp.IsFail() { + return resp.ServiceError() + } + defer func() { resp.Body().Close() }() + return nil +} + +// GetObjectAcl - get the ACL of the given object +// +// PARAMS: +// - cli: the client agent which can perform sending request +// - bucket: the bucket name +// - object: the object name +// RETURNS: +// - result: the object acl result object +// - error: nil if success otherwise the specific error +func GetObjectAcl(cli bce.Client, bucket, object string) (*GetObjectAclResult, error) { + req := &bce.BceRequest{} + req.SetUri(getObjectUri(bucket, object)) + req.SetMethod(http.GET) + req.SetParam("acl", "") + + resp := &bce.BceResponse{} + if err := SendRequest(cli, req, resp); err != nil { + return nil, err + } + if resp.IsFail() { + return nil, resp.ServiceError() + } + result := &GetObjectAclResult{} + if err := resp.ParseJsonBody(result); err != nil { + return nil, err + } + return result, nil +} + +// DeleteObjectAcl - delete the ACL of the given object +// +// PARAMS: +// - cli: the client agent which can perform 
sending request +// - bucket: the bucket name +// - object: the object name +// RETURNS: +// - error: nil if success otherwise the specific error +func DeleteObjectAcl(cli bce.Client, bucket, object string) error { + req := &bce.BceRequest{} + req.SetUri(getObjectUri(bucket, object)) + req.SetMethod(http.DELETE) + req.SetParam("acl", "") + resp := &bce.BceResponse{} + if err := SendRequest(cli, req, resp); err != nil { + return err + } + if resp.IsFail() { + return resp.ServiceError() + } + defer func() { resp.Body().Close() }() + return nil +} + +// RestoreObject - restore the archive object +// +// PARAMS: +// - cli: the client agent which can perform sending request +// - bucket: the bucket name +// - object: the object name +// - args: the restore args +// RETURNS: +// - error: nil if success otherwise the specific error +func RestoreObject(cli bce.Client, bucket string, object string, args ArchiveRestoreArgs) error { + req := &bce.BceRequest{} + req.SetUri(getObjectUri(bucket, object)) + req.SetParam("restore", "") + req.SetMethod(http.POST) + req.SetHeader(http.BCE_RESTORE_DAYS, strconv.Itoa(args.RestoreDays)) + req.SetHeader(http.BCE_RESTORE_TIER, args.RestoreTier) + + resp := &bce.BceResponse{} + if err := SendRequest(cli, req, resp); err != nil { + return err + } + if resp.IsFail() { + return resp.ServiceError() + } + + return nil +} + +// PutObjectSymlink - put the object from the string or the stream +// +// PARAMS: +// - cli: the client agent which can perform sending request +// - bucket: the bucket name of the object +// - object: the name of the object +// - symlinkKey: the name of the symlink +// - symlinkArgs: the optional arguments of this api +// RETURNS: +// - error: nil if ok otherwise the specific error +func PutObjectSymlink(cli bce.Client, bucket string, object string, symlinkKey string, symlinkArgs *PutSymlinkArgs) error { + req := &bce.BceRequest{} + req.SetUri(getObjectUri(bucket, symlinkKey)) + req.SetParam("symlink", "") + req.SetMethod(http.PUT) + if symlinkArgs != nil { + if len(symlinkArgs.ForbidOverwrite) != 0 { + if !validForbidOverwrite(symlinkArgs.ForbidOverwrite) { + return bce.NewBceClientError("invalid forbid overwrite val," + symlinkArgs.ForbidOverwrite) + } + req.SetHeader(http.BCE_FORBID_OVERWRITE, symlinkArgs.ForbidOverwrite) + } + + if len(symlinkArgs.StorageClass) != 0 { + if !validStorageClass(symlinkArgs.StorageClass) { + return bce.NewBceClientError("invalid storage class val," + symlinkArgs.StorageClass) + } + if symlinkArgs.StorageClass == STORAGE_CLASS_ARCHIVE { + return bce.NewBceClientError("archive storage class not support") + } + req.SetHeader(http.BCE_STORAGE_CLASS, symlinkArgs.StorageClass) + } + + if len(symlinkArgs.UserMeta) != 0 { + if err := setUserMetadata(req, symlinkArgs.UserMeta); err != nil { + return err + } + } + } + req.SetHeader(http.BCE_SYMLINK_TARGET, object) + + resp := &bce.BceResponse{} + if err := SendRequest(cli, req, resp); err != nil { + return err + } + if resp.IsFail() { + return resp.ServiceError() + } + defer func() { resp.Body().Close() }() + return nil +} + +// PutObjectSymlink - put the object from the string or the stream +// +// PARAMS: +// - cli: the client agent which can perform sending request +// - bucket: the bucket name of the object +// - symlinkKey: the name of the symlink +// RETURNS: +// - string: the name of the target object +// - error: nil if ok otherwise the specific error +func GetObjectSymlink(cli bce.Client, bucket string, symlinkKey string) (string, error) { + req := 
&bce.BceRequest{} + req.SetUri(getObjectUri(bucket, symlinkKey)) + req.SetParam("symlink", "") + req.SetMethod(http.GET) + resp := &bce.BceResponse{} + if err := SendRequest(cli, req, resp); err != nil { + return "", err + } + if resp.IsFail() { + return "", resp.ServiceError() + } + defer func() { resp.Body().Close() }() + return resp.Header(http.BCE_SYMLINK_TARGET), nil +} diff --git a/vendor/github.com/baidubce/bce-sdk-go/services/bos/api/util.go b/vendor/github.com/baidubce/bce-sdk-go/services/bos/api/util.go new file mode 100644 index 0000000000000..9be25733e105c --- /dev/null +++ b/vendor/github.com/baidubce/bce-sdk-go/services/bos/api/util.go @@ -0,0 +1,274 @@ +/* + * Copyright 2017 Baidu, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file + * except in compliance with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the + * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, + * either express or implied. See the License for the specific language governing permissions + * and limitations under the License. + */ + +// util.go - define the utilities for api package of BOS service + +package api + +import ( + "bytes" + net_http "net/http" + "strings" + + "github.com/baidubce/bce-sdk-go/bce" + "github.com/baidubce/bce-sdk-go/http" +) + +const ( + METADATA_DIRECTIVE_COPY = "copy" + METADATA_DIRECTIVE_REPLACE = "replace" + + STORAGE_CLASS_STANDARD = "STANDARD" + STORAGE_CLASS_STANDARD_IA = "STANDARD_IA" + STORAGE_CLASS_COLD = "COLD" + STORAGE_CLASS_ARCHIVE = "ARCHIVE" + + FETCH_MODE_SYNC = "sync" + FETCH_MODE_ASYNC = "async" + + CANNED_ACL_PRIVATE = "private" + CANNED_ACL_PUBLIC_READ = "public-read" + CANNED_ACL_PUBLIC_READ_WRITE = "public-read-write" + + RAW_CONTENT_TYPE = "application/octet-stream" + + THRESHOLD_100_CONTINUE = 1 << 20 // add 100 continue header if body size bigger than 1MB + + STATUS_ENABLED = "enabled" + STATUS_DISABLED = "disabled" + + ENCRYPTION_AES256 = "AES256" + + RESTORE_TIER_STANDARD = "Standard" //标准取回对象 + RESTORE_TIER_EXPEDITED = "Expedited" //快速取回对象 + + FORBID_OVERWRITE_FALSE = "false" + FORBID_OVERWRITE_TRUE = "true" +) + +var DEFAULT_CNAME_LIKE_LIST = []string{ + ".cdn.bcebos.com", +} + +var VALID_STORAGE_CLASS_TYPE = map[string]int{ + STORAGE_CLASS_STANDARD: 0, + STORAGE_CLASS_STANDARD_IA: 1, + STORAGE_CLASS_COLD: 2, + STORAGE_CLASS_ARCHIVE: 3, +} + +var VALID_RESTORE_TIER = map[string]int{ + RESTORE_TIER_STANDARD: 1, + RESTORE_TIER_EXPEDITED: 1, +} + +var VALID_FORBID_OVERWRITE = map[string]int{ + FORBID_OVERWRITE_FALSE: 1, + FORBID_OVERWRITE_TRUE: 1, +} + +var ( + GET_OBJECT_ALLOWED_RESPONSE_HEADERS = map[string]struct{}{ + "ContentDisposition": {}, + "ContentType": {}, + "ContentLanguage": {}, + "Expires": {}, + "CacheControl": {}, + "ContentEncoding": {}, + } +) + +func getBucketUri(bucketName string) string { + return bce.URI_PREFIX + bucketName +} + +func getObjectUri(bucketName, objectName string) string { + return bce.URI_PREFIX + bucketName + "/" + objectName +} + +func getCnameUri(uri string) string { + if len(uri) <= 0 { + return uri + } + slash_index := strings.Index(uri[1:], "/") + if slash_index == -1 { + return bce.URI_PREFIX + } else { + return uri[slash_index+1:] + } +} + +func validMetadataDirective(val string) bool { + if val == METADATA_DIRECTIVE_COPY || val == METADATA_DIRECTIVE_REPLACE { + return 
true + } + return false +} + +func validForbidOverwrite(val string) bool { + if _, ok := VALID_FORBID_OVERWRITE[val]; ok { + return true + } + return false +} + +func validStorageClass(val string) bool { + if _, ok := VALID_STORAGE_CLASS_TYPE[val]; ok { + return true + } + return false +} + +func validFetchMode(val string) bool { + if val == FETCH_MODE_SYNC || val == FETCH_MODE_ASYNC { + return true + } + return false +} + +func validCannedAcl(val string) bool { + if val == CANNED_ACL_PRIVATE || + val == CANNED_ACL_PUBLIC_READ || + val == CANNED_ACL_PUBLIC_READ_WRITE { + return true + } + return false +} + +func toHttpHeaderKey(key string) string { + var result bytes.Buffer + needToUpper := true + for _, c := range []byte(key) { + if needToUpper && (c >= 'a' && c <= 'z') { + result.WriteByte(c - 32) + needToUpper = false + } else if c == '-' { + result.WriteByte(c) + needToUpper = true + } else { + result.WriteByte(c) + } + } + return result.String() +} + +func setOptionalNullHeaders(req *bce.BceRequest, args map[string]string) { + for k, v := range args { + if len(v) == 0 { + continue + } + switch k { + case http.CACHE_CONTROL: + fallthrough + case http.CONTENT_DISPOSITION: + fallthrough + case http.CONTENT_ENCODING: + fallthrough + case http.CONTENT_RANGE: + fallthrough + case http.CONTENT_MD5: + fallthrough + case http.CONTENT_TYPE: + fallthrough + case http.EXPIRES: + fallthrough + case http.LAST_MODIFIED: + fallthrough + case http.ETAG: + fallthrough + case http.BCE_OBJECT_TYPE: + fallthrough + case http.BCE_NEXT_APPEND_OFFSET: + fallthrough + case http.BCE_CONTENT_SHA256: + fallthrough + case http.BCE_CONTENT_CRC32: + fallthrough + case http.BCE_COPY_SOURCE_RANGE: + fallthrough + case http.BCE_COPY_SOURCE_IF_MATCH: + fallthrough + case http.BCE_COPY_SOURCE_IF_NONE_MATCH: + fallthrough + case http.BCE_COPY_SOURCE_IF_MODIFIED_SINCE: + fallthrough + case http.BCE_COPY_SOURCE_IF_UNMODIFIED_SINCE: + req.SetHeader(k, v) + } + } +} + +func setUserMetadata(req *bce.BceRequest, meta map[string]string) error { + if meta == nil { + return nil + } + for k, v := range meta { + if len(k) == 0 { + continue + } + if len(k)+len(v) > 32*1024 { + return bce.NewBceClientError("MetadataTooLarge") + } + req.SetHeader(http.BCE_USER_METADATA_PREFIX+k, v) + } + return nil +} + +func isCnameLikeHost(host string) bool { + for _, suffix := range DEFAULT_CNAME_LIKE_LIST { + if strings.HasSuffix(strings.ToLower(host), suffix) { + return true + } + } + return false +} + +func SendRequest(cli bce.Client, req *bce.BceRequest, resp *bce.BceResponse) error { + var ( + err error + need_retry bool + ) + + req.SetEndpoint(cli.GetBceClientConfig().Endpoint) + origin_uri := req.Uri() + // set uri for cname or cdn endpoint + if cli.GetBceClientConfig().CnameEnabled || isCnameLikeHost(cli.GetBceClientConfig().Endpoint) { + req.SetUri(getCnameUri(origin_uri)) + } + + if err = cli.SendRequest(req, resp); err != nil { + if serviceErr, isServiceErr := err.(*bce.BceServiceError); isServiceErr { + if serviceErr.StatusCode == net_http.StatusInternalServerError || + serviceErr.StatusCode == net_http.StatusBadGateway || + serviceErr.StatusCode == net_http.StatusServiceUnavailable || + (serviceErr.StatusCode == net_http.StatusBadRequest && serviceErr.Code == "Http400") { + need_retry = true + } + } + if _, isClientErr := err.(*bce.BceClientError); isClientErr { + need_retry = true + } + // retry backup endpoint + if need_retry && cli.GetBceClientConfig().BackupEndpoint != "" { + req.SetEndpoint(cli.GetBceClientConfig().BackupEndpoint) 
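+			// The resend below reuses the already-built request against the
+			// backup endpoint, so a client configured with, for example
+			// (placeholder hosts):
+			//
+			//	conf := cli.GetBceClientConfig()
+			//	conf.Endpoint = "bj.bcebos.com"
+			//	conf.BackupEndpoint = "bd.bcebos.com"
+			//
+			// falls back transparently on 5xx responses, "Http400" errors
+			// and client-side errors.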
+			if cli.GetBceClientConfig().CnameEnabled || isCnameLikeHost(cli.GetBceClientConfig().BackupEndpoint) {
+				req.SetUri(getCnameUri(origin_uri))
+			}
+			if err = cli.SendRequest(req, resp); err != nil {
+				return err
+			}
+		}
+	}
+	return err
+}
diff --git a/vendor/github.com/baidubce/bce-sdk-go/services/bos/client.go b/vendor/github.com/baidubce/bce-sdk-go/services/bos/client.go
new file mode 100644
index 0000000000000..375899c7295e8
--- /dev/null
+++ b/vendor/github.com/baidubce/bce-sdk-go/services/bos/client.go
@@ -0,0 +1,2181 @@
+/*
+ * Copyright 2017 Baidu, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
+ * except in compliance with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the
+ * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
+ * either express or implied. See the License for the specific language governing permissions
+ * and limitations under the License.
+ */
+
+// client.go - define the client for BOS service
+
+// Package bos defines the BOS services of BCE. The supported APIs are all defined in sub-package
+// model with three types: 16 bucket APIs, 9 object APIs and 7 multipart APIs.
+package bos
+
+import (
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"net/http"
+	"os"
+
+	"github.com/baidubce/bce-sdk-go/auth"
+	"github.com/baidubce/bce-sdk-go/bce"
+	"github.com/baidubce/bce-sdk-go/services/bos/api"
+	"github.com/baidubce/bce-sdk-go/util/log"
+)
+
+const (
+	DEFAULT_SERVICE_DOMAIN = bce.DEFAULT_REGION + ".bcebos.com"
+	DEFAULT_MAX_PARALLEL   = 10
+	MULTIPART_ALIGN        = 1 << 20        // 1MB
+	MIN_MULTIPART_SIZE     = 1 << 20        // 1MB
+	DEFAULT_MULTIPART_SIZE = 12 * (1 << 20) // 12MB
+
+	MAX_PART_NUMBER        = 10000
+	MAX_SINGLE_PART_SIZE   = 5 * (1 << 30) // 5GB
+	MAX_SINGLE_OBJECT_SIZE = 5 * (1 << 40) // 5TB
+)
+
+// Client of BOS service is a kind of BceClient, so derived from BceClient
+type Client struct {
+	*bce.BceClient
+
+	// Fields that are used in the parallel operations of the BOS service
+	MaxParallel   int64
+	MultipartSize int64
+}
+
+// BosClientConfiguration defines the config components structure by user.
+type BosClientConfiguration struct {
+	Ak               string
+	Sk               string
+	Endpoint         string
+	RedirectDisabled bool
+}
+
+// NewClient makes the BOS service client with the default configuration.
+// Use `cli.Config.xxx` to access the config or change it to a non-default value.
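+//
+// A typical construction, with placeholder credentials and endpoint, might be:
+//
+//	cli, err := NewClient("<ak>", "<sk>", "bj.bcebos.com")
+//	if err != nil {
+//		return err
+//	}
+//	cli.MultipartSize = 16 * (1 << 20) // tune the parallel upload part size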
+func NewClient(ak, sk, endpoint string) (*Client, error) { + return NewClientWithConfig(&BosClientConfiguration{ + Ak: ak, + Sk: sk, + Endpoint: endpoint, + RedirectDisabled: false, + }) +} +func NewClientWithConfig(config *BosClientConfiguration) (*Client, error) { + var credentials *auth.BceCredentials + var err error + ak, sk, endpoint := config.Ak, config.Sk, config.Endpoint + if len(ak) == 0 && len(sk) == 0 { // to support public-read-write request + credentials, err = nil, nil + } else { + credentials, err = auth.NewBceCredentials(ak, sk) + if err != nil { + return nil, err + } + } + if len(endpoint) == 0 { + endpoint = DEFAULT_SERVICE_DOMAIN + } + defaultSignOptions := &auth.SignOptions{ + HeadersToSign: auth.DEFAULT_HEADERS_TO_SIGN, + ExpireSeconds: auth.DEFAULT_EXPIRE_SECONDS} + defaultConf := &bce.BceClientConfiguration{ + Endpoint: endpoint, + Region: bce.DEFAULT_REGION, + UserAgent: bce.DEFAULT_USER_AGENT, + Credentials: credentials, + SignOption: defaultSignOptions, + Retry: bce.DEFAULT_RETRY_POLICY, + ConnectionTimeoutInMillis: bce.DEFAULT_CONNECTION_TIMEOUT_IN_MILLIS, + RedirectDisabled: config.RedirectDisabled} + v1Signer := &auth.BceV1Signer{} + + client := &Client{bce.NewBceClient(defaultConf, v1Signer), + DEFAULT_MAX_PARALLEL, DEFAULT_MULTIPART_SIZE} + return client, nil +} + +// ListBuckets - list all buckets +// +// RETURNS: +// - *api.ListBucketsResult: the all buckets +// - error: the return error if any occurs +func (c *Client) ListBuckets() (*api.ListBucketsResult, error) { + return api.ListBuckets(c) +} + +// ListObjects - list all objects of the given bucket +// +// PARAMS: +// - bucket: the bucket name +// - args: the optional arguments to list objects +// RETURNS: +// - *api.ListObjectsResult: the all objects of the bucket +// - error: the return error if any occurs +func (c *Client) ListObjects(bucket string, + args *api.ListObjectsArgs) (*api.ListObjectsResult, error) { + return api.ListObjects(c, bucket, args) +} + +// SimpleListObjects - list all objects of the given bucket with simple arguments +// +// PARAMS: +// - bucket: the bucket name +// - prefix: the prefix for listing +// - maxKeys: the max number of result objects +// - marker: the marker to mark the beginning for the listing +// - delimiter: the delimiter for list objects +// RETURNS: +// - *api.ListObjectsResult: the all objects of the bucket +// - error: the return error if any occurs +func (c *Client) SimpleListObjects(bucket, prefix string, maxKeys int, marker, + delimiter string) (*api.ListObjectsResult, error) { + args := &api.ListObjectsArgs{delimiter, marker, maxKeys, prefix} + return api.ListObjects(c, bucket, args) +} + +// HeadBucket - test the given bucket existed and access authority +// +// PARAMS: +// - bucket: the bucket name +// RETURNS: +// - error: nil if exists and have authority otherwise the specific error +func (c *Client) HeadBucket(bucket string) error { + return api.HeadBucket(c, bucket) +} + +// DoesBucketExist - test the given bucket existed or not +// +// PARAMS: +// - bucket: the bucket name +// RETURNS: +// - bool: true if exists and false if not exists or occurs error +// - error: nil if exists or not exist, otherwise the specific error +func (c *Client) DoesBucketExist(bucket string) (bool, error) { + err := api.HeadBucket(c, bucket) + if err == nil { + return true, nil + } + if realErr, ok := err.(*bce.BceServiceError); ok { + if realErr.StatusCode == http.StatusForbidden { + return true, nil + } + if realErr.StatusCode == http.StatusNotFound { + return false, 
nil + } + } + return false, err +} + +// PutBucket - create a new bucket +// +// PARAMS: +// - bucket: the new bucket name +// RETURNS: +// - string: the location of the new bucket if create success +// - error: nil if create success otherwise the specific error +func (c *Client) PutBucket(bucket string) (string, error) { + return api.PutBucket(c, bucket) +} + +// DeleteBucket - delete a empty bucket +// +// PARAMS: +// - bucket: the bucket name to be deleted +// RETURNS: +// - error: nil if delete success otherwise the specific error +func (c *Client) DeleteBucket(bucket string) error { + return api.DeleteBucket(c, bucket) +} + +// GetBucketLocation - get the location fo the given bucket +// +// PARAMS: +// - bucket: the bucket name +// RETURNS: +// - string: the location of the bucket +// - error: nil if success otherwise the specific error +func (c *Client) GetBucketLocation(bucket string) (string, error) { + return api.GetBucketLocation(c, bucket) +} + +// PutBucketAcl - set the acl of the given bucket with acl body stream +// +// PARAMS: +// - bucket: the bucket name +// - aclBody: the acl json body stream +// RETURNS: +// - error: nil if success otherwise the specific error +func (c *Client) PutBucketAcl(bucket string, aclBody *bce.Body) error { + return api.PutBucketAcl(c, bucket, "", aclBody) +} + +// PutBucketAclFromCanned - set the canned acl of the given bucket +// +// PARAMS: +// - bucket: the bucket name +// - cannedAcl: the cannedAcl string +// RETURNS: +// - error: nil if success otherwise the specific error +func (c *Client) PutBucketAclFromCanned(bucket, cannedAcl string) error { + return api.PutBucketAcl(c, bucket, cannedAcl, nil) +} + +// PutBucketAclFromFile - set the acl of the given bucket with acl json file name +// +// PARAMS: +// - bucket: the bucket name +// - aclFile: the acl file name +// RETURNS: +// - error: nil if success otherwise the specific error +func (c *Client) PutBucketAclFromFile(bucket, aclFile string) error { + body, err := bce.NewBodyFromFile(aclFile) + if err != nil { + return err + } + return api.PutBucketAcl(c, bucket, "", body) +} + +// PutBucketAclFromString - set the acl of the given bucket with acl json string +// +// PARAMS: +// - bucket: the bucket name +// - aclString: the acl string with json format +// RETURNS: +// - error: nil if success otherwise the specific error +func (c *Client) PutBucketAclFromString(bucket, aclString string) error { + body, err := bce.NewBodyFromString(aclString) + if err != nil { + return err + } + return api.PutBucketAcl(c, bucket, "", body) +} + +// PutBucketAclFromStruct - set the acl of the given bucket with acl data structure +// +// PARAMS: +// - bucket: the bucket name +// - aclObj: the acl struct object +// RETURNS: +// - error: nil if success otherwise the specific error +func (c *Client) PutBucketAclFromStruct(bucket string, aclObj *api.PutBucketAclArgs) error { + jsonBytes, jsonErr := json.Marshal(aclObj) + if jsonErr != nil { + return jsonErr + } + body, err := bce.NewBodyFromBytes(jsonBytes) + if err != nil { + return err + } + return api.PutBucketAcl(c, bucket, "", body) +} + +// GetBucketAcl - get the acl of the given bucket +// +// PARAMS: +// - bucket: the bucket name +// RETURNS: +// - *api.GetBucketAclResult: the result of the bucket acl +// - error: nil if success otherwise the specific error +func (c *Client) GetBucketAcl(bucket string) (*api.GetBucketAclResult, error) { + return api.GetBucketAcl(c, bucket) +} + +// PutBucketLogging - set the loging setting of the given bucket with json 
+// PutBucketLogging - set the logging setting of the given bucket with json stream
+//
+// PARAMS:
+//     - bucket: the bucket name
+//     - body: the json body
+// RETURNS:
+//     - error: nil if success otherwise the specific error
+func (c *Client) PutBucketLogging(bucket string, body *bce.Body) error {
+	return api.PutBucketLogging(c, bucket, body)
+}
+
+// PutBucketLoggingFromString - set the logging setting of the given bucket with json string
+//
+// PARAMS:
+//     - bucket: the bucket name
+//     - logging: the json format string
+// RETURNS:
+//     - error: nil if success otherwise the specific error
+func (c *Client) PutBucketLoggingFromString(bucket, logging string) error {
+	body, err := bce.NewBodyFromString(logging)
+	if err != nil {
+		return err
+	}
+	return api.PutBucketLogging(c, bucket, body)
+}
+
+// PutBucketLoggingFromStruct - set the logging setting of the given bucket with args object
+//
+// PARAMS:
+//     - bucket: the bucket name
+//     - obj: the logging setting object
+// RETURNS:
+//     - error: nil if success otherwise the specific error
+func (c *Client) PutBucketLoggingFromStruct(bucket string, obj *api.PutBucketLoggingArgs) error {
+	jsonBytes, jsonErr := json.Marshal(obj)
+	if jsonErr != nil {
+		return jsonErr
+	}
+	body, err := bce.NewBodyFromBytes(jsonBytes)
+	if err != nil {
+		return err
+	}
+	return api.PutBucketLogging(c, bucket, body)
+}
+
+// GetBucketLogging - get the logging setting of the given bucket
+//
+// PARAMS:
+//     - bucket: the bucket name
+// RETURNS:
+//     - *api.GetBucketLoggingResult: the logging setting of the bucket
+//     - error: nil if success otherwise the specific error
+func (c *Client) GetBucketLogging(bucket string) (*api.GetBucketLoggingResult, error) {
+	return api.GetBucketLogging(c, bucket)
+}
+
+// DeleteBucketLogging - delete the logging setting of the given bucket
+//
+// PARAMS:
+//     - bucket: the bucket name
+// RETURNS:
+//     - error: nil if success otherwise the specific error
+func (c *Client) DeleteBucketLogging(bucket string) error {
+	return api.DeleteBucketLogging(c, bucket)
+}
+
+// PutBucketLifecycle - set the lifecycle rule of the given bucket with raw stream
+//
+// PARAMS:
+//     - bucket: the bucket name
+//     - lifecycle: the lifecycle rule json body
+// RETURNS:
+//     - error: nil if success otherwise the specific error
+func (c *Client) PutBucketLifecycle(bucket string, lifecycle *bce.Body) error {
+	return api.PutBucketLifecycle(c, bucket, lifecycle)
+}
+
+// PutBucketLifecycleFromString - set the lifecycle rule of the given bucket with string
+//
+// PARAMS:
+//     - bucket: the bucket name
+//     - lifecycle: the lifecycle rule json format string body
+// RETURNS:
+//     - error: nil if success otherwise the specific error
+func (c *Client) PutBucketLifecycleFromString(bucket, lifecycle string) error {
+	body, err := bce.NewBodyFromString(lifecycle)
+	if err != nil {
+		return err
+	}
+	return api.PutBucketLifecycle(c, bucket, body)
+}
+
+// GetBucketLifecycle - get the lifecycle rule of the given bucket
+//
+// PARAMS:
+//     - bucket: the bucket name
+// RETURNS:
+//     - *api.GetBucketLifecycleResult: the lifecycle rule of the bucket
+//     - error: nil if success otherwise the specific error
+func (c *Client) GetBucketLifecycle(bucket string) (*api.GetBucketLifecycleResult, error) {
+	return api.GetBucketLifecycle(c, bucket)
+}
+
+// DeleteBucketLifecycle - delete the lifecycle rule of the given bucket
+//
+// PARAMS:
+//     - bucket: the bucket name
+// RETURNS:
+//     - error: nil if success otherwise the specific error
+func (c *Client) DeleteBucketLifecycle(bucket string) error {
+	return api.DeleteBucketLifecycle(c, bucket)
+}
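+
+// Example (illustrative sketch, not part of the upstream SDK): attaching a
+// lifecycle rule from a JSON string. The rule document below only shows the
+// call pattern; consult the BOS documentation for the exact rule schema.
+//
+//	lifecycle := `{"rule":[{"id":"delete-old-logs","status":"enabled",` +
+//		`"resource":["example-bucket/logs/*"],` +
+//		`"condition":{"time":{"dateGreaterThan":"$(lastModified)+P30D"}},` +
+//		`"action":{"name":"DeleteObject"}}]}`
+//	if err := client.PutBucketLifecycleFromString("example-bucket", lifecycle); err != nil {
+//		// handle error
+//	}
+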
+// PutBucketStorageclass - set the storage class of the given bucket
+//
+// PARAMS:
+//     - bucket: the bucket name
+//     - storageClass: the storage class string value
+// RETURNS:
+//     - error: nil if success otherwise the specific error
+func (c *Client) PutBucketStorageclass(bucket, storageClass string) error {
+	return api.PutBucketStorageclass(c, bucket, storageClass)
+}
+
+// GetBucketStorageclass - get the storage class of the given bucket
+//
+// PARAMS:
+//     - bucket: the bucket name
+// RETURNS:
+//     - string: the storage class string value
+//     - error: nil if success otherwise the specific error
+func (c *Client) GetBucketStorageclass(bucket string) (string, error) {
+	return api.GetBucketStorageclass(c, bucket)
+}
+
+// PutBucketReplication - set the cross-region replication config of the given bucket
+//
+// PARAMS:
+//     - bucket: the bucket name
+//     - replicationConf: the replication config json body stream
+//     - replicationRuleId: the replication rule id composed of [0-9 A-Z a-z _ -]
+// RETURNS:
+//     - error: nil if success otherwise the specific error
+func (c *Client) PutBucketReplication(bucket string, replicationConf *bce.Body, replicationRuleId string) error {
+	return api.PutBucketReplication(c, bucket, replicationConf, replicationRuleId)
+}
+
+// PutBucketReplicationFromFile - set the bucket replication config with json file name
+//
+// PARAMS:
+//     - bucket: the bucket name
+//     - confFile: the config json file name
+//     - replicationRuleId: the replication rule id composed of [0-9 A-Z a-z _ -]
+// RETURNS:
+//     - error: nil if success otherwise the specific error
+func (c *Client) PutBucketReplicationFromFile(bucket, confFile string, replicationRuleId string) error {
+	body, err := bce.NewBodyFromFile(confFile)
+	if err != nil {
+		return err
+	}
+	return api.PutBucketReplication(c, bucket, body, replicationRuleId)
+}
+
+// PutBucketReplicationFromString - set the bucket replication config with json string
+//
+// PARAMS:
+//     - bucket: the bucket name
+//     - confString: the config string with json format
+//     - replicationRuleId: the replication rule id composed of [0-9 A-Z a-z _ -]
+// RETURNS:
+//     - error: nil if success otherwise the specific error
+func (c *Client) PutBucketReplicationFromString(bucket, confString string, replicationRuleId string) error {
+	body, err := bce.NewBodyFromString(confString)
+	if err != nil {
+		return err
+	}
+	return api.PutBucketReplication(c, bucket, body, replicationRuleId)
+}
+
+// PutBucketReplicationFromStruct - set the bucket replication config with struct
+//
+// PARAMS:
+//     - bucket: the bucket name
+//     - confObj: the replication config struct object
+//     - replicationRuleId: the replication rule id composed of [0-9 A-Z a-z _ -]
+// RETURNS:
+//     - error: nil if success otherwise the specific error
+func (c *Client) PutBucketReplicationFromStruct(bucket string,
+	confObj *api.PutBucketReplicationArgs, replicationRuleId string) error {
+	jsonBytes, jsonErr := json.Marshal(confObj)
+	if jsonErr != nil {
+		return jsonErr
+	}
+	body, err := bce.NewBodyFromBytes(jsonBytes)
+	if err != nil {
+		return err
+	}
+	return api.PutBucketReplication(c, bucket, body, replicationRuleId)
+}
+
+// GetBucketReplication - get the bucket replication config of the given bucket
+//
+// PARAMS:
+//     - bucket: the bucket name
+//     - replicationRuleId: the replication rule id composed of [0-9 A-Z a-z _ -]
+// RETURNS:
+//     - *api.GetBucketReplicationResult: the result of the bucket replication config
+//     - error: nil if success otherwise the specific error
+func (c *Client) GetBucketReplication(bucket string, replicationRuleId string) (*api.GetBucketReplicationResult, error) {
+	return api.GetBucketReplication(c, bucket, replicationRuleId)
+}
+
+// ListBucketReplication - get all replication configs of the given bucket
+//
+// PARAMS:
+//     - bucket: the bucket name
+// RETURNS:
+//     - *api.ListBucketReplicationResult: the list of the bucket replication configs
+//     - error: nil if success otherwise the specific error
+func (c *Client) ListBucketReplication(bucket string) (*api.ListBucketReplicationResult, error) {
+	return api.ListBucketReplication(c, bucket)
+}
+
+// DeleteBucketReplication - delete the bucket replication config of the given bucket
+//
+// PARAMS:
+//     - bucket: the bucket name
+//     - replicationRuleId: the replication rule id composed of [0-9 A-Z a-z _ -]
+// RETURNS:
+//     - error: nil if success otherwise the specific error
+func (c *Client) DeleteBucketReplication(bucket string, replicationRuleId string) error {
+	return api.DeleteBucketReplication(c, bucket, replicationRuleId)
+}
+
+// GetBucketReplicationProgress - get the bucket replication progress of the given bucket
+//
+// PARAMS:
+//     - bucket: the bucket name
+//     - replicationRuleId: the replication rule id composed of [0-9 A-Z a-z _ -]
+// RETURNS:
+//     - *api.GetBucketReplicationProgressResult: the progress of the bucket replication
+//     - error: nil if success otherwise the specific error
+func (c *Client) GetBucketReplicationProgress(bucket string, replicationRuleId string) (
+	*api.GetBucketReplicationProgressResult, error) {
+	return api.GetBucketReplicationProgress(c, bucket, replicationRuleId)
+}
+
+// PutBucketEncryption - set the bucket encryption config of the given bucket
+//
+// PARAMS:
+//     - bucket: the bucket name
+//     - algorithm: the encryption algorithm name
+// RETURNS:
+//     - error: nil if success otherwise the specific error
+func (c *Client) PutBucketEncryption(bucket, algorithm string) error {
+	return api.PutBucketEncryption(c, bucket, algorithm)
+}
+
+// GetBucketEncryption - get the bucket encryption config
+//
+// PARAMS:
+//     - bucket: the bucket name
+// RETURNS:
+//     - string: the encryption algorithm name
+//     - error: nil if success otherwise the specific error
+func (c *Client) GetBucketEncryption(bucket string) (string, error) {
+	return api.GetBucketEncryption(c, bucket)
+}
+
+// DeleteBucketEncryption - delete the bucket encryption config of the given bucket
+//
+// PARAMS:
+//     - bucket: the bucket name
+// RETURNS:
+//     - error: nil if success otherwise the specific error
+func (c *Client) DeleteBucketEncryption(bucket string) error {
+	return api.DeleteBucketEncryption(c, bucket)
+}
+
+// PutBucketStaticWebsite - set the bucket static website config
+//
+// PARAMS:
+//     - bucket: the bucket name
+//     - config: the static website config body stream
+// RETURNS:
+//     - error: nil if success otherwise the specific error
+func (c *Client) PutBucketStaticWebsite(bucket string, config *bce.Body) error {
+	return api.PutBucketStaticWebsite(c, bucket, config)
+}
+
+// PutBucketStaticWebsiteFromString - set the bucket static website config from json string
+//
+// PARAMS:
+//     - bucket: the bucket name
+//     - jsonConfig: the static website config json string
+// RETURNS:
+//     - error: nil if success otherwise the specific error
+func (c *Client) PutBucketStaticWebsiteFromString(bucket, jsonConfig string) error {
+	body, err := bce.NewBodyFromString(jsonConfig)
+	if err != nil {
+		return err
+	}
+	return api.PutBucketStaticWebsite(c, bucket, body)
+}
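+
+// Example (illustrative sketch, not part of the upstream SDK): enabling a
+// static website with a JSON config string; the "index"/"notFound" keys
+// mirror the fields of api.PutBucketStaticWebsiteArgs.
+//
+//	cfg := `{"index": "index.html", "notFound": "404.html"}`
+//	if err := client.PutBucketStaticWebsiteFromString("example-bucket", cfg); err != nil {
+//		// handle error
+//	}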
+
+// PutBucketStaticWebsiteFromStruct - set the bucket static website config from struct
+//
+// PARAMS:
+//     - bucket: the bucket name
+//     - confObj: the static website config object
+// RETURNS:
+//     - error: nil if success otherwise the specific error
+func (c *Client) PutBucketStaticWebsiteFromStruct(bucket string,
+	confObj *api.PutBucketStaticWebsiteArgs) error {
+	jsonBytes, jsonErr := json.Marshal(confObj)
+	if jsonErr != nil {
+		return jsonErr
+	}
+	body, err := bce.NewBodyFromBytes(jsonBytes)
+	if err != nil {
+		return err
+	}
+	return api.PutBucketStaticWebsite(c, bucket, body)
+}
+
+// SimplePutBucketStaticWebsite - set the bucket static website config with simple arguments
+//
+// PARAMS:
+//     - bucket: the bucket name
+//     - index: the static website config for index file name
+//     - notFound: the static website config for notFound file name
+// RETURNS:
+//     - error: nil if success otherwise the specific error
+func (c *Client) SimplePutBucketStaticWebsite(bucket, index, notFound string) error {
+	confObj := &api.PutBucketStaticWebsiteArgs{index, notFound}
+	return c.PutBucketStaticWebsiteFromStruct(bucket, confObj)
+}
+
+// GetBucketStaticWebsite - get the bucket static website config
+//
+// PARAMS:
+//     - bucket: the bucket name
+// RETURNS:
+//     - result: the static website config result object
+//     - error: nil if success otherwise the specific error
+func (c *Client) GetBucketStaticWebsite(bucket string) (
+	*api.GetBucketStaticWebsiteResult, error) {
+	return api.GetBucketStaticWebsite(c, bucket)
+}
+
+// DeleteBucketStaticWebsite - delete the bucket static website config of the given bucket
+//
+// PARAMS:
+//     - bucket: the bucket name
+// RETURNS:
+//     - error: nil if success otherwise the specific error
+func (c *Client) DeleteBucketStaticWebsite(bucket string) error {
+	return api.DeleteBucketStaticWebsite(c, bucket)
+}
+
+// PutBucketCors - set the bucket CORS config
+//
+// PARAMS:
+//     - bucket: the bucket name
+//     - config: the bucket CORS config body stream
+// RETURNS:
+//     - error: nil if success otherwise the specific error
+func (c *Client) PutBucketCors(bucket string, config *bce.Body) error {
+	return api.PutBucketCors(c, bucket, config)
+}
+
+// PutBucketCorsFromFile - set the bucket CORS config from json config file
+//
+// PARAMS:
+//     - bucket: the bucket name
+//     - filename: the bucket CORS json config file name
+// RETURNS:
+//     - error: nil if success otherwise the specific error
+func (c *Client) PutBucketCorsFromFile(bucket, filename string) error {
+	body, err := bce.NewBodyFromFile(filename)
+	if err != nil {
+		return err
+	}
+	return api.PutBucketCors(c, bucket, body)
+}
+
+// PutBucketCorsFromString - set the bucket CORS config from json config string
+//
+// PARAMS:
+//     - bucket: the bucket name
+//     - jsonConfig: the bucket CORS json config string
+// RETURNS:
+//     - error: nil if success otherwise the specific error
+func (c *Client) PutBucketCorsFromString(bucket, jsonConfig string) error {
+	body, err := bce.NewBodyFromString(jsonConfig)
+	if err != nil {
+		return err
+	}
+	return api.PutBucketCors(c, bucket, body)
+}
+
+// PutBucketCorsFromStruct - set the bucket CORS config from json config object
+//
+// PARAMS:
+//     - bucket: the bucket name
+//     - confObj: the bucket CORS json config object
+// RETURNS:
+//     - error: nil if success otherwise the specific error
+func (c *Client) PutBucketCorsFromStruct(bucket string, confObj *api.PutBucketCorsArgs) error {
+	jsonBytes, jsonErr := json.Marshal(confObj)
+	if jsonErr != nil {
+		return jsonErr
+	}
+	body, err := bce.NewBodyFromBytes(jsonBytes)
+	if err != nil {
+		return err
+	}
+	return api.PutBucketCors(c, bucket, body)
+}
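+
+// Example (illustrative sketch, not part of the upstream SDK): allowing
+// simple cross-origin GETs via a JSON config string. The document below is
+// only meant to show the call pattern; check the BOS CORS documentation for
+// the exact schema.
+//
+//	cors := `{"corsConfiguration":[{"allowedOrigins":["*"],` +
+//		`"allowedMethods":["GET","HEAD"],"maxAgeSeconds":1800}]}`
+//	if err := client.PutBucketCorsFromString("example-bucket", cors); err != nil {
+//		// handle error
+//	}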
+
+// GetBucketCors - get the bucket CORS config
+//
+// PARAMS:
+//     - bucket: the bucket name
+// RETURNS:
+//     - result: the bucket CORS config result object
+//     - error: nil if success otherwise the specific error
+func (c *Client) GetBucketCors(bucket string) (*api.GetBucketCorsResult, error) {
+	return api.GetBucketCors(c, bucket)
+}
+
+// DeleteBucketCors - delete the bucket CORS config of the given bucket
+//
+// PARAMS:
+//     - bucket: the bucket name
+// RETURNS:
+//     - error: nil if success otherwise the specific error
+func (c *Client) DeleteBucketCors(bucket string) error {
+	return api.DeleteBucketCors(c, bucket)
+}
+
+// PutBucketCopyrightProtection - set the copyright protection config of the given bucket
+//
+// PARAMS:
+//     - bucket: the bucket name
+//     - resources: the resource items in the bucket to be protected
+// RETURNS:
+//     - error: nil if success otherwise the specific error
+func (c *Client) PutBucketCopyrightProtection(bucket string, resources ...string) error {
+	return api.PutBucketCopyrightProtection(c, bucket, resources...)
+}
+
+// GetBucketCopyrightProtection - get the bucket copyright protection config
+//
+// PARAMS:
+//     - bucket: the bucket name
+// RETURNS:
+//     - result: the bucket copyright protection config resources
+//     - error: nil if success otherwise the specific error
+func (c *Client) GetBucketCopyrightProtection(bucket string) ([]string, error) {
+	return api.GetBucketCopyrightProtection(c, bucket)
+}
+
+// DeleteBucketCopyrightProtection - delete the bucket copyright protection config
+//
+// PARAMS:
+//     - bucket: the bucket name
+// RETURNS:
+//     - error: nil if success otherwise the specific error
+func (c *Client) DeleteBucketCopyrightProtection(bucket string) error {
+	return api.DeleteBucketCopyrightProtection(c, bucket)
+}
+
+// PutObject - upload a new object or overwrite the existing object with raw stream
+//
+// PARAMS:
+//     - bucket: the name of the bucket to store the object
+//     - object: the name of the object
+//     - body: the object content body
+//     - args: the optional arguments
+// RETURNS:
+//     - string: etag of the uploaded object
+//     - error: the uploaded error if any occurs
+func (c *Client) PutObject(bucket, object string, body *bce.Body,
+	args *api.PutObjectArgs) (string, error) {
+	return api.PutObject(c, bucket, object, body, args)
+}
+
+// BasicPutObject - the basic interface of uploading an object
+//
+// PARAMS:
+//     - bucket: the name of the bucket to store the object
+//     - object: the name of the object
+//     - body: the object content body
+// RETURNS:
+//     - string: etag of the uploaded object
+//     - error: the uploaded error if any occurs
+func (c *Client) BasicPutObject(bucket, object string, body *bce.Body) (string, error) {
+	return api.PutObject(c, bucket, object, body, nil)
+}
+
+// PutObjectFromBytes - upload a new object or overwrite the existing object from a byte array
+//
+// PARAMS:
+//     - bucket: the name of the bucket to store the object
+//     - object: the name of the object
+//     - bytesArr: the content byte array
+//     - args: the optional arguments
+// RETURNS:
+//     - string: etag of the uploaded object
+//     - error: the uploaded error if any occurs
+func (c *Client) PutObjectFromBytes(bucket, object string, bytesArr []byte,
+	args *api.PutObjectArgs) (string, error) {
+	body, err := bce.NewBodyFromBytes(bytesArr)
+	if err != nil {
+		return "", err
+	}
+	return api.PutObject(c, bucket, object, body, args)
+}
+
+// PutObjectFromString - upload a new object or overwrite the existing object from a string
+//
+// PARAMS:
+//     - bucket: the name of the bucket to store the object
+//     - object: the name of the object
+//     - content: the content string
+//     - args: the optional arguments
+// RETURNS:
+//     - string: etag of the uploaded object
+//     - error: the uploaded error if any occurs
+func (c *Client) PutObjectFromString(bucket, object, content string,
+	args *api.PutObjectArgs) (string, error) {
+	body, err := bce.NewBodyFromString(content)
+	if err != nil {
+		return "", err
+	}
+	return api.PutObject(c, bucket, object, body, args)
+}
+
+// PutObjectFromFile - upload a new object or overwrite the existing object from a local file
+//
+// PARAMS:
+//     - bucket: the name of the bucket to store the object
+//     - object: the name of the object
+//     - fileName: the local file full path name
+//     - args: the optional arguments
+// RETURNS:
+//     - string: etag of the uploaded object
+//     - error: the uploaded error if any occurs
+func (c *Client) PutObjectFromFile(bucket, object, fileName string,
+	args *api.PutObjectArgs) (string, error) {
+	body, err := bce.NewBodyFromFile(fileName)
+	if err != nil {
+		return "", err
+	}
+	return api.PutObject(c, bucket, object, body, args)
+}
+
+// PutObjectFromStream - upload a new object or overwrite the existing object from a stream
+//
+// PARAMS:
+//     - bucket: the name of the bucket to store the object
+//     - object: the name of the object
+//     - reader: the io.Reader from which the object content is read
+//     - args: the optional arguments
+// RETURNS:
+//     - string: etag of the uploaded object
+//     - error: the uploaded error if any occurs
+func (c *Client) PutObjectFromStream(bucket, object string, reader io.Reader,
+	args *api.PutObjectArgs) (string, error) {
+	body, err := bce.NewBodyFromSizedReader(reader, -1)
+	if err != nil {
+		return "", err
+	}
+	return api.PutObject(c, bucket, object, body, args)
+}
+
+// CopyObject - copy a remote object to another one
+//
+// PARAMS:
+//     - bucket: the name of the destination bucket
+//     - object: the name of the destination object
+//     - srcBucket: the name of the source bucket
+//     - srcObject: the name of the source object
+//     - args: the optional arguments for copying object which are MetadataDirective, StorageClass,
+//       IfMatch, IfNoneMatch, IfModifiedSince, IfUnmodifiedSince
+// RETURNS:
+//     - *api.CopyObjectResult: result struct which contains "ETag" and "LastModified" fields
+//     - error: any error if it occurs
+func (c *Client) CopyObject(bucket, object, srcBucket, srcObject string,
+	args *api.CopyObjectArgs) (*api.CopyObjectResult, error) {
+	source := fmt.Sprintf("/%s/%s", srcBucket, srcObject)
+	return api.CopyObject(c, bucket, object, source, args)
+}
+
+// BasicCopyObject - the basic interface of copying an object to another one
+//
+// PARAMS:
+//     - bucket: the name of the destination bucket
+//     - object: the name of the destination object
+//     - srcBucket: the name of the source bucket
+//     - srcObject: the name of the source object
+// RETURNS:
+//     - *api.CopyObjectResult: result struct which contains "ETag" and "LastModified" fields
+//     - error: any error if it occurs
+func (c *Client) BasicCopyObject(bucket, object, srcBucket,
+	srcObject string) (*api.CopyObjectResult, error) {
+	source := fmt.Sprintf("/%s/%s", srcBucket, srcObject)
+	return api.CopyObject(c, bucket, object, source, nil)
+}
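+
+// Example (illustrative sketch, not part of the upstream SDK): uploading a
+// small object from a string and making a server-side copy of it; the bucket
+// and key names are placeholders.
+//
+//	etag, err := client.PutObjectFromString("example-bucket", "greeting.txt", "hello", nil)
+//	if err != nil {
+//		// handle upload error
+//	}
+//	fmt.Println("uploaded etag:", etag)
+//	_, err = client.BasicCopyObject("example-bucket", "greeting-copy.txt",
+//		"example-bucket", "greeting.txt")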
+
+// GetObject - get the given object with raw stream return
+//
+// PARAMS:
+//     - bucket: the name of the bucket
+//     - object: the name of the object
+//     - responseHeaders: the optional response headers to get the given object
+//     - ranges: the optional range start and end to get the given object
+// RETURNS:
+//     - *api.GetObjectResult: result struct which contains "Body" and header fields
+//       for details reference https://cloud.baidu.com/doc/BOS/API.html#GetObject.E6.8E.A5.E5.8F.A3
+//     - error: any error if it occurs
+func (c *Client) GetObject(bucket, object string, responseHeaders map[string]string,
+	ranges ...int64) (*api.GetObjectResult, error) {
+	return api.GetObject(c, bucket, object, responseHeaders, ranges...)
+}
+
+// BasicGetObject - the basic interface of getting the given object
+//
+// PARAMS:
+//     - bucket: the name of the bucket
+//     - object: the name of the object
+// RETURNS:
+//     - *api.GetObjectResult: result struct which contains "Body" and header fields
+//       for details reference https://cloud.baidu.com/doc/BOS/API.html#GetObject.E6.8E.A5.E5.8F.A3
+//     - error: any error if it occurs
+func (c *Client) BasicGetObject(bucket, object string) (*api.GetObjectResult, error) {
+	return api.GetObject(c, bucket, object, nil)
+}
+
+// BasicGetObjectToFile - use basic interface to get the given object to the given file path
+//
+// PARAMS:
+//     - bucket: the name of the bucket
+//     - object: the name of the object
+//     - filePath: the file path to store the object content
+// RETURNS:
+//     - error: any error if it occurs
+func (c *Client) BasicGetObjectToFile(bucket, object, filePath string) error {
+	res, err := api.GetObject(c, bucket, object, nil)
+	if err != nil {
+		return err
+	}
+	defer res.Body.Close()
+
+	file, fileErr := os.OpenFile(filePath, os.O_TRUNC|os.O_CREATE|os.O_WRONLY, 0644)
+	if fileErr != nil {
+		return fileErr
+	}
+	defer file.Close()
+
+	written, writeErr := io.CopyN(file, res.Body, res.ContentLength)
+	if writeErr != nil {
+		return writeErr
+	}
+	if written != res.ContentLength {
+		return fmt.Errorf("written content size does not match the response content")
+	}
+	return nil
+}
+
+// GetObjectMeta - get the given object metadata
+//
+// PARAMS:
+//     - bucket: the name of the bucket
+//     - object: the name of the object
+// RETURNS:
+//     - *api.GetObjectMetaResult: metadata result, for details reference
+//       https://cloud.baidu.com/doc/BOS/API.html#GetObjectMeta.E6.8E.A5.E5.8F.A3
+//     - error: any error if it occurs
+func (c *Client) GetObjectMeta(bucket, object string) (*api.GetObjectMetaResult, error) {
+	return api.GetObjectMeta(c, bucket, object)
+}
+
+// SelectObject - select the object content
+//
+// PARAMS:
+//     - bucket: the name of the bucket
+//     - object: the name of the object
+//     - args: the optional arguments to select the object
+// RETURNS:
+//     - *api.SelectObjectResult: select object result
+//     - error: any error if it occurs
+func (c *Client) SelectObject(bucket, object string, args *api.SelectObjectArgs) (*api.SelectObjectResult, error) {
+	return api.SelectObject(c, bucket, object, args)
+}
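+
+// Example (illustrative sketch, not part of the upstream SDK): downloading
+// only the first kilobyte of an object via the optional range arguments.
+//
+//	res, err := client.GetObject("example-bucket", "big-file.bin", nil, 0, 1023)
+//	if err == nil {
+//		defer res.Body.Close()
+//		data, _ := io.ReadAll(res.Body)
+//		fmt.Printf("read %d bytes\n", len(data))
+//	}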
+
+// FetchObject - fetch the object content from the given source and store
+//
+// PARAMS:
+//     - bucket: the name of the bucket to store
+//     - object: the name of the object to store
+//     - source: fetch source url
+//     - args: the optional arguments to fetch the object
+// RETURNS:
+//     - *api.FetchObjectResult: result struct with Code, Message, RequestId and JobId fields
+//     - error: any error if it occurs
+func (c *Client) FetchObject(bucket, object, source string,
+	args *api.FetchObjectArgs) (*api.FetchObjectResult, error) {
+	return api.FetchObject(c, bucket, object, source, args)
+}
+
+// BasicFetchObject - the basic interface of the fetch object api
+//
+// PARAMS:
+//     - bucket: the name of the bucket to store
+//     - object: the name of the object to store
+//     - source: fetch source url
+// RETURNS:
+//     - *api.FetchObjectResult: result struct with Code, Message, RequestId and JobId fields
+//     - error: any error if it occurs
+func (c *Client) BasicFetchObject(bucket, object, source string) (*api.FetchObjectResult, error) {
+	return api.FetchObject(c, bucket, object, source, nil)
+}
+
+// SimpleFetchObject - fetch object with simple arguments interface
+//
+// PARAMS:
+//     - bucket: the name of the bucket to store
+//     - object: the name of the object to store
+//     - source: fetch source url
+//     - mode: fetch mode which supports sync and async
+//     - storageClass: the storage class of the fetched object
+// RETURNS:
+//     - *api.FetchObjectResult: result struct with Code, Message, RequestId and JobId fields
+//     - error: any error if it occurs
+func (c *Client) SimpleFetchObject(bucket, object, source, mode,
+	storageClass string) (*api.FetchObjectResult, error) {
+	args := &api.FetchObjectArgs{mode, storageClass}
+	return api.FetchObject(c, bucket, object, source, args)
+}
+
+// AppendObject - append the given content to a new or existing object which is appendable
+//
+// PARAMS:
+//     - bucket: the name of the bucket
+//     - object: the name of the object
+//     - content: the append object stream
+//     - args: the optional arguments to append object
+// RETURNS:
+//     - *api.AppendObjectResult: the result of the appended object
+//     - error: any error if it occurs
+func (c *Client) AppendObject(bucket, object string, content *bce.Body,
+	args *api.AppendObjectArgs) (*api.AppendObjectResult, error) {
+	return api.AppendObject(c, bucket, object, content, args)
+}
+
+// SimpleAppendObject - the interface to append object with simple offset argument
+//
+// PARAMS:
+//     - bucket: the name of the bucket
+//     - object: the name of the object
+//     - content: the append object stream
+//     - offset: the offset of where to append
+// RETURNS:
+//     - *api.AppendObjectResult: the result of the appended object
+//     - error: any error if it occurs
+func (c *Client) SimpleAppendObject(bucket, object string, content *bce.Body,
+	offset int64) (*api.AppendObjectResult, error) {
+	return api.AppendObject(c, bucket, object, content, &api.AppendObjectArgs{Offset: offset})
+}
+
+// SimpleAppendObjectFromString - the simple interface of appending an object from a string
+//
+// PARAMS:
+//     - bucket: the name of the bucket
+//     - object: the name of the object
+//     - content: the object string to append
+//     - offset: the offset of where to append
+// RETURNS:
+//     - *api.AppendObjectResult: the result of the appended object
+//     - error: any error if it occurs
+func (c *Client) SimpleAppendObjectFromString(bucket, object, content string,
+	offset int64) (*api.AppendObjectResult, error) {
+	body, err := bce.NewBodyFromString(content)
+	if err != nil {
+		return nil, err
+	}
+	return api.AppendObject(c, bucket, object, body, &api.AppendObjectArgs{Offset: offset})
+}
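+
+// Example (illustrative sketch, not part of the upstream SDK): building an
+// appendable object from two string chunks. It assumes the result's
+// NextAppendOffset field reports where the next append should start; verify
+// the field name against api.AppendObjectResult before relying on it.
+//
+//	res, err := client.SimpleAppendObjectFromString("example-bucket", "app.log", "line 1\n", 0)
+//	if err == nil {
+//		_, err = client.SimpleAppendObjectFromString("example-bucket", "app.log",
+//			"line 2\n", res.NextAppendOffset)
+//	}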
+
+// SimpleAppendObjectFromFile - the simple interface of appending an object from a file
+//
+// PARAMS:
+//     - bucket: the name of the bucket
+//     - object: the name of the object
+//     - filePath: the full file path
+//     - offset: the offset of where to append
+// RETURNS:
+//     - *api.AppendObjectResult: the result of the appended object
+//     - error: any error if it occurs
+func (c *Client) SimpleAppendObjectFromFile(bucket, object, filePath string,
+	offset int64) (*api.AppendObjectResult, error) {
+	body, err := bce.NewBodyFromFile(filePath)
+	if err != nil {
+		return nil, err
+	}
+	return api.AppendObject(c, bucket, object, body, &api.AppendObjectArgs{Offset: offset})
+}
+
+// DeleteObject - delete the given object
+//
+// PARAMS:
+//     - bucket: the name of the bucket
+//     - object: the name of the object to delete
+// RETURNS:
+//     - error: any error if it occurs
+func (c *Client) DeleteObject(bucket, object string) error {
+	return api.DeleteObject(c, bucket, object)
+}
+
+// DeleteMultipleObjects - delete a list of objects
+//
+// PARAMS:
+//     - bucket: the name of the bucket
+//     - objectListStream: the object list stream to be deleted
+// RETURNS:
+//     - *api.DeleteMultipleObjectsResult: the delete information
+//     - error: any error if it occurs
+func (c *Client) DeleteMultipleObjects(bucket string,
+	objectListStream *bce.Body) (*api.DeleteMultipleObjectsResult, error) {
+	return api.DeleteMultipleObjects(c, bucket, objectListStream)
+}
+
+// DeleteMultipleObjectsFromString - delete a list of objects with json format string
+//
+// PARAMS:
+//     - bucket: the name of the bucket
+//     - objectListString: the object list string to be deleted
+// RETURNS:
+//     - *api.DeleteMultipleObjectsResult: the delete information
+//     - error: any error if it occurs
+func (c *Client) DeleteMultipleObjectsFromString(bucket,
+	objectListString string) (*api.DeleteMultipleObjectsResult, error) {
+	body, err := bce.NewBodyFromString(objectListString)
+	if err != nil {
+		return nil, err
+	}
+	return api.DeleteMultipleObjects(c, bucket, body)
+}
+
+// DeleteMultipleObjectsFromStruct - delete a list of objects with object list struct
+//
+// PARAMS:
+//     - bucket: the name of the bucket
+//     - objectListStruct: the object list struct to be deleted
+// RETURNS:
+//     - *api.DeleteMultipleObjectsResult: the delete information
+//     - error: any error if it occurs
+func (c *Client) DeleteMultipleObjectsFromStruct(bucket string,
+	objectListStruct *api.DeleteMultipleObjectsArgs) (*api.DeleteMultipleObjectsResult, error) {
+	jsonBytes, jsonErr := json.Marshal(objectListStruct)
+	if jsonErr != nil {
+		return nil, jsonErr
+	}
+	body, err := bce.NewBodyFromBytes(jsonBytes)
+	if err != nil {
+		return nil, err
+	}
+	return api.DeleteMultipleObjects(c, bucket, body)
+}
+
+// DeleteMultipleObjectsFromKeyList - delete a list of objects with given key string array
+//
+// PARAMS:
+//     - bucket: the name of the bucket
+//     - keyList: the key string list to be deleted
+// RETURNS:
+//     - *api.DeleteMultipleObjectsResult: the delete information
+//     - error: any error if it occurs
+func (c *Client) DeleteMultipleObjectsFromKeyList(bucket string,
+	keyList []string) (*api.DeleteMultipleObjectsResult, error) {
+	if len(keyList) == 0 {
+		return nil, fmt.Errorf("the key list to be deleted is empty")
+	}
+	args := make([]api.DeleteObjectArgs, len(keyList))
+	for i, k := range keyList {
+		args[i].Key = k
+	}
+	argsContainer := &api.DeleteMultipleObjectsArgs{args}
+
+	jsonBytes, jsonErr := json.Marshal(argsContainer)
+	if jsonErr != nil {
+		return nil, jsonErr
+	}
+	body, err := bce.NewBodyFromBytes(jsonBytes)
+	if err != nil {
+		return nil, err
+	}
+	return api.DeleteMultipleObjects(c, bucket, body)
+}
+
+// InitiateMultipartUpload - initiate a multipart upload to get an upload ID
+//
+// PARAMS:
+//     - bucket: the bucket name
+//     - object: the object name
+//     - contentType: the content type of the object to be uploaded which should be specified,
+//       otherwise use the default(application/octet-stream)
+//     - args: the optional arguments
+// RETURNS:
+//     - *InitiateMultipartUploadResult: the result data structure
+//     - error: nil if ok otherwise the specific error
+func (c *Client) InitiateMultipartUpload(bucket, object, contentType string,
+	args *api.InitiateMultipartUploadArgs) (*api.InitiateMultipartUploadResult, error) {
+	return api.InitiateMultipartUpload(c, bucket, object, contentType, args)
+}
+
+// BasicInitiateMultipartUpload - basic interface to initiate a multipart upload
+//
+// PARAMS:
+//     - bucket: the bucket name
+//     - object: the object name
+// RETURNS:
+//     - *InitiateMultipartUploadResult: the result data structure
+//     - error: nil if ok otherwise the specific error
+func (c *Client) BasicInitiateMultipartUpload(bucket,
+	object string) (*api.InitiateMultipartUploadResult, error) {
+	return api.InitiateMultipartUpload(c, bucket, object, "", nil)
+}
+
+// UploadPart - upload the single part in the multipart upload process
+//
+// PARAMS:
+//     - bucket: the bucket name
+//     - object: the object name
+//     - uploadId: the multipart upload id
+//     - partNumber: the current part number
+//     - content: the uploaded part content
+//     - args: the optional arguments
+// RETURNS:
+//     - string: the etag of the uploaded part
+//     - error: nil if ok otherwise the specific error
+func (c *Client) UploadPart(bucket, object, uploadId string, partNumber int,
+	content *bce.Body, args *api.UploadPartArgs) (string, error) {
+	return api.UploadPart(c, bucket, object, uploadId, partNumber, content, args)
+}
+
+// BasicUploadPart - basic interface to upload the single part in the multipart upload process
+//
+// PARAMS:
+//     - bucket: the bucket name
+//     - object: the object name
+//     - uploadId: the multipart upload id
+//     - partNumber: the current part number
+//     - content: the uploaded part content
+// RETURNS:
+//     - string: the etag of the uploaded part
+//     - error: nil if ok otherwise the specific error
+func (c *Client) BasicUploadPart(bucket, object, uploadId string, partNumber int,
+	content *bce.Body) (string, error) {
+	return api.UploadPart(c, bucket, object, uploadId, partNumber, content, nil)
+}
+
+// UploadPartFromBytes - upload the single part in the multipart upload process
+//
+// PARAMS:
+//     - bucket: the bucket name
+//     - object: the object name
+//     - uploadId: the multipart upload id
+//     - partNumber: the current part number
+//     - content: the uploaded part content
+//     - args: the optional arguments
+// RETURNS:
+//     - string: the etag of the uploaded part
+//     - error: nil if ok otherwise the specific error
+func (c *Client) UploadPartFromBytes(bucket, object, uploadId string, partNumber int,
+	content []byte, args *api.UploadPartArgs) (string, error) {
+	return api.UploadPartFromBytes(c, bucket, object, uploadId, partNumber, content, args)
+}
+
+// UploadPartCopy - upload a part by copying from an existing object
+//
+// PARAMS:
+//     - bucket: the destination bucket name
+//     - object: the destination object name
+//     - srcBucket: the source bucket
+//     - srcObject: the source object
+//     - uploadId: the multipart upload id
+//     - partNumber: the current part number
+//     - args: the optional arguments
+// RETURNS:
+//     - *CopyObjectResult: the lastModified and eTag of the part
+//     - error: nil if ok otherwise the specific error
+func (c *Client) UploadPartCopy(bucket, object, srcBucket, srcObject, uploadId string,
+	partNumber int, args *api.UploadPartCopyArgs) (*api.CopyObjectResult, error) {
+	source := fmt.Sprintf("/%s/%s", srcBucket, srcObject)
+	return api.UploadPartCopy(c, bucket, object, source, uploadId, partNumber, args)
+}
+
+// BasicUploadPartCopy - basic interface to upload a part by copying from an existing object
+//
+// PARAMS:
+//     - bucket: the destination bucket name
+//     - object: the destination object name
+//     - srcBucket: the source bucket
+//     - srcObject: the source object
+//     - uploadId: the multipart upload id
+//     - partNumber: the current part number
+// RETURNS:
+//     - *CopyObjectResult: the lastModified and eTag of the part
+//     - error: nil if ok otherwise the specific error
+func (c *Client) BasicUploadPartCopy(bucket, object, srcBucket, srcObject, uploadId string,
+	partNumber int) (*api.CopyObjectResult, error) {
+	source := fmt.Sprintf("/%s/%s", srcBucket, srcObject)
+	return api.UploadPartCopy(c, bucket, object, source, uploadId, partNumber, nil)
+}
+
+// CompleteMultipartUpload - finish a multipart upload operation with parts stream
+//
+// PARAMS:
+//     - bucket: the destination bucket name
+//     - object: the destination object name
+//     - uploadId: the multipart upload id
+//     - body: all parts info json body stream
+//     - args: the optional complete multipart arguments
+// RETURNS:
+//     - *CompleteMultipartUploadResult: the result data
+//     - error: nil if ok otherwise the specific error
+func (c *Client) CompleteMultipartUpload(bucket, object, uploadId string,
+	body *bce.Body, args *api.CompleteMultipartUploadArgs) (*api.CompleteMultipartUploadResult, error) {
+	return api.CompleteMultipartUpload(c, bucket, object, uploadId, body, args)
+}
+
+// CompleteMultipartUploadFromStruct - finish a multipart upload operation with parts struct
+//
+// PARAMS:
+//     - bucket: the destination bucket name
+//     - object: the destination object name
+//     - uploadId: the multipart upload id
+//     - args: args info struct object
+// RETURNS:
+//     - *CompleteMultipartUploadResult: the result data
+//     - error: nil if ok otherwise the specific error
+func (c *Client) CompleteMultipartUploadFromStruct(bucket, object, uploadId string,
+	args *api.CompleteMultipartUploadArgs) (*api.CompleteMultipartUploadResult, error) {
+	jsonBytes, jsonErr := json.Marshal(args)
+	if jsonErr != nil {
+		return nil, jsonErr
+	}
+	body, err := bce.NewBodyFromBytes(jsonBytes)
+	if err != nil {
+		return nil, err
+	}
+	return api.CompleteMultipartUpload(c, bucket, object, uploadId, body, args)
+}
+
+// AbortMultipartUpload - abort a multipart upload operation
+//
+// PARAMS:
+//     - bucket: the destination bucket name
+//     - object: the destination object name
+//     - uploadId: the multipart upload id
+// RETURNS:
+//     - error: nil if ok otherwise the specific error
+func (c *Client) AbortMultipartUpload(bucket, object, uploadId string) error {
+	return api.AbortMultipartUpload(c, bucket, object, uploadId)
+}
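+
+// Example (illustrative sketch, not part of the upstream SDK): the manual
+// multipart flow with a single part; a real upload would loop over parts and
+// abort on any failure.
+//
+//	init, err := client.BasicInitiateMultipartUpload("example-bucket", "big.bin")
+//	if err != nil {
+//		// handle error
+//	}
+//	part, _ := bce.NewBodyFromString("part one payload")
+//	etag, err := client.BasicUploadPart("example-bucket", "big.bin", init.UploadId, 1, part)
+//	if err != nil {
+//		client.AbortMultipartUpload("example-bucket", "big.bin", init.UploadId)
+//	} else {
+//		args := &api.CompleteMultipartUploadArgs{
+//			Parts: []api.UploadInfoType{{PartNumber: 1, ETag: etag}},
+//		}
+//		_, err = client.CompleteMultipartUploadFromStruct("example-bucket", "big.bin",
+//			init.UploadId, args)
+//	}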
+
+// ListParts - list the successfully uploaded parts info by upload id
+//
+// PARAMS:
+//     - bucket: the destination bucket name
+//     - object: the destination object name
+//     - uploadId: the multipart upload id
+//     - args: the optional arguments
+// RETURNS:
+//     - *ListPartsResult: the uploaded parts info result
+//     - error: nil if ok otherwise the specific error
+func (c *Client) ListParts(bucket, object, uploadId string,
+	args *api.ListPartsArgs) (*api.ListPartsResult, error) {
+	return api.ListParts(c, bucket, object, uploadId, args)
+}
+
+// BasicListParts - basic interface to list the successfully uploaded parts info by upload id
+//
+// PARAMS:
+//     - bucket: the destination bucket name
+//     - object: the destination object name
+//     - uploadId: the multipart upload id
+// RETURNS:
+//     - *ListPartsResult: the uploaded parts info result
+//     - error: nil if ok otherwise the specific error
+func (c *Client) BasicListParts(bucket, object, uploadId string) (*api.ListPartsResult, error) {
+	return api.ListParts(c, bucket, object, uploadId, nil)
+}
+
+// ListMultipartUploads - list the unfinished multipart uploads of the given bucket
+//
+// PARAMS:
+//     - bucket: the destination bucket name
+//     - args: the optional arguments
+// RETURNS:
+//     - *ListMultipartUploadsResult: the unfinished multipart uploads info result
+//     - error: nil if ok otherwise the specific error
+func (c *Client) ListMultipartUploads(bucket string,
+	args *api.ListMultipartUploadsArgs) (*api.ListMultipartUploadsResult, error) {
+	return api.ListMultipartUploads(c, bucket, args)
+}
+
+// BasicListMultipartUploads - basic interface to list the unfinished multipart uploads
+//
+// PARAMS:
+//     - bucket: the destination bucket name
+// RETURNS:
+//     - *ListMultipartUploadsResult: the unfinished multipart uploads info result
+//     - error: nil if ok otherwise the specific error
+func (c *Client) BasicListMultipartUploads(bucket string) (
+	*api.ListMultipartUploadsResult, error) {
+	return api.ListMultipartUploads(c, bucket, nil)
+}
+
+// UploadSuperFile - parallel upload the super file by using the multipart upload interface
+//
+// PARAMS:
+//     - bucket: the destination bucket name
+//     - object: the destination object name
+//     - fileName: the local full path filename of the super file
+//     - storageClass: the storage class to be set to the uploaded file
+// RETURNS:
+//     - error: nil if ok otherwise the specific error
+func (c *Client) UploadSuperFile(bucket, object, fileName, storageClass string) error {
+	// Get the file size and check the size for multipart upload
+	file, fileErr := os.Open(fileName)
+	if fileErr != nil {
+		return fileErr
+	}
+	oldTimeout := c.Config.ConnectionTimeoutInMillis
+	c.Config.ConnectionTimeoutInMillis = 0
+	defer func() {
+		c.Config.ConnectionTimeoutInMillis = oldTimeout
+		file.Close()
+	}()
+	fileInfo, infoErr := file.Stat()
+	if infoErr != nil {
+		return infoErr
+	}
+	size := fileInfo.Size()
+	if size < MIN_MULTIPART_SIZE || c.MultipartSize < MIN_MULTIPART_SIZE {
+		return bce.NewBceClientError("multipart size should not be less than 1MB")
+	}
+
+	// Calculate part size and total part number
+	partSize := (c.MultipartSize + MULTIPART_ALIGN - 1) / MULTIPART_ALIGN * MULTIPART_ALIGN
+	partNum := (size + partSize - 1) / partSize
+	if partNum > MAX_PART_NUMBER {
+		partSize = (size + MAX_PART_NUMBER - 1) / MAX_PART_NUMBER
+		partSize = (partSize + MULTIPART_ALIGN - 1) / MULTIPART_ALIGN * MULTIPART_ALIGN
+		partNum = (size + partSize - 1) / partSize
+	}
+	log.Debugf("starting upload super file, total parts: %d, part size: %d", partNum, partSize)
+
+	// Inner wrapper function of parallel uploading each part to get the ETag of the part
+	uploadPart := func(bucket, object, uploadId string, partNumber int, body *bce.Body,
+		result chan *api.UploadInfoType, ret chan error, id int64, pool chan int64) {
+		etag, err := c.BasicUploadPart(bucket, object, uploadId, partNumber, body)
+		if err != nil {
+			result <- nil
+			ret <- err
+		} else {
+			result <- &api.UploadInfoType{partNumber, etag}
+		}
+		pool <- id
+	}
+
+	// Do the parallel multipart upload
+	resp, err := c.InitiateMultipartUpload(bucket, object, "",
+		&api.InitiateMultipartUploadArgs{StorageClass: storageClass})
+	if err != nil {
+		return err
+	}
+	uploadId := resp.UploadId
+	uploadedResult := make(chan *api.UploadInfoType, partNum)
+	retChan := make(chan error, partNum)
+	workerPool := make(chan int64, c.MaxParallel)
+	for i := int64(0); i < c.MaxParallel; i++ {
+		workerPool <- i
+	}
+	for partId := int64(1); partId <= partNum; partId++ {
+		uploadSize := partSize
+		offset := (partId - 1) * partSize
+		left := size - offset
+		if uploadSize > left {
+			uploadSize = left
+		}
+		partBody, _ := bce.NewBodyFromSectionFile(file, offset, uploadSize)
+		select { // wait until get a worker to upload
+		case workerId := <-workerPool:
+			go uploadPart(bucket, object, uploadId, int(partId), partBody,
+				uploadedResult, retChan, workerId, workerPool)
+		case uploadPartErr := <-retChan:
+			c.AbortMultipartUpload(bucket, object, uploadId)
+			return uploadPartErr
+		}
+	}
+
+	// Check the return of each part uploading, and decide to complete or abort it
+	completeArgs := &api.CompleteMultipartUploadArgs{
+		Parts: make([]api.UploadInfoType, partNum),
+	}
+	for i := partNum; i > 0; i-- {
+		uploaded := <-uploadedResult
+		if uploaded == nil { // error occurs and not be caught in `select' statement
+			c.AbortMultipartUpload(bucket, object, uploadId)
+			return <-retChan
+		}
+		completeArgs.Parts[uploaded.PartNumber-1] = *uploaded
+		log.Debugf("upload part %d success, etag: %s", uploaded.PartNumber, uploaded.ETag)
+	}
+	if _, err := c.CompleteMultipartUploadFromStruct(bucket, object,
+		uploadId, completeArgs); err != nil {
+		c.AbortMultipartUpload(bucket, object, uploadId)
+		return err
+	}
+	return nil
+}
+
+// DownloadSuperFile - parallel download the super file using the get object with range
+//
+// PARAMS:
+//     - bucket: the destination bucket name
+//     - object: the destination object name
+//     - fileName: the local full path filename to store the object
+// RETURNS:
+//     - error: nil if ok otherwise the specific error
+func (c *Client) DownloadSuperFile(bucket, object, fileName string) (err error) {
+	file, err := os.OpenFile(fileName, os.O_WRONLY|os.O_TRUNC|os.O_CREATE, 0644)
+	if err != nil {
+		return
+	}
+	oldTimeout := c.Config.ConnectionTimeoutInMillis
+	c.Config.ConnectionTimeoutInMillis = 0
+	defer func() {
+		c.Config.ConnectionTimeoutInMillis = oldTimeout
+		file.Close()
+		if err != nil {
+			os.Remove(fileName)
+		}
+	}()
+
+	meta, err := c.GetObjectMeta(bucket, object)
+	if err != nil {
+		return
+	}
+	size := meta.ContentLength
+	partSize := (c.MultipartSize + MULTIPART_ALIGN - 1) / MULTIPART_ALIGN * MULTIPART_ALIGN
+	partNum := (size + partSize - 1) / partSize
+	log.Debugf("starting download super file, total parts: %d, part size: %d", partNum, partSize)
+
+	doneChan := make(chan struct{}, partNum)
+	abortChan := make(chan struct{})
+
+	// Set up multiple goroutine workers to download the object
+	workerPool := make(chan int64, c.MaxParallel)
+	for i := int64(0); i < c.MaxParallel; i++ {
+		workerPool <- i
+	}
+	for i := int64(0); i < partNum; i++ {
+		rangeStart := i * partSize
+		rangeEnd := (i+1)*partSize - 1
+		if rangeEnd > size-1 {
+			rangeEnd = size - 1
+		}
+		select {
+		case workerId := <-workerPool:
+			go func(rangeStart, rangeEnd, workerId int64) {
+				res, rangeGetErr := c.GetObject(bucket, object, nil, rangeStart, rangeEnd)
+				if rangeGetErr != nil {
+					// res is nil when the range get fails, so report the
+					// requested part size instead of reading res.ContentLength
+					log.Errorf("download object part(offset:%d, size:%d) failed: %v",
+						rangeStart, rangeEnd-rangeStart+1, rangeGetErr)
+					abortChan <- struct{}{}
+					err = rangeGetErr
+					return
+				}
+				defer res.Body.Close()
+				log.Debugf("writing part %d with offset=%d, size=%d", rangeStart/partSize,
+					rangeStart, res.ContentLength)
+				buf := make([]byte, 4096)
+				offset := rangeStart
+				for {
+					n, e := res.Body.Read(buf)
+					if e != nil && e != io.EOF {
+						abortChan <- struct{}{}
+						err = e
+						return
+					}
+					if n == 0 {
+						break
+					}
+					if _, writeErr := file.WriteAt(buf[:n], offset); writeErr != nil {
+						abortChan <- struct{}{}
+						err = writeErr
+						return
+					}
+					offset += int64(n)
+				}
+				log.Debugf("writing part %d done", rangeStart/partSize)
+				workerPool <- workerId
+				doneChan <- struct{}{}
+			}(rangeStart, rangeEnd, workerId)
+		case <-abortChan: // abort range get if error occurs during downloading any part
+			return
+		}
+	}
+
+	// Wait for writing to local file done
+	for i := partNum; i > 0; i-- {
+		<-doneChan
+	}
+	return nil
+}
+
+// GeneratePresignedUrl - generate an authorization url with expire time and optional arguments
+//
+// PARAMS:
+//     - bucket: the target bucket name
+//     - object: the target object name
+//     - expireInSeconds: the expire time in seconds of the signed url
+//     - method: optional sign method, default is GET
+//     - headers: optional sign headers, default just set the Host
+//     - params: optional sign params, default is empty
+// RETURNS:
+//     - string: the presigned url with authorization string
+func (c *Client) GeneratePresignedUrl(bucket, object string, expireInSeconds int, method string,
+	headers, params map[string]string) string {
+	return api.GeneratePresignedUrl(c.Config, c.Signer, bucket, object,
+		expireInSeconds, method, headers, params)
+}
+
+// BasicGeneratePresignedUrl - basic interface to generate an authorization url with expire time
+//
+// PARAMS:
+//     - bucket: the target bucket name
+//     - object: the target object name
+//     - expireInSeconds: the expire time in seconds of the signed url
+// RETURNS:
+//     - string: the presigned url with authorization string
+func (c *Client) BasicGeneratePresignedUrl(bucket, object string, expireInSeconds int) string {
+	return api.GeneratePresignedUrl(c.Config, c.Signer, bucket, object,
+		expireInSeconds, "", nil, nil)
+}
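+
+// Example (illustrative sketch, not part of the upstream SDK): producing a
+// shareable GET link that expires after one hour; names are placeholders.
+//
+//	url := client.BasicGeneratePresignedUrl("example-bucket", "report.pdf", 3600)
+//	fmt.Println(url)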
bucket, object, "", nil, ids, nil) +} + +// PutObjectAclFromFile - set the acl of the given object with acl json file name +// +// PARAMS: +// - bucket: the bucket name +// - object: the object name +// - aclFile: the acl file name +// RETURNS: +// - error: nil if success otherwise the specific error +func (c *Client) PutObjectAclFromFile(bucket, object, aclFile string) error { + body, err := bce.NewBodyFromFile(aclFile) + if err != nil { + return err + } + return api.PutObjectAcl(c, bucket, object, "", nil, nil, body) +} + +// PutObjectAclFromString - set the acl of the given object with acl json string +// +// PARAMS: +// - bucket: the bucket name +// - object: the object name +// - aclString: the acl string with json format +// RETURNS: +// - error: nil if success otherwise the specific error +func (c *Client) PutObjectAclFromString(bucket, object, aclString string) error { + body, err := bce.NewBodyFromString(aclString) + if err != nil { + return err + } + return api.PutObjectAcl(c, bucket, object, "", nil, nil, body) +} + +// PutObjectAclFromStruct - set the acl of the given object with acl data structure +// +// PARAMS: +// - bucket: the bucket name +// - aclObj: the acl struct object +// RETURNS: +// - error: nil if success otherwise the specific error +func (c *Client) PutObjectAclFromStruct(bucket, object string, aclObj *api.PutObjectAclArgs) error { + jsonBytes, jsonErr := json.Marshal(aclObj) + if jsonErr != nil { + return jsonErr + } + body, err := bce.NewBodyFromBytes(jsonBytes) + if err != nil { + return err + } + return api.PutObjectAcl(c, bucket, object, "", nil, nil, body) +} + +// GetObjectAcl - get the acl of the given object +// +// PARAMS: +// - bucket: the bucket name +// - object: the object name +// RETURNS: +// - *api.GetObjectAclResult: the result of the object acl +// - error: nil if success otherwise the specific error +func (c *Client) GetObjectAcl(bucket, object string) (*api.GetObjectAclResult, error) { + return api.GetObjectAcl(c, bucket, object) +} + +// DeleteObjectAcl - delete the acl of the given object +// +// PARAMS: +// - bucket: the bucket name +// - object: the object name +// RETURNS: +// - error: nil if success otherwise the specific error +func (c *Client) DeleteObjectAcl(bucket, object string) error { + return api.DeleteObjectAcl(c, bucket, object) +} + +// RestoreObject - restore the archive object +// +// PARAMS: +// - bucket: the bucket name +// - object: the object name +// - restoreDays: the effective time of restore +// - restoreTier: the tier of restore +// RETURNS: +// - error: nil if success otherwise the specific error +func (c *Client) RestoreObject(bucket string, object string, restoreDays int, restoreTier string) error { + if _, ok := api.VALID_RESTORE_TIER[restoreTier]; !ok { + return errors.New("invalid restore tier") + } + + if restoreDays <= 0 { + return errors.New("invalid restore days") + } + + args := api.ArchiveRestoreArgs{ + RestoreTier: restoreTier, + RestoreDays: restoreDays, + } + return api.RestoreObject(c, bucket, object, args) +} + +// PutBucketTrash - put the bucket trash +// +// PARAMS: +// - bucket: the bucket name +// - trashReq: the trash request +// RETURNS: +// - error: nil if success otherwise the specific error +func (c *Client) PutBucketTrash(bucket string, trashReq api.PutBucketTrashReq) error { + return api.PutBucketTrash(c, bucket, trashReq) +} + +// GetBucketTrash - get the bucket trash +// +// PARAMS: +// - bucket: the bucket name +// RETURNS: +// - *api.GetBucketTrashResult,: the result of the bucket 
+
+// PutBucketTrash - put the bucket trash
+//
+// PARAMS:
+//     - bucket: the bucket name
+//     - trashReq: the trash request
+// RETURNS:
+//     - error: nil if success otherwise the specific error
+func (c *Client) PutBucketTrash(bucket string, trashReq api.PutBucketTrashReq) error {
+	return api.PutBucketTrash(c, bucket, trashReq)
+}
+
+// GetBucketTrash - get the bucket trash
+//
+// PARAMS:
+//     - bucket: the bucket name
+// RETURNS:
+//     - *api.GetBucketTrashResult: the result of the bucket trash
+//     - error: nil if success otherwise the specific error
+func (c *Client) GetBucketTrash(bucket string) (*api.GetBucketTrashResult, error) {
+	return api.GetBucketTrash(c, bucket)
+}
+
+// DeleteBucketTrash - delete the trash of the given bucket
+//
+// PARAMS:
+//     - bucket: the bucket name
+// RETURNS:
+//     - error: nil if success otherwise the specific error
+func (c *Client) DeleteBucketTrash(bucket string) error {
+	return api.DeleteBucketTrash(c, bucket)
+}
+
+// PutBucketNotification - put the bucket notification
+//
+// PARAMS:
+//     - bucket: the bucket name
+//     - putBucketNotificationReq: the bucket notification request
+// RETURNS:
+//     - error: nil if success otherwise the specific error
+func (c *Client) PutBucketNotification(bucket string, putBucketNotificationReq api.PutBucketNotificationReq) error {
+	return api.PutBucketNotification(c, bucket, putBucketNotificationReq)
+}
+
+// GetBucketNotification - get the bucket notification
+//
+// PARAMS:
+//     - bucket: the bucket name
+// RETURNS:
+//     - *api.PutBucketNotificationReq: the result of the bucket notification
+//     - error: nil if success otherwise the specific error
+func (c *Client) GetBucketNotification(bucket string) (*api.PutBucketNotificationReq, error) {
+	return api.GetBucketNotification(c, bucket)
+}
+
+// DeleteBucketNotification - delete the notification of the given bucket
+//
+// PARAMS:
+//     - bucket: the bucket name
+// RETURNS:
+//     - error: nil if success otherwise the specific error
+func (c *Client) DeleteBucketNotification(bucket string) error {
+	return api.DeleteBucketNotification(c, bucket)
+}
+
+// ParallelUpload - auto multipart upload object
+//
+// PARAMS:
+//     - bucket: the bucket name
+//     - object: the object name
+//     - filename: the filename
+//     - contentType: the content type, default(application/octet-stream) if empty
+//     - args: the optional initiate multipart upload arguments, nil to use the default
+// RETURNS:
+//     - *api.CompleteMultipartUploadResult: multipart upload result
+//     - error: nil if success otherwise the specific error
+func (c *Client) ParallelUpload(bucket string, object string, filename string, contentType string, args *api.InitiateMultipartUploadArgs) (*api.CompleteMultipartUploadResult, error) {
+
+	initiateMultipartUploadResult, err := api.InitiateMultipartUpload(c, bucket, object, contentType, args)
+	if err != nil {
+		return nil, err
+	}
+
+	partEtags, err := c.parallelPartUpload(bucket, object, filename, initiateMultipartUploadResult.UploadId)
+	if err != nil {
+		c.AbortMultipartUpload(bucket, object, initiateMultipartUploadResult.UploadId)
+		return nil, err
+	}
+
+	completeMultipartUploadResult, err := c.CompleteMultipartUploadFromStruct(bucket, object, initiateMultipartUploadResult.UploadId, &api.CompleteMultipartUploadArgs{Parts: partEtags})
+	if err != nil {
+		c.AbortMultipartUpload(bucket, object, initiateMultipartUploadResult.UploadId)
+		return nil, err
+	}
+	return completeMultipartUploadResult, nil
+}
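+
+// Example (illustrative sketch, not part of the upstream SDK): uploading a
+// local file with automatic multipart handling; the empty content type falls
+// back to the default, and nil args use the default upload settings.
+//
+//	res, err := client.ParallelUpload("example-bucket", "videos/movie.mp4",
+//		"/tmp/movie.mp4", "", nil)
+//	if err == nil {
+//		fmt.Printf("%+v\n", res)
+//	}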
+
+// parallelPartUpload - upload the parts of a local file in parallel
+//
+// PARAMS:
+//     - bucket: the bucket name
+//     - object: the object name
+//     - filename: the local file name
+//     - uploadId: the uploadId
+// RETURNS:
+//     - []api.UploadInfoType: multipart upload result
+//     - error: nil if success otherwise the specific error
+func (c *Client) parallelPartUpload(bucket string, object string, filename string, uploadId string) ([]api.UploadInfoType, error) {
+	file, err := os.Open(filename)
+	if err != nil {
+		return nil, err
+	}
+	// align the part size to MULTIPART_ALIGN (1MB)
+	partSize := (c.MultipartSize +
+		MULTIPART_ALIGN - 1) / MULTIPART_ALIGN * MULTIPART_ALIGN
+
+	// get the file size and compute the part count; at most MAX_PART_NUMBER (10000) parts
+	fileInfo, _ := file.Stat()
+	fileSize := fileInfo.Size()
+	partNum := (fileSize + partSize - 1) / partSize
+	if partNum > MAX_PART_NUMBER { // too many parts, enlarge the part size
+		partSize = (fileSize + MAX_PART_NUMBER - 1) / MAX_PART_NUMBER
+		partSize = (partSize + MULTIPART_ALIGN - 1) / MULTIPART_ALIGN * MULTIPART_ALIGN
+		partNum = (fileSize + partSize - 1) / partSize
+	}
+
+	parallelChan := make(chan int, c.MaxParallel)
+
+	errChan := make(chan error, c.MaxParallel)
+
+	resultChan := make(chan api.UploadInfoType, partNum)
+
+	// upload the parts one by one
+	for i := int64(1); i <= partNum; i++ {
+		// compute the offset and the upload size of this part
+		uploadSize := partSize
+		offset := partSize * (i - 1)
+		left := fileSize - offset
+		if left < partSize {
+			uploadSize = left
+		}
+
+		// create a section stream of the file with the given offset and size
+		partBody, _ := bce.NewBodyFromSectionFile(file, offset, uploadSize)
+
+		select {
+		case err = <-errChan:
+			return nil, err
+		default:
+			select {
+			case err = <-errChan:
+				return nil, err
+			case parallelChan <- 1:
+				go c.singlePartUpload(bucket, object, uploadId, int(i), partBody, parallelChan, errChan, resultChan)
+			}
+
+		}
+	}
+
+	partEtags := make([]api.UploadInfoType, partNum)
+	for i := int64(0); i < partNum; i++ {
+		select {
+		case err := <-errChan:
+			return nil, err
+		case result := <-resultChan:
+			partEtags[result.PartNumber-1].PartNumber = result.PartNumber
+			partEtags[result.PartNumber-1].ETag = result.ETag
+		}
+	}
+	return partEtags, nil
+}
+
+// singlePartUpload - upload a single part
+//
+// PARAMS:
+//     - bucket: the bucket name
+//     - object: the object name
+//     - uploadId: the uploadId
+//     - partNumber: the part number of the object
+//     - content: the content of current part
+//     - parallelChan: the parallel worker channel
+//     - errChan: the error channel
+//     - result: the upload result channel
+func (c *Client) singlePartUpload(
+	bucket string, object string, uploadId string,
+	partNumber int, content *bce.Body,
+	parallelChan chan int, errChan chan error, result chan api.UploadInfoType) {
+
+	defer func() {
+		if r := recover(); r != nil {
+			log.Fatal("singlePartUpload recovered in f:", r)
+			errChan <- errors.New("singlePartUpload panic")
+		}
+		<-parallelChan
+	}()
+
+	var args api.UploadPartArgs
+	args.ContentMD5 = content.ContentMD5()
+
+	etag, err := api.UploadPart(c, bucket, object, uploadId, partNumber, content, &args)
+	if err != nil {
+		errChan <- err
+		log.Errorf("upload part fail, err: %v", err)
+		return
+	}
+	result <- api.UploadInfoType{PartNumber: partNumber, ETag: etag}
+	return
+}
+
+// ParallelCopy - auto multipart copy object
+//
+// PARAMS:
+//     - srcBucketName: the src bucket name
+//     - srcObjectName: the src object name
+//     - destBucketName: the dest bucket name
+//     - destObjectName: the dest object name
+//     - args: the copy args
+//     - srcClient: the src region client
+// RETURNS:
+//     - *api.CompleteMultipartUploadResult: multipart upload result
+//     - error: nil if success otherwise the specific error
+func (c *Client) ParallelCopy(srcBucketName string, srcObjectName string,
+	destBucketName string, destObjectName string,
+	args *api.MultiCopyObjectArgs, srcClient *Client) (*api.CompleteMultipartUploadResult, error) {
+
+	if srcClient == nil {
+		srcClient = c
+	}
+	objectMeta, err := srcClient.GetObjectMeta(srcBucketName, srcObjectName)
+	if err != nil {
+		return nil, err
+	}
+
+	initArgs := api.InitiateMultipartUploadArgs{
+		CacheControl:       objectMeta.CacheControl,
+		ContentDisposition: objectMeta.ContentDisposition,
+		Expires:            objectMeta.Expires,
+		StorageClass:       objectMeta.StorageClass,
+	}
+	if args != nil {
+		if len(args.StorageClass) != 0 {
+			initArgs.StorageClass = args.StorageClass
+		}
+	}
+	initiateMultipartUploadResult, err := api.InitiateMultipartUpload(c, destBucketName, destObjectName, objectMeta.ContentType, &initArgs)
+
+	if err != nil {
+		return nil, err
+	}
+
+	source := fmt.Sprintf("/%s/%s", srcBucketName, srcObjectName)
+	partEtags, err := c.parallelPartCopy(*objectMeta, source, destBucketName, destObjectName, initiateMultipartUploadResult.UploadId)
+
+	if err != nil {
+		c.AbortMultipartUpload(destBucketName, destObjectName, initiateMultipartUploadResult.UploadId)
+		return nil, err
+	}
+
+	completeMultipartUploadResult, err := c.CompleteMultipartUploadFromStruct(destBucketName, destObjectName, initiateMultipartUploadResult.UploadId, &api.CompleteMultipartUploadArgs{Parts: partEtags})
+	if err != nil {
+		c.AbortMultipartUpload(destBucketName, destObjectName, initiateMultipartUploadResult.UploadId)
+		return nil, err
+	}
+	return completeMultipartUploadResult, nil
+}
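+
+// Example (illustrative sketch, not part of the upstream SDK): a same-region
+// server-side multipart copy that reuses the client as the source client by
+// passing nil for both the optional args and the source client.
+//
+//	res, err := client.ParallelCopy("src-bucket", "src-key",
+//		"dst-bucket", "dst-key", nil, nil)
+//	if err != nil {
+//		// handle copy error
+//	}
+//	_ = res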
+
+// ParallelCopy - automatic multipart copy of an object
+//
+// PARAMS:
+//     - srcBucketName: the src bucket name
+//     - srcObjectName: the src object name
+//     - destBucketName: the dest bucket name
+//     - destObjectName: the dest object name
+//     - args: the copy args
+//     - srcClient: the src region client
+// RETURNS:
+//     - *api.CompleteMultipartUploadResult: multipart upload result
+//     - error: nil if success otherwise the specific error
+func (c *Client) ParallelCopy(srcBucketName string, srcObjectName string,
+	destBucketName string, destObjectName string,
+	args *api.MultiCopyObjectArgs, srcClient *Client) (*api.CompleteMultipartUploadResult, error) {
+
+	if srcClient == nil {
+		srcClient = c
+	}
+	objectMeta, err := srcClient.GetObjectMeta(srcBucketName, srcObjectName)
+	if err != nil {
+		return nil, err
+	}
+
+	initArgs := api.InitiateMultipartUploadArgs{
+		CacheControl:       objectMeta.CacheControl,
+		ContentDisposition: objectMeta.ContentDisposition,
+		Expires:            objectMeta.Expires,
+		StorageClass:       objectMeta.StorageClass,
+	}
+	if args != nil {
+		if len(args.StorageClass) != 0 {
+			initArgs.StorageClass = args.StorageClass
+		}
+	}
+	initiateMultipartUploadResult, err := api.InitiateMultipartUpload(c, destBucketName, destObjectName, objectMeta.ContentType, &initArgs)
+
+	if err != nil {
+		return nil, err
+	}
+
+	source := fmt.Sprintf("/%s/%s", srcBucketName, srcObjectName)
+	partEtags, err := c.parallelPartCopy(*objectMeta, source, destBucketName, destObjectName, initiateMultipartUploadResult.UploadId)
+
+	if err != nil {
+		c.AbortMultipartUpload(destBucketName, destObjectName, initiateMultipartUploadResult.UploadId)
+		return nil, err
+	}
+
+	completeMultipartUploadResult, err := c.CompleteMultipartUploadFromStruct(destBucketName, destObjectName, initiateMultipartUploadResult.UploadId, &api.CompleteMultipartUploadArgs{Parts: partEtags})
+	if err != nil {
+		c.AbortMultipartUpload(destBucketName, destObjectName, initiateMultipartUploadResult.UploadId)
+		return nil, err
+	}
+	return completeMultipartUploadResult, nil
+}
+
+// parallelPartCopy - parallel part copy
+//
+// PARAMS:
+//     - srcMeta: the copy source object meta
+//     - source: the copy source
+//     - bucket: the dest bucket name
+//     - object: the dest object name
+//     - uploadId: the uploadId
+// RETURNS:
+//     - []api.UploadInfoType: multipart upload result
+//     - error: nil if success otherwise the specific error
+func (c *Client) parallelPartCopy(srcMeta api.GetObjectMetaResult, source string, bucket string, object string, uploadId string) ([]api.UploadInfoType, error) {
+	var err error
+	size := srcMeta.ContentLength
+	partSize := int64(DEFAULT_MULTIPART_SIZE)
+
+	partNum := (size + partSize - 1) / partSize
+
+	parallelChan := make(chan int, c.MaxParallel)
+
+	errChan := make(chan error, c.MaxParallel)
+
+	resultChan := make(chan api.UploadInfoType, partNum)
+
+	for i := int64(1); i <= partNum; i++ {
+		// Compute the offset and the copy size of this part.
+		uploadSize := partSize
+		offset := partSize * (i - 1)
+		left := size - offset
+		if left < partSize {
+			uploadSize = left
+		}
+
+		partCopyArgs := api.UploadPartCopyArgs{
+			SourceRange: fmt.Sprintf("bytes=%d-%d", (i-1)*partSize, (i-1)*partSize+uploadSize-1),
+			IfMatch:     srcMeta.ETag,
+		}
+
+		select {
+		case err = <-errChan:
+			return nil, err
+		default:
+			select {
+			case err = <-errChan:
+				return nil, err
+			case parallelChan <- 1:
+				go c.singlePartCopy(source, bucket, object, uploadId, int(i), &partCopyArgs, parallelChan, errChan, resultChan)
+			}
+
+		}
+	}
+
+	partEtags := make([]api.UploadInfoType, partNum)
+	for i := int64(0); i < partNum; i++ {
+		select {
+		case err := <-errChan:
+			return nil, err
+		case result := <-resultChan:
+			partEtags[result.PartNumber-1].PartNumber = result.PartNumber
+			partEtags[result.PartNumber-1].ETag = result.ETag
+		}
+	}
+	return partEtags, nil
+}
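`SourceRange` uses inclusive byte offsets, so each part covers `offset` through `offset+size-1`. A quick sketch of the ranges produced for a 10MB object copied in 4MB parts (sizes are illustrative):

```go
package main

import "fmt"

func main() {
	size, partSize := int64(10<<20), int64(4<<20) // 10MB object, 4MB parts
	for off := int64(0); off < size; off += partSize {
		end := off + partSize - 1
		if end > size-1 {
			end = size - 1 // the final part is usually shorter
		}
		fmt.Printf("bytes=%d-%d\n", off, end)
	}
	// Output:
	// bytes=0-4194303
	// bytes=4194304-8388607
	// bytes=8388608-10485759
}
```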
+
+// singlePartCopy - copy a single part
+//
+// PARAMS:
+//     - source: the copy source
+//     - bucket: the bucket name
+//     - object: the object name
+//     - uploadId: the uploadId
+//     - partNumber: the part number of the object
+//     - args: the copy args
+//     - parallelChan: the parallelism-limiting channel
+//     - errChan: the error channel
+//     - result: the upload result channel
+func (c *Client) singlePartCopy(source string, bucket string, object string, uploadId string,
+	partNumber int, args *api.UploadPartCopyArgs,
+	parallelChan chan int, errChan chan error, result chan api.UploadInfoType) {
+
+	defer func() {
+		if r := recover(); r != nil {
+			log.Fatal("parallelPartCopy recovered in f:", r)
+			errChan <- errors.New("parallelPartCopy panic")
+		}
+		<-parallelChan
+	}()
+
+	copyObjectResult, err := api.UploadPartCopy(c, bucket, object, source, uploadId, partNumber, args)
+	if err != nil {
+		errChan <- err
+		log.Errorf("copy part fail, err: %v", err)
+		return
+	}
+	result <- api.UploadInfoType{PartNumber: partNumber, ETag: copyObjectResult.ETag}
+	return
+}
+
+// PutSymlink - create a symlink for an existing target object
+//
+// PARAMS:
+//     - bucket: the name of the bucket
+//     - object: the name of the target object
+//     - symlinkKey: the name of the symlink
+//     - symlinkArgs: the optional arguments
+// RETURNS:
+//     - error: the put error if any occurs
+func (c *Client) PutSymlink(bucket string, object string, symlinkKey string, symlinkArgs *api.PutSymlinkArgs) error {
+	return api.PutObjectSymlink(c, bucket, object, symlinkKey, symlinkArgs)
+}
+
+// GetSymlink - get the target of an existing symlink
+//
+// PARAMS:
+//     - bucket: the name of the bucket
+//     - object: the name of the symlink
+// RETURNS:
+//     - string: the target of the symlink
+//     - error: the get error if any occurs
+func (c *Client) GetSymlink(bucket string, object string) (string, error) {
+	return api.GetObjectSymlink(c, bucket, object)
+}
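A brief round-trip for the symlink helpers; the bucket and object names are illustrative, and `client` is constructed as in the earlier sketch:

```go
package main

import (
	"fmt"
	"log"

	"github.com/baidubce/bce-sdk-go/services/bos"
)

func main() {
	client, err := bos.NewClient("<access-key>", "<secret-key>", "bj.bcebos.com") // illustrative
	if err != nil {
		log.Fatal(err)
	}
	// Point "latest.log" at an existing object, then resolve it back.
	if err := client.PutSymlink("my-bucket", "logs/2022-04-20.log", "latest.log", nil); err != nil {
		log.Fatal(err)
	}
	target, err := client.GetSymlink("my-bucket", "latest.log")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(target) // logs/2022-04-20.log
}
```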
diff --git a/vendor/github.com/baidubce/bce-sdk-go/util/log/logger.go b/vendor/github.com/baidubce/bce-sdk-go/util/log/logger.go
new file mode 100644
index 0000000000000..8cdcfda5e9e36
--- /dev/null
+++ b/vendor/github.com/baidubce/bce-sdk-go/util/log/logger.go
@@ -0,0 +1,373 @@
+/*
+ * Copyright 2017 Baidu, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
+ * except in compliance with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the
+ * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
+ * either express or implied. See the License for the specific language governing permissions
+ * and limitations under the License.
+ */
+
+// logger.go - defines the logger structure and methods
+
+// Package log implements the log facilities for BCE. It supports log to stderr, stdout as well as
+// log to file with rotating. It is safe to be called by multiple goroutines.
+// Use the package level functions to use the default logger:
+//     log.SetLogHandler(log.STDOUT | log.FILE) // default is log to stdout
+//     log.SetLogDir("/tmp")                    // default is /tmp
+//     log.SetRotateType(log.ROTATE_DAY)        // default is log.ROTATE_HOUR
+//     log.SetRotateSize(1 << 30)               // default is 1GB
+//     log.SetLogLevel(log.INFO)                // default is log.DEBUG
+//     log.Debug(1, 1.2, "a")
+//     log.Debugln(1, 1.2, "a")
+//     log.Debugf("%d %f %s", 1, 1.2, "a")
+// Users can also create a new logger without using the default logger:
+//     customLogger := log.NewLogger()
+//     customLogger.SetLogHandler(log.FILE)
+//     customLogger.Debug(1, 1.2, "a")
+// The log format also supports custom settings by using the following interface:
+//     log.SetLogFormat([]string{log.FMT_LEVEL, log.FMT_TIME, log.FMT_MSG})
+// In most cases the default format is enough:
+//     []string{FMT_LEVEL, FMT_LTIME, FMT_LOCATION, FMT_MSG}
+package log
+
+import (
+	"fmt"
+	"io"
+	"os"
+	"path/filepath"
+	"runtime"
+	"strings"
+	"time"
+)
+
+type Handler uint8
+
+// Constants for log handler flags, default is STDOUT
+const (
+	NONE   Handler = 0
+	STDOUT Handler = 1
+	STDERR Handler = 1 << 1
+	FILE   Handler = 1 << 2
+)
+
+type RotateStrategy uint8
+
+// Constants for log rotating strategy when logging to file, default is by hour
+const (
+	ROTATE_NONE RotateStrategy = iota
+	ROTATE_DAY
+	ROTATE_HOUR
+	ROTATE_MINUTE
+	ROTATE_SIZE
+
+	DEFAULT_ROTATE_TYPE        = ROTATE_HOUR
+	DEFAULT_ROTATE_SIZE  int64 = 1 << 30
+	DEFAULT_LOG_DIR            = "/tmp"
+	ROTATE_SIZE_FILE_PREFIX    = "rotating"
+)
+
+type Level uint8
+
+// Constants for log levels, default is DEBUG
+const (
+	DEBUG Level = iota
+	INFO
+	WARN
+	ERROR
+	FATAL
+	PANIC
+)
+
+var gLevelString = [...]string{"DEBUG", "INFO", "WARN", "ERROR", "FATAL", "PANIC"}
+
+// Constants of the log format components to support user custom specification
+const (
+	FMT_LEVEL    = "level"
+	FMT_LTIME    = "ltime"    // long time with microsecond
+	FMT_TIME     = "time"     // just with second
+	FMT_LOCATION = "location" // caller's location with file, line, function
+	FMT_MSG      = "msg"
+)
+
+var (
+	LOG_FMT_STR = map[string]string{
+		FMT_LEVEL:    "[%s]",
+		FMT_LTIME:    "2006-01-02 15:04:05.000000",
+		FMT_TIME:     "2006-01-02 15:04:05",
+		FMT_LOCATION: "%s:%d:%s:",
+		FMT_MSG:      "%s",
+	}
+	gDefaultLogFormat = []string{FMT_LEVEL, FMT_LTIME, FMT_LOCATION, FMT_MSG}
+)
+
+type writerArgs struct {
+	record     string
+	recordDone chan bool
+	rotateArgs interface{} // used for rotating: the size of the record or the logging time
+}
+
+// logger defines the internal implementation of the log facility
+type logger struct {
+	writers        map[Handler]io.WriteCloser // the destination writers to log messages to
+	writerChan     chan *writerArgs           // the writer channel to pass each record and its time or size
+	logFormat      []string
+	levelThreshold Level
+	handler        Handler
+
+	// Fields that are used when logging to file
+	logDir     string
+	logFile    string
+	rotateType RotateStrategy
+	rotateSize int64
+}
+
+func (l *logger) logging(level Level, format string, args ...interface{}) {
+	// Only log when a handler is set and the level is greater than or equal to the threshold
+	if l.handler == NONE || level < l.levelThreshold {
+		return
+	}
+
+	// Generate the log record string and pass it to the writer channel
+	now := time.Now()
+	pc, file, line, ok, funcname := uintptr(0), "???", 0, true, "???"
+ pc, file, line, ok = runtime.Caller(2) + if ok { + funcname = runtime.FuncForPC(pc).Name() + funcname = filepath.Ext(funcname) + funcname = strings.TrimPrefix(funcname, ".") + file = filepath.Base(file) + } + buf := make([]string, 0, len(l.logFormat)) + msg := fmt.Sprintf(format, args...) + for _, f := range l.logFormat { + if _, exists := LOG_FMT_STR[f]; !exists { // skip not supported part + continue + } + fmtStr := LOG_FMT_STR[f] + switch f { + case FMT_LEVEL: + buf = append(buf, fmt.Sprintf(fmtStr, gLevelString[level])) + case FMT_LTIME: + buf = append(buf, now.Format(fmtStr)) + case FMT_TIME: + buf = append(buf, now.Format(fmtStr)) + case FMT_LOCATION: + buf = append(buf, fmt.Sprintf(fmtStr, file, line, funcname)) + case FMT_MSG: + buf = append(buf, fmt.Sprintf(fmtStr, msg)) + } + } + record := strings.Join(buf, " ") + logRecordDone := make(chan bool) + if l.rotateType == ROTATE_SIZE { + l.writerChan <- &writerArgs{record, logRecordDone, int64(len(record))} + } else { + l.writerChan <- &writerArgs{record, logRecordDone, now} + } + <-logRecordDone // wait for current record done logging +} + +func (l *logger) buildWriter(args interface{}) { + if l.handler&STDOUT == STDOUT { + l.writers[STDOUT] = os.Stdout + } else { + delete(l.writers, STDOUT) + } + if l.handler&STDERR == STDERR { + l.writers[STDERR] = os.Stderr + } else { + delete(l.writers, STDERR) + } + if l.handler&FILE == FILE { + l.writers[FILE] = l.buildFileWriter(args) + } else { + delete(l.writers, FILE) + } +} + +func (l *logger) buildFileWriter(args interface{}) io.WriteCloser { + if l.handler&FILE != FILE { + return os.Stderr + } + + if len(l.logDir) == 0 { + l.logDir = DEFAULT_LOG_DIR + } + if l.rotateType < ROTATE_NONE || l.rotateType > ROTATE_SIZE { + l.rotateType = DEFAULT_ROTATE_TYPE + } + if l.rotateType == ROTATE_SIZE && l.rotateSize == 0 { + l.rotateSize = DEFAULT_ROTATE_SIZE + } + + logFile, needCreateFile := "", false + if l.rotateType == ROTATE_SIZE { + recordSize, _ := args.(int64) + logFile, needCreateFile = l.buildFileWriterBySize(recordSize) + } else { + recordTime, _ := args.(time.Time) + switch l.rotateType { + case ROTATE_NONE: + logFile = "default.log" + case ROTATE_DAY: + logFile = recordTime.Format("2006-01-02.log") + case ROTATE_HOUR: + logFile = recordTime.Format("2006-01-02_15.log") + case ROTATE_MINUTE: + logFile = recordTime.Format("2006-01-02_15-04.log") + } + if _, exist := getFileInfo(filepath.Join(l.logDir, logFile)); !exist { + needCreateFile = true + } + } + l.logFile = logFile + logFile = filepath.Join(l.logDir, l.logFile) + + // Should create new file + if needCreateFile { + if w, ok := l.writers[FILE]; ok { + w.Close() + } + if writer, err := os.Create(logFile); err == nil { + return writer + } else { + return os.Stderr + } + } + + // Already open the file + if w, ok := l.writers[FILE]; ok { + return w + } + + // Newly open the file + if writer, err := os.OpenFile(logFile, os.O_WRONLY|os.O_APPEND, 0666); err == nil { + return writer + } else { + return os.Stderr + } +} + +func (l *logger) buildFileWriterBySize(recordSize int64) (string, bool) { + logFile, needCreateFile := "", false + // First running the program and need to get filename by checking the existed files + if len(l.logFile) == 0 { + fname := fmt.Sprintf("%s-%s.0.log", ROTATE_SIZE_FILE_PREFIX, getSizeString(l.rotateSize)) + for { + size, exist := getFileInfo(filepath.Join(l.logDir, fname)) + if !exist { + logFile, needCreateFile = fname, true + break + } + if exist && size+recordSize <= l.rotateSize { + logFile, 
needCreateFile = fname, false + break + } + fname = getNextFileName(fname) + } + } else { // check the file size to append to the existed file or create a new file + currentFile := filepath.Join(l.logDir, l.logFile) + size, exist := getFileInfo(currentFile) + if !exist { + logFile, needCreateFile = l.logFile, true + } else { + if size+recordSize > l.rotateSize { // size exceeded + logFile, needCreateFile = getNextFileName(l.logFile), true + } else { + logFile, needCreateFile = l.logFile, false + } + } + } + return logFile, needCreateFile +} + +func (l *logger) SetHandler(h Handler) { l.handler = h } + +func (l *logger) SetLogDir(dir string) { l.logDir = dir } + +func (l *logger) SetLogLevel(level Level) { l.levelThreshold = level } + +func (l *logger) SetLogFormat(format []string) { l.logFormat = format } + +func (l *logger) SetRotateType(rotate RotateStrategy) { l.rotateType = rotate } + +func (l *logger) SetRotateSize(size int64) { l.rotateSize = size } + +func (l *logger) Debug(msg ...interface{}) { l.logging(DEBUG, "%s\n", concat(msg...)) } + +func (l *logger) Debugln(msg ...interface{}) { l.logging(DEBUG, "%s\n", concat(msg...)) } + +func (l *logger) Debugf(f string, msg ...interface{}) { l.logging(DEBUG, f+"\n", msg...) } + +func (l *logger) Info(msg ...interface{}) { l.logging(INFO, "%s\n", concat(msg...)) } + +func (l *logger) Infoln(msg ...interface{}) { l.logging(INFO, "%s\n", concat(msg...)) } + +func (l *logger) Infof(f string, msg ...interface{}) { l.logging(INFO, f+"\n", msg...) } + +func (l *logger) Warn(msg ...interface{}) { l.logging(WARN, "%s\n", concat(msg...)) } + +func (l *logger) Warnln(msg ...interface{}) { l.logging(WARN, "%s\n", concat(msg...)) } + +func (l *logger) Warnf(f string, msg ...interface{}) { l.logging(WARN, f+"\n", msg...) } + +func (l *logger) Error(msg ...interface{}) { l.logging(ERROR, "%s\n", concat(msg...)) } + +func (l *logger) Errorln(msg ...interface{}) { l.logging(ERROR, "%s\n", concat(msg...)) } + +func (l *logger) Errorf(f string, msg ...interface{}) { l.logging(ERROR, f+"\n", msg...) } + +func (l *logger) Fatal(msg ...interface{}) { l.logging(FATAL, "%s\n", concat(msg...)) } + +func (l *logger) Fatalln(msg ...interface{}) { l.logging(FATAL, "%s\n", concat(msg...)) } + +func (l *logger) Fatalf(f string, msg ...interface{}) { l.logging(FATAL, f+"\n", msg...) } + +func (l *logger) Panic(msg ...interface{}) { + record := concat(msg...) + l.logging(PANIC, "%s\n", record) + panic(record) +} + +func (l *logger) Panicln(msg ...interface{}) { + record := concat(msg...) + l.logging(PANIC, "%s\n", record) + panic(record) +} + +func (l *logger) Panicf(format string, msg ...interface{}) { + record := fmt.Sprintf(format, msg...) + l.logging(PANIC, format+"\n", msg...) 
+ panic(record) +} + +func NewLogger() *logger { + obj := &logger{ + writers: make(map[Handler]io.WriteCloser, 3), // now only support 3 kinds of handler + writerChan: make(chan *writerArgs), + logFormat: gDefaultLogFormat, + levelThreshold: DEBUG, + handler: NONE, + } + + // The backend writer goroutine to write each log record + go func() { + for { + select { + case args := <-obj.writerChan: // wait until a record comes to log + obj.buildWriter(args.rotateArgs) + for _, w := range obj.writers { + fmt.Fprint(w, args.record) + } + args.recordDone <- true + } + } + }() + + return obj +} diff --git a/vendor/github.com/baidubce/bce-sdk-go/util/log/util.go b/vendor/github.com/baidubce/bce-sdk-go/util/log/util.go new file mode 100644 index 0000000000000..9bfc18abfd952 --- /dev/null +++ b/vendor/github.com/baidubce/bce-sdk-go/util/log/util.go @@ -0,0 +1,223 @@ +/* + * Copyright 2017 Baidu, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file + * except in compliance with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the + * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, + * either express or implied. See the License for the specific language governing permissions + * and limitations under the License. + */ + +// util.go - define the package-level default logger and functions for easily use. + +package log + +import ( + "fmt" + "os" + "strconv" + "strings" +) + +// The global default logger object to perform logging in this package-level functions +var gDefaultLogger *logger + +func init() { + gDefaultLogger = NewLogger() +} + +// Helper functions +func getFileInfo(path string) (int64, bool) { + info, err := os.Stat(path) + if err != nil { + return -1, false + } else { + return info.Size(), true + } +} + +func getSizeString(size int64) string { + if size < 0 { + return fmt.Sprintf("%d", size) + } + if size < (1 << 10) { + return fmt.Sprintf("%dB", size) + } else if size < (1 << 20) { + return fmt.Sprintf("%dkB", size>>10) + } else if size < (1 << 30) { + return fmt.Sprintf("%dMB", size>>20) + } else if size < (1 << 40) { + return fmt.Sprintf("%dGB", size>>30) + } else { + return "SUPER-LARGE" + } +} + +func getNextFileName(nowFileName string) string { + pos1 := strings.Index(nowFileName, ".") + rest := nowFileName[pos1+1:] + pos2 := strings.Index(rest, ".") + num, _ := strconv.Atoi(rest[0:pos2]) + return fmt.Sprintf("%s.%d.log", nowFileName[0:pos1], num+1) +} + +func concat(msg ...interface{}) string { + buf := make([]string, 0, len(msg)) + for _, m := range msg { + buf = append(buf, fmt.Sprintf("%v", m)) + } + return strings.Join(buf, " ") +} + +// Export package-level functions for logging messages by using the default logger. It supports 3 +// kinds of interface which is similar to the standard package "fmt": with suffix of "ln", "f" or +// nothing. +func Debug(msg ...interface{}) { + gDefaultLogger.logging(DEBUG, "%s\n", concat(msg...)) +} + +func Debugln(msg ...interface{}) { + gDefaultLogger.logging(DEBUG, "%s\n", concat(msg...)) +} + +func Debugf(format string, msg ...interface{}) { + gDefaultLogger.logging(DEBUG, format+"\n", msg...) 
+}
+
+func Info(msg ...interface{}) {
+	gDefaultLogger.logging(INFO, "%s\n", concat(msg...))
+}
+
+func Infoln(msg ...interface{}) {
+	gDefaultLogger.logging(INFO, "%s\n", concat(msg...))
+}
+
+func Infof(format string, msg ...interface{}) {
+	gDefaultLogger.logging(INFO, format+"\n", msg...)
+}
+
+func Warn(msg ...interface{}) {
+	gDefaultLogger.logging(WARN, "%s\n", concat(msg...))
+}
+
+func Warnln(msg ...interface{}) {
+	gDefaultLogger.logging(WARN, "%s\n", concat(msg...))
+}
+
+func Warnf(format string, msg ...interface{}) {
+	gDefaultLogger.logging(WARN, format+"\n", msg...)
+}
+
+func Error(msg ...interface{}) {
+	gDefaultLogger.logging(ERROR, "%s\n", concat(msg...))
+}
+
+func Errorln(msg ...interface{}) {
+	gDefaultLogger.logging(ERROR, "%s\n", concat(msg...))
+}
+
+func Errorf(format string, msg ...interface{}) {
+	gDefaultLogger.logging(ERROR, format+"\n", msg...)
+}
+
+func Fatal(msg ...interface{}) {
+	gDefaultLogger.logging(FATAL, "%s\n", concat(msg...))
+}
+
+func Fatalln(msg ...interface{}) {
+	gDefaultLogger.logging(FATAL, "%s\n", concat(msg...))
+}
+
+func Fatalf(format string, msg ...interface{}) {
+	gDefaultLogger.logging(FATAL, format+"\n", msg...)
+}
+
+func Panic(msg ...interface{}) {
+	record := concat(msg...)
+	gDefaultLogger.logging(PANIC, "%s\n", record)
+	panic(record)
+}
+
+func Panicln(msg ...interface{}) {
+	record := concat(msg...)
+	gDefaultLogger.logging(PANIC, "%s\n", record)
+	panic(record)
+}
+
+func Panicf(format string, msg ...interface{}) {
+	record := fmt.Sprintf(format, msg...)
+	gDefaultLogger.logging(PANIC, format+"\n", msg...)
+	panic(record)
+}
+
+// SetLogHandler - set the handler of the logger
+//
+// PARAMS:
+//     - Handler: the handler defined in this package; currently STDOUT, STDERR and FILE are supported
+func SetLogHandler(h Handler) {
+	gDefaultLogger.handler = h
+}
+
+// SetLogLevel - set the level threshold of the logger; only records with a level equal to or
+// higher than this value will be logged.
+//
+// PARAMS:
+//     - Level: the level defined in this package; currently 6 levels are supported
+func SetLogLevel(l Level) {
+	gDefaultLogger.levelThreshold = l
+}
+
+// SetLogFormat - set the log components of each record when logging it. The default log format is
+// {FMT_LEVEL, FMT_LTIME, FMT_LOCATION, FMT_MSG}.
+//
+// PARAMS:
+//     - format: the format component array.
+func SetLogFormat(format []string) {
+	gDefaultLogger.logFormat = format
+}
+
+// SetLogDir - set the logging directory if logging to file.
+//
+// PARAMS:
+//     - dir: the logging directory
+// RETURNS:
+//     - error: nil if the directory exists or could be created, otherwise the specific error.
+func SetLogDir(dir string) error {
+	if _, err := os.Stat(dir); err != nil {
+		if os.IsNotExist(err) {
+			if mkErr := os.Mkdir(dir, os.ModePerm); mkErr != nil {
+				return mkErr
+			}
+		} else {
+			return err
+		}
+	}
+	gDefaultLogger.logDir = dir
+	return nil
+}
+
+// SetRotateType - set the rotating strategy if logging to file.
+//
+// PARAMS:
+//     - RotateStrategy: the rotate strategy defined in this package; currently 5 strategies are supported
+func SetRotateType(r RotateStrategy) {
+	gDefaultLogger.rotateType = r
+}
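Putting the package-level setters together — a configuration sketch that uses only the API defined in this file (the log directory is an illustrative path):

```go
package main

import (
	"os"

	"github.com/baidubce/bce-sdk-go/util/log"
)

func main() {
	// Log to stdout and to a daily-rotated file.
	log.SetLogHandler(log.STDOUT | log.FILE)
	if err := log.SetLogDir("/var/log/myapp"); err != nil {
		panic(err)
	}
	log.SetRotateType(log.ROTATE_DAY)
	log.SetLogLevel(log.INFO)
	log.Infof("service started, pid=%d", os.Getpid())
}
```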
+
+// SetRotateSize - set the rotating size when logging to file with the size-based strategy.
+//
+// PARAMS:
+//     - size: the rotating size
+// RETURNS:
+//     - error: nil if the size is valid, otherwise an error.
+func SetRotateSize(size int64) error {
+	if size <= 0 {
+		return fmt.Errorf("%s", "rotate size must be positive")
+	}
+	gDefaultLogger.rotateSize = size
+	return nil
+}
diff --git a/vendor/github.com/baidubce/bce-sdk-go/util/string.go b/vendor/github.com/baidubce/bce-sdk-go/util/string.go
new file mode 100644
index 0000000000000..3bb0b10de244a
--- /dev/null
+++ b/vendor/github.com/baidubce/bce-sdk-go/util/string.go
@@ -0,0 +1,88 @@
+/*
+ * Copyright 2017 Baidu, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
+ * except in compliance with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the
+ * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
+ * either express or implied. See the License for the specific language governing permissions
+ * and limitations under the License.
+ */
+
+// string.go - define the string util functions
+
+// Package util defines the common utilities including string and time.
+package util
+
+import (
+	"bytes"
+	"crypto/hmac"
+	"crypto/md5"
+	"crypto/rand"
+	"crypto/sha256"
+	"encoding/base64"
+	"encoding/hex"
+	"fmt"
+	"io"
+)
+
+func HmacSha256Hex(key, str_to_sign string) string {
+	hasher := hmac.New(sha256.New, []byte(key))
+	hasher.Write([]byte(str_to_sign))
+	return hex.EncodeToString(hasher.Sum(nil))
+}
+
+func CalculateContentMD5(data io.Reader, size int64) (string, error) {
+	hasher := md5.New()
+	n, err := io.CopyN(hasher, data, size)
+	if err != nil {
+		return "", fmt.Errorf("calculate content-md5 occurs error: %v", err)
+	}
+	if n != size {
+		return "", fmt.Errorf("calculate content-md5 writing size %d != size %d", n, size)
+	}
+	return base64.StdEncoding.EncodeToString(hasher.Sum(nil)), nil
+}
+
+func UriEncode(uri string, encodeSlash bool) string {
+	var byte_buf bytes.Buffer
+	for _, b := range []byte(uri) {
+		if (b >= 'A' && b <= 'Z') || (b >= 'a' && b <= 'z') || (b >= '0' && b <= '9') ||
+			b == '-' || b == '_' || b == '.' || b == '~' || (b == '/' && !encodeSlash) {
+			byte_buf.WriteByte(b)
+		} else {
+			byte_buf.WriteString(fmt.Sprintf("%%%02X", b))
+		}
+	}
+	return byte_buf.String()
+}
+
+func NewUUID() string {
+	var buf [16]byte
+	for {
+		if _, err := rand.Read(buf[:]); err == nil {
+			break
+		}
+	}
+	buf[6] = (buf[6] & 0x0f) | (4 << 4)
+	buf[8] = (buf[8] & 0xbf) | 0x80
+
+	res := make([]byte, 36)
+	hex.Encode(res[0:8], buf[0:4])
+	res[8] = '-'
+	hex.Encode(res[9:13], buf[4:6])
+	res[13] = '-'
+	hex.Encode(res[14:18], buf[6:8])
+	res[18] = '-'
+	hex.Encode(res[19:23], buf[8:10])
+	res[23] = '-'
+	hex.Encode(res[24:], buf[10:])
+	return string(res)
+}
+
+func NewRequestId() string {
+	return NewUUID()
+}
diff --git a/vendor/github.com/baidubce/bce-sdk-go/util/time.go b/vendor/github.com/baidubce/bce-sdk-go/util/time.go
new file mode 100644
index 0000000000000..e9f9e3d1babe6
--- /dev/null
+++ b/vendor/github.com/baidubce/bce-sdk-go/util/time.go
@@ -0,0 +1,46 @@
+/*
+ * Copyright 2017 Baidu, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
+ * except in compliance with the License.
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the + * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, + * either express or implied. See the License for the specific language governing permissions + * and limitations under the License. + */ + +// time.go - define the time utility functions for BCE + +package util + +import "time" + +const ( + RFC822Format = "Mon, 02 Jan 2006 15:04:05 MST" + ISO8601Format = "2006-01-02T15:04:05Z" +) + +func NowUTCSeconds() int64 { return time.Now().UTC().Unix() } + +func NowUTCNanoSeconds() int64 { return time.Now().UTC().UnixNano() } + +func FormatRFC822Date(timestamp_second int64) string { + tm := time.Unix(timestamp_second, 0).UTC() + return tm.Format(RFC822Format) +} + +func ParseRFC822Date(time_string string) (time.Time, error) { + return time.Parse(RFC822Format, time_string) +} + +func FormatISO8601Date(timestamp_second int64) string { + tm := time.Unix(timestamp_second, 0).UTC() + return tm.Format(ISO8601Format) +} + +func ParseISO8601Date(time_string string) (time.Time, error) { + return time.Parse(ISO8601Format, time_string) +} diff --git a/vendor/modules.txt b/vendor/modules.txt index 7e4e5cec924db..5b7ed3d9b61b3 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -185,6 +185,15 @@ github.com/aws/aws-sdk-go/service/sso github.com/aws/aws-sdk-go/service/sso/ssoiface github.com/aws/aws-sdk-go/service/sts github.com/aws/aws-sdk-go/service/sts/stsiface +# github.com/baidubce/bce-sdk-go v0.9.81 +## explicit; go 1.11 +github.com/baidubce/bce-sdk-go/auth +github.com/baidubce/bce-sdk-go/bce +github.com/baidubce/bce-sdk-go/http +github.com/baidubce/bce-sdk-go/services/bos +github.com/baidubce/bce-sdk-go/services/bos/api +github.com/baidubce/bce-sdk-go/util +github.com/baidubce/bce-sdk-go/util/log # github.com/beorn7/perks v1.0.1 ## explicit; go 1.11 github.com/beorn7/perks/quantile From 45dd588d8fe6ae1156bc9d38fefaa7dd9fe45ae3 Mon Sep 17 00:00:00 2001 From: JordanRushing Date: Thu, 5 May 2022 15:59:45 -0500 Subject: [PATCH 076/223] Update consistent hash ring docs with new index gateway ring (#6041) * Update consistent hash ring docs with new index gateway ring Signed-off-by: JordanRushing * Update docs/sources/fundamentals/architecture/rings.md Co-authored-by: Karen Miller <84039272+KMiller-Grafana@users.noreply.github.com> Co-authored-by: Karen Miller <84039272+KMiller-Grafana@users.noreply.github.com> --- docs/sources/fundamentals/architecture/rings.md | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/docs/sources/fundamentals/architecture/rings.md b/docs/sources/fundamentals/architecture/rings.md index 33b2865c0da1d..aa8d89fba8411 100644 --- a/docs/sources/fundamentals/architecture/rings.md +++ b/docs/sources/fundamentals/architecture/rings.md @@ -8,11 +8,11 @@ weight: 40 are incorporated into Loki cluster architectures to - aid in the sharding of log lines -- implement high availability +- implement high availability - ease the horizontal scale up and scale down of clusters. There is less of a performance hit for operations that must rebalance data. 
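To make the sharding idea concrete, here is a toy sketch of how a token ring maps a hashed key to an owning instance. This is illustrative only — Loki's actual ring implementation lives in grafana/dskit and adds replication, heartbeats, and multiple tokens per instance:

```go
package main

import "fmt"

func main() {
	// Each instance owns the token range ending at its token; a key goes to
	// the first instance whose token is >= the key's hash, wrapping around.
	tokens := []uint32{1000, 5000, 9000} // sorted tokens of three instances
	ownerOf := func(keyHash uint32) int {
		for i, t := range tokens {
			if keyHash <= t {
				return i
			}
		}
		return 0 // wrap past the highest token back to the first instance
	}
	fmt.Println(ownerOf(4200)) // -> 1 (the second instance)
}
```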
-Hash rings connect instances of a single type of component when +Hash rings connect instances of a single type of component when - there are a set of Loki instances in monolithic deployment mode - there are multiple read components or multiple write components in @@ -28,6 +28,9 @@ These components need to be connected into a hash ring: - compactors - rulers +These components can optionally be connected into a hash ring: +- index gateway + In an architecture that has three distributors and three ingestors defined, the hash rings for these components connect the instances of same-type components. @@ -91,3 +94,7 @@ despite the compactor target being on multiple instances. ## About the ruler ring The ruler ring is used to determine which rulers evaluate which rule groups. + +## About the index gateway ring + +The index gateway ring is used to determine which gateway is responsible for which tenant's indexes when queried by rulers or queriers. From 06e309bae6433036a76775125bd72b16003fba7d Mon Sep 17 00:00:00 2001 From: Sandeep Sukhani Date: Fri, 6 May 2022 07:21:59 +0530 Subject: [PATCH 077/223] do not increment sync operation count metric on per index set (#6109) --- pkg/storage/stores/shipper/downloads/index_set.go | 13 +------------ .../stores/shipper/downloads/index_set_test.go | 3 +-- pkg/storage/stores/shipper/downloads/table.go | 9 +++------ 3 files changed, 5 insertions(+), 20 deletions(-) diff --git a/pkg/storage/stores/shipper/downloads/index_set.go b/pkg/storage/stores/shipper/downloads/index_set.go index a6c9347e89ecf..dfb20dbc2eaff 100644 --- a/pkg/storage/stores/shipper/downloads/index_set.go +++ b/pkg/storage/stores/shipper/downloads/index_set.go @@ -45,7 +45,6 @@ type indexSet struct { baseIndexSet storage.IndexSet tableName, userID string cacheLocation string - metrics *metrics boltDBIndexClient BoltDBIndexClient logger log.Logger @@ -58,8 +57,7 @@ type indexSet struct { } func NewIndexSet(tableName, userID, cacheLocation string, baseIndexSet storage.IndexSet, - boltDBIndexClient BoltDBIndexClient, logger log.Logger, metrics *metrics, -) (IndexSet, error) { + boltDBIndexClient BoltDBIndexClient, logger log.Logger) (IndexSet, error) { if baseIndexSet.IsUserBasedIndexSet() && userID == "" { return nil, fmt.Errorf("userID must not be empty") } else if !baseIndexSet.IsUserBasedIndexSet() && userID != "" { @@ -76,7 +74,6 @@ func NewIndexSet(tableName, userID, cacheLocation string, baseIndexSet storage.I tableName: tableName, userID: userID, cacheLocation: cacheLocation, - metrics: metrics, boltDBIndexClient: boltDBIndexClient, logger: logger, lastUsedAt: time.Now(), @@ -284,14 +281,6 @@ func (t *indexSet) Sync(ctx context.Context) (err error) { func (t *indexSet) sync(ctx context.Context, lock, bypassListCache bool) (err error) { level.Debug(t.logger).Log("msg", "syncing index files") - defer func() { - status := statusSuccess - if err != nil { - status = statusFailure - } - t.metrics.tablesSyncOperationTotal.WithLabelValues(status).Inc() - }() - toDownload, toDelete, err := t.checkStorageForUpdates(ctx, lock, bypassListCache) if err != nil { return err diff --git a/pkg/storage/stores/shipper/downloads/index_set_test.go b/pkg/storage/stores/shipper/downloads/index_set_test.go index 0a7ddeb6ed2ad..cd07d6a389e80 100644 --- a/pkg/storage/stores/shipper/downloads/index_set_test.go +++ b/pkg/storage/stores/shipper/downloads/index_set_test.go @@ -22,8 +22,7 @@ func buildTestIndexSet(t *testing.T, userID, path string) (*indexSet, stopFunc) cachePath := filepath.Join(path, cacheDirName) 
baseIndexSet := storage.NewIndexSet(storageClient, userID != "") - idxSet, err := NewIndexSet(tableName, userID, filepath.Join(cachePath, tableName, userID), baseIndexSet, - boltDBIndexClient, util_log.Logger, newMetrics(nil)) + idxSet, err := NewIndexSet(tableName, userID, filepath.Join(cachePath, tableName, userID), baseIndexSet, boltDBIndexClient, util_log.Logger) require.NoError(t, err) require.NoError(t, idxSet.Init(false)) diff --git a/pkg/storage/stores/shipper/downloads/table.go b/pkg/storage/stores/shipper/downloads/table.go index 8153423475a72..086c09cc3f284 100644 --- a/pkg/storage/stores/shipper/downloads/table.go +++ b/pkg/storage/stores/shipper/downloads/table.go @@ -108,8 +108,7 @@ func LoadTable(name, cacheLocation string, storageClient storage.Client, boltDBI } userID := fileInfo.Name() - userIndexSet, err := NewIndexSet(name, userID, filepath.Join(cacheLocation, userID), - table.baseUserIndexSet, boltDBIndexClient, loggerWithUserID(table.logger, userID), metrics) + userIndexSet, err := NewIndexSet(name, userID, filepath.Join(cacheLocation, userID), table.baseUserIndexSet, boltDBIndexClient, loggerWithUserID(table.logger, userID)) if err != nil { return nil, err } @@ -122,8 +121,7 @@ func LoadTable(name, cacheLocation string, storageClient storage.Client, boltDBI table.indexSets[userID] = userIndexSet } - commonIndexSet, err := NewIndexSet(name, "", cacheLocation, table.baseCommonIndexSet, - boltDBIndexClient, table.logger, metrics) + commonIndexSet, err := NewIndexSet(name, "", cacheLocation, table.baseCommonIndexSet, boltDBIndexClient, table.logger) if err != nil { return nil, err } @@ -298,8 +296,7 @@ func (t *table) getOrCreateIndexSet(ctx context.Context, id string, forQuerying } // instantiate the index set, add it to the map - indexSet, err = NewIndexSet(t.name, id, filepath.Join(t.cacheLocation, id), baseIndexSet, t.boltDBIndexClient, - loggerWithUserID(t.logger, id), t.metrics) + indexSet, err = NewIndexSet(t.name, id, filepath.Join(t.cacheLocation, id), baseIndexSet, t.boltDBIndexClient, loggerWithUserID(t.logger, id)) if err != nil { return nil, err } From 8838530976e13f6969849cffd53ca03445bc87a4 Mon Sep 17 00:00:00 2001 From: Danny Kopping Date: Fri, 6 May 2022 12:24:37 +0200 Subject: [PATCH 078/223] Document `/services` API endpoint (#6111) * Documented /services API endpoint Signed-off-by: Danny Kopping * Grammar Signed-off-by: Danny Kopping --- docs/sources/api/_index.md | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/docs/sources/api/_index.md b/docs/sources/api/_index.md index 5804a167a5a42..639300a56288d 100644 --- a/docs/sources/api/_index.md +++ b/docs/sources/api/_index.md @@ -19,6 +19,7 @@ These endpoints are exposed by all components: - [`GET /ready`](#get-ready) - [`GET /metrics`](#get-metrics) - [`GET /config`](#get-config) +- [`GET /services`](#get-services) - [`GET /loki/api/v1/status/buildinfo`](#get-lokiapiv1statusbuildinfo) These endpoints are exposed by the querier and the query frontend: @@ -818,6 +819,19 @@ and the current are returned. A value of `defaults` returns the default configur In microservices mode, the `/config` endpoint is exposed by all components. +## `GET /services` + +`/services` returns a list of all running services and their current states. 
+ +Services can have the following states: + +- **New**: Service is new, not running yet (initial state) +- **Starting**: Service is starting; if starting succeeds, service enters **Running** state +- **Running**: Service is fully running now; when service stops running, it enters **Stopping** state +- **Stopping**: Service is shutting down +- **Terminated**: Service has stopped successfully (terminal state) +- **Failed**: Service has failed in **Starting**, **Running** or **Stopping** state (terminal state) + ## `GET /loki/api/v1/status/buildinfo` `/loki/api/v1/status/buildinfo` exposes the build information in a JSON object. The fields are `version`, `revision`, `branch`, `buildDate`, `buildUser`, and `goVersion`. From 55ea89f9fded65b5c8a11dde4649d8b20e8fe296 Mon Sep 17 00:00:00 2001 From: Owen Diehl Date: Fri, 6 May 2022 08:25:14 -0400 Subject: [PATCH 079/223] Tsdb/head wal feedback (#6107) * pr feedback, improved mutex unlocking * tenant heads use sync.Map * tenant heads bench * Revert "tenant heads use sync.Map" This reverts commit 64adebc5354f270d8a27333678543a5c83edbefe. * removes old code the removed TSDBs outside the shipper + minor fixes --- pkg/storage/stores/tsdb/chunkwriter.go | 2 +- pkg/storage/stores/tsdb/head.go | 15 ++-- pkg/storage/stores/tsdb/head_manager.go | 86 ++++++++------------ pkg/storage/stores/tsdb/head_manager_test.go | 56 +++++++++++++ 4 files changed, 97 insertions(+), 62 deletions(-) diff --git a/pkg/storage/stores/tsdb/chunkwriter.go b/pkg/storage/stores/tsdb/chunkwriter.go index 5b2da8f0db4c5..6241ec47d500c 100644 --- a/pkg/storage/stores/tsdb/chunkwriter.go +++ b/pkg/storage/stores/tsdb/chunkwriter.go @@ -50,7 +50,7 @@ func (w *ChunkWriter) Put(ctx context.Context, chunks []chunk.Chunk) error { } func (w *ChunkWriter) PutOne(ctx context.Context, from, through model.Time, chk chunk.Chunk) error { - log, ctx := spanlogger.New(ctx, "SeriesStore.PutOne") + log, ctx := spanlogger.New(ctx, "TSDBStore.PutOne") defer log.Finish() // with local TSDB indices, we _always_ write the index entry diff --git a/pkg/storage/stores/tsdb/head.go b/pkg/storage/stores/tsdb/head.go index 3e074ac372b7d..dac903aa65504 100644 --- a/pkg/storage/stores/tsdb/head.go +++ b/pkg/storage/stores/tsdb/head.go @@ -103,20 +103,15 @@ type Head struct { series *stripeSeries postings *index.MemPostings // Postings lists for terms. - - closedMtx sync.Mutex - closed bool } func NewHead(tenant string, metrics *Metrics, logger log.Logger) *Head { return &Head{ - tenant: tenant, - metrics: metrics, - logger: logger, - series: newStripeSeries(), - postings: index.NewMemPostings(), - closedMtx: sync.Mutex{}, - closed: false, + tenant: tenant, + metrics: metrics, + logger: logger, + series: newStripeSeries(), + postings: index.NewMemPostings(), } } diff --git a/pkg/storage/stores/tsdb/head_manager.go b/pkg/storage/stores/tsdb/head_manager.go index 93bfe015086f2..7ab5003cdb50e 100644 --- a/pkg/storage/stores/tsdb/head_manager.go +++ b/pkg/storage/stores/tsdb/head_manager.go @@ -27,10 +27,28 @@ import ( "github.com/grafana/loki/pkg/util/wal" ) +/* +period is a duration which the ingesters use to group index writes into a (WAL,TenantHeads) pair. +After each period elapses, a set of zero or more multitenant TSDB indices are built (one per +index bucket, generally 24h). + +It's important to note that this cycle occurs in real time as opposed to the timestamps of +chunk entries. Index writes during the `period` may span multiple index buckets. 
Periods +also expose some helper functions to get the remainder-less offset integer for that period, +which we use in file creation/etc. +*/ type period time.Duration const defaultRotationPeriod = period(15 * time.Minute) +func (p period) PeriodFor(t time.Time) int { + return int(t.UnixNano() / int64(p)) +} + +func (p period) TimeForPeriod(n int) time.Time { + return time.Unix(0, int64(p)*int64(n)) +} + // Do not specify without bit shifting. This allows us to // do shard index calcuations via bitwise & rather than modulos. const defaultHeadManagerStripeSize = 1 << 7 @@ -125,10 +143,13 @@ func (m *HeadManager) Stop() error { } func (m *HeadManager) Append(userID string, ls labels.Labels, chks index.ChunkMetas) error { - labelsBuilder := labels.NewBuilder(ls) // TSDB doesnt need the __name__="log" convention the old chunk store index used. - labelsBuilder.Del("__name__") - metric := labelsBuilder.Labels() + for i, l := range ls { + if l.Name == labels.MetricName { + ls = append(ls[:i], ls[i+1:]...) + break + } + } m.mtx.RLock() now := time.Now() @@ -140,18 +161,10 @@ func (m *HeadManager) Append(userID string, ls labels.Labels, chks index.ChunkMe m.mtx.RLock() } defer m.mtx.RUnlock() - rec := m.activeHeads.Append(userID, metric, chks) + rec := m.activeHeads.Append(userID, ls, chks) return m.active.Log(rec) } -func (p period) PeriodFor(t time.Time) int { - return int(t.UnixNano() / int64(p)) -} - -func (p period) TimeForPeriod(n int) time.Time { - return time.Unix(0, int64(p)*int64(n)) -} - func (m *HeadManager) Start() error { if err := os.RemoveAll(filepath.Join(m.dir, "scratch")); err != nil { return errors.Wrap(err, "removing tsdb scratch dir") @@ -166,17 +179,6 @@ func (m *HeadManager) Start() error { now := time.Now() curPeriod := m.period.PeriodFor(now) - toRemove, err := m.shippedTSDBsBeforePeriod(curPeriod) - if err != nil { - return err - } - - for _, x := range toRemove { - if err := os.RemoveAll(x); err != nil { - return errors.Wrapf(err, "removing tsdb: %s", x) - } - } - walsByPeriod, err := walsByPeriod(m.dir, m.period) if err != nil { return err @@ -185,7 +187,7 @@ func (m *HeadManager) Start() error { m.activeHeads = newTenantHeads(now, m.shards, m.metrics, m.log) for _, group := range walsByPeriod { - if group.period < (curPeriod) { + if group.period < curPeriod { if err := m.tsdbManager.BuildFromWALs( m.period.TimeForPeriod(group.period), group.wals, @@ -198,7 +200,7 @@ func (m *HeadManager) Start() error { } } - if group.period == curPeriod { + if group.period >= curPeriod { if err := recoverHead(m.dir, m.activeHeads, group.wals); err != nil { return errors.Wrap(err, "recovering tsdb head from wal") } @@ -308,21 +310,6 @@ func (m *HeadManager) Rotate(t time.Time) error { return nil } -func (m *HeadManager) shippedTSDBsBeforePeriod(period int) (res []string, err error) { - files, err := ioutil.ReadDir(managerPerTenantDir(m.dir)) - if err != nil { - return nil, err - } - for _, f := range files { - if id, ok := parseMultitenantTSDBPath(f.Name()); ok { - if found := m.period.PeriodFor(id.ts); found < period { - res = append(res, f.Name()) - } - } - } - return -} - type WalGroup struct { period int wals []WALIdentifier @@ -429,6 +416,7 @@ func recoverHead(dir string, heads *tenantHeads, wals []WALIdentifier) error { return err } + // labels are always written to the WAL before corresponding chunks if len(rec.Series.Labels) > 0 { tenant, ok := seriesMap[rec.UserID] if !ok { @@ -571,12 +559,12 @@ func (t *tenantHeads) Bounds() (model.Time, model.Time) { return 
model.Time(t.mint.Load()), model.Time(t.maxt.Load()) } -func (t *tenantHeads) tenantIndex(userID string, from, through model.Time) (idx Index, unlock func(), ok bool) { +func (t *tenantHeads) tenantIndex(userID string, from, through model.Time) (idx Index, ok bool) { i := t.shardForTenant(userID) t.locks[i].RLock() + defer t.locks[i].RUnlock() tenant, ok := t.tenants[i][userID] if !ok { - t.locks[i].RUnlock() return } @@ -584,47 +572,43 @@ func (t *tenantHeads) tenantIndex(userID string, from, through model.Time) (idx if t.chunkFilter != nil { idx.SetChunkFilterer(t.chunkFilter) } - return idx, t.locks[i].RUnlock, true + return idx, true } func (t *tenantHeads) GetChunkRefs(ctx context.Context, userID string, from, through model.Time, res []ChunkRef, shard *index.ShardAnnotation, matchers ...*labels.Matcher) ([]ChunkRef, error) { - idx, unlock, ok := t.tenantIndex(userID, from, through) + idx, ok := t.tenantIndex(userID, from, through) if !ok { return nil, nil } - defer unlock() return idx.GetChunkRefs(ctx, userID, from, through, nil, shard, matchers...) } // Series follows the same semantics regarding the passed slice and shard as GetChunkRefs. func (t *tenantHeads) Series(ctx context.Context, userID string, from, through model.Time, res []Series, shard *index.ShardAnnotation, matchers ...*labels.Matcher) ([]Series, error) { - idx, unlock, ok := t.tenantIndex(userID, from, through) + idx, ok := t.tenantIndex(userID, from, through) if !ok { return nil, nil } - defer unlock() return idx.Series(ctx, userID, from, through, nil, shard, matchers...) } func (t *tenantHeads) LabelNames(ctx context.Context, userID string, from, through model.Time, matchers ...*labels.Matcher) ([]string, error) { - idx, unlock, ok := t.tenantIndex(userID, from, through) + idx, ok := t.tenantIndex(userID, from, through) if !ok { return nil, nil } - defer unlock() return idx.LabelNames(ctx, userID, from, through, matchers...) } func (t *tenantHeads) LabelValues(ctx context.Context, userID string, from, through model.Time, name string, matchers ...*labels.Matcher) ([]string, error) { - idx, unlock, ok := t.tenantIndex(userID, from, through) + idx, ok := t.tenantIndex(userID, from, through) if !ok { return nil, nil } - defer unlock() return idx.LabelValues(ctx, userID, from, through, name, matchers...) 
}
diff --git a/pkg/storage/stores/tsdb/head_manager_test.go b/pkg/storage/stores/tsdb/head_manager_test.go
index 0b032e9e4fcaf..a0d41c2a351e1 100644
--- a/pkg/storage/stores/tsdb/head_manager_test.go
+++ b/pkg/storage/stores/tsdb/head_manager_test.go
@@ -2,7 +2,9 @@ package tsdb
 import (
 	"context"
+	"fmt"
 	"math"
+	"sync"
 	"testing"
 	"time"
@@ -308,3 +310,57 @@ func Test_HeadManager_Lifecycle(t *testing.T) {
 		require.Equal(t, chunkMetasToChunkRefs(c.User, c.Labels.Hash(), c.Chunks), refs)
 	}
 }
+
+func BenchmarkTenantHeads(b *testing.B) {
+	for _, tc := range []struct {
+		readers, writers int
+	}{
+		{
+			readers: 10,
+		},
+		{
+			readers: 100,
+		},
+		{
+			readers: 1000,
+		},
+	} {
+		b.Run(fmt.Sprintf("%d", tc.readers), func(b *testing.B) {
+			heads := newTenantHeads(time.Now(), defaultHeadManagerStripeSize, NewMetrics(nil), log.NewNopLogger())
+			// 1000 series spread across 10 tenants
+			nTenants := 10
+			for i := 0; i < 1000; i++ {
+				tenant := i % nTenants
+				ls := mustParseLabels(fmt.Sprintf(`{foo="bar", i="%d"}`, i))
+				heads.Append(fmt.Sprint(tenant), ls, index.ChunkMetas{
+					{},
+				})
+			}
+
+			for n := 0; n < b.N; n++ {
+				var wg sync.WaitGroup
+				for r := 0; r < tc.readers; r++ {
+					wg.Add(1)
+					go func(r int) {
+						defer wg.Done()
+						var res []ChunkRef
+						tenant := r % nTenants
+
+						// nolint:ineffassign,staticcheck
+						res, _ = heads.GetChunkRefs(
+							context.Background(),
+							fmt.Sprint(tenant),
+							0, math.MaxInt64,
+							res,
+							nil,
+							labels.MustNewMatcher(labels.MatchEqual, "foo", "bar"),
+						)
+					}(r)
+				}
+
+				wg.Wait()
+			}
+
+		})
+	}
+}
From 41647d1302e12ec5a4867d4fcf126e2a08333631 Mon Sep 17 00:00:00 2001
From: Periklis Tsirakidis
Date: Fri, 6 May 2022 14:26:58 +0200
Subject: [PATCH 080/223] operator: Ruler enhancement proposal (#5985)

---
 .../docs/enhancements/enhancement_template.md | 139 +++
 operator/docs/enhancements/ruler_support.md   | 853 ++++++++++++++++++
 2 files changed, 992 insertions(+)
 create mode 100644 operator/docs/enhancements/enhancement_template.md
 create mode 100644 operator/docs/enhancements/ruler_support.md

diff --git a/operator/docs/enhancements/enhancement_template.md b/operator/docs/enhancements/enhancement_template.md
new file mode 100644
index 0000000000000..815af4f33e6ec
--- /dev/null
+++ b/operator/docs/enhancements/enhancement_template.md
@@ -0,0 +1,139 @@
+---
+title: neat-enhancement-idea
+authors:
+  - TBD
+reviewers: # Include a comment about what domain expertise a reviewer is expected to bring and what area of the enhancement you expect them to focus on. For example: - "@networkguru, for networking aspects, please look at IP bootstrapping aspect"
+  - TBD
+creation-date: yyyy-mm-dd
+last-updated: yyyy-mm-dd
+tracking-link: # link to the tracking ticket (for example: Github Issue) that corresponds to this enhancement
+  - TBD
+see-also:
+  - "/enhancements/this-other-neat-thing.md"
+replaces:
+  - "/enhancements/that-less-than-great-idea.md"
+superseded-by:
+  - "/enhancements/our-past-effort.md"
+---
+
+To get started with this template:
+1. **Make a copy of this template.** Copy this template into a new file.
+2. **Fill out the "overview" sections.** This includes the Summary and
+   Motivation sections. These should be easy and explain why the community
+   should desire this enhancement.
+3. **Create a PR.** Assign it to folks with expertise in that domain to help
+   sponsor the process.
+
+Start by filling out the header with the metadata for this enhancement.
+
+# Neat Enhancement Idea
+
+This is the title of the enhancement. Keep it simple and descriptive.
A good +title can help communicate what the enhancement is and should be considered as +part of any review. + +The YAML `title` should be lowercased and spaces/punctuation should be +replaced with `-`. + +The `Metadata` section above is intended to support the creation of tooling +around the enhancement process. + +## Summary + +The `Summary` section is incredibly important for producing high quality +user-focused documentation such as release notes or a development roadmap. It +should be possible to collect this information before implementation begins in +order to avoid requiring implementors to split their attention between writing +release notes and implementing the feature itself. + +A good summary is no more than one paragraph in length. More detail +should go into the following sections. + +## Motivation + +This section is for explicitly listing the motivation, goals and non-goals of +this proposal. Describe why the change is important and the benefits to users. + +### Goals + +Summarize the specific goals of the proposal. How will we know that +this has succeeded? A good goal describes something a user wants from +their perspective, and does not include the implementation details +from the proposal. + +### Non-Goals + +What is out of scope for this proposal? Listing non-goals helps to +focus discussion and make progress. Highlight anything that is being +deferred to a later phase of implementation that may call for its own +enhancement. + +## Proposal + +This is where we get down to the nitty gritty of what the proposal +actually is. Describe clearly what will be changed, including all of +the components that need to be modified and how they will be +different. Include the reason for each choice in the design and +implementation that is proposed here, and expand on reasons for not +choosing alternatives in the Alternatives section at the end of the +document. + +### API Extensions + +API Extensions are CRDs, admission and conversion webhooks, aggregated API servers, +and finalizers, i.e. those mechanisms that change the Kubernetes API surface and behaviour. + +- Name the API extensions this enhancement adds or modifies. +- Does this enhancement modify the behaviour of existing resources, especially those owned + by other parties than the authoring team (including upstream resources), and, if yes, how? + Please add those other parties as reviewers to the enhancement. + + Examples: + - Adds a finalizer to namespaces. Namespace cannot be deleted without our controller running. + - Restricts the label format for objects to X. + - Defaults field Y on object kind Z. + +Fill in the operational impact of these API Extensions in the "Operational Aspects +of API Extensions" section. + +### Implementation Details/Notes/Constraints [optional] + +What are the caveats to the implementation? What are some important details that +didn't come across above. Go in to as much detail as necessary here. This might +be a good place to talk about core concepts and how they relate. + +### Risks and Mitigations + +What are the risks of this proposal and how do we mitigate. Think broadly. For +example, consider both security and how this will impact the larger operator +ecosystem. + +How will security be reviewed and by whom? + +How will UX be reviewed and by whom? + +Consider including folks that also work outside your immediate sub-project. + +## Design Details + +### Open Questions [optional] + +This is where to call out areas of the design that require closure before deciding +to implement the design. 
For instance,
+ > 1. This requires exposing previously private resources which contain sensitive
+      information. Can we do this?
+
+## Implementation History
+
+Major milestones in the life cycle of a proposal should be tracked in `Implementation
+History`.
+
+## Drawbacks
+
+The idea is to find the best form of an argument why this enhancement should _not_ be implemented.
+
+## Alternatives
+
+Similar to the `Drawbacks` section the `Alternatives` section is used to
+highlight and record other possible approaches to delivering the value proposed
+by an enhancement.
diff --git a/operator/docs/enhancements/ruler_support.md b/operator/docs/enhancements/ruler_support.md
new file mode 100644
index 0000000000000..a7c9935d66bc0
--- /dev/null
+++ b/operator/docs/enhancements/ruler_support.md
@@ -0,0 +1,853 @@
+---
+title: lokistack ruler support
+authors:
+  - @periklis
+reviewers:
+  - @xperimental
+  - @cyriltovena
+creation-date: 2022-04-21
+last-updated: 2022-04-21
+tracking-link:
+  - [5211](https://github.com/grafana/loki/issues/5211)
+  - [5843](https://github.com/grafana/loki/issues/5843)
+see-also:
+  - [PR 5986](https://github.com/grafana/loki/pull/5986)
+---
+
+# LokiStack Ruler Support
+
+## Summary
+
+The ruler has been an integral part of Loki since release v2.2.0 and has been feature-complete since v2.3.0, which added support for recording rules. This component enables users to define groups of LogQL expressions that trigger alerts (e.g. a high amount of error logs in a production service) and/or record post-evaluation metrics (e.g. the per-minute rate of incoming error logs for a production service). The following sections describe a set of APIs in the form of custom resource definitions (CRD) that enable users of `LokiStack` resources to:
+- Operate the ruler optimized for a `LokiStack` size, i.e. `1x.extra-small`, `1x.small` and `1x.medium`.
+- Define Loki alerting and recording rules using a Kubernetes custom resource (e.g. similar to `PrometheusRule` in [prometheus-operator](https://github.com/prometheus-operator/prometheus-operator/)).
+- Define a Kubernetes custom resource for the ruler to connect to a [remote write](https://grafana.com/docs/loki/latest/configuration/#ruler) endpoint and store log-based metrics.
+- Define a Kubernetes custom resource for the ruler to connect to an [Alertmanager](https://grafana.com/docs/loki/latest/configuration/#ruler) endpoint and notify on firing alerts.
+
+## Motivation
+
+The Loki Operator manages `LokiStack` resources that consist of a set of Loki components for ingestion/querying and optionally a gateway microservice that ensures authenticated and authorized access to logs stored by Loki. The addition of the Loki ruler component introduces a new component type that acts as an automated user requesting logs from Loki. The ruler is supposed to work autonomously with direct access to object storage, i.e. it acts as its own querier ([see more details](https://grafana.com/docs/loki/latest/operations/recording-rules/)) while respecting the query [limits](https://grafana.com/docs/loki/latest/configuration/#limits_config). Thus the motivation of the following proposal is to enable operating the ruler as an integral but optional part of a `LokiStack` instance, as well as to support Kubernetes-native configuration for declaring rules and external dependencies (e.g. remote_write, alertmanager).
+
+### Goals
+
+* The user can enable the ruler component via the `LokiStack` custom resource.
+* The user can declare per-namespace custom resources for sets of Loki alerting and recording rules.
+* The user can optionally declare an endpoint for the `LokiStack` ruler to write recording-rule metrics.
+* The user can optionally declare an endpoint for the `LokiStack` ruler to notify on firing alerts.
+* The Loki Operator manages a ruler optimized for the selected `LokiStack` T-Shirt size.
+
+### Non-Goals
+
+* Reconciling stateless instances of the ruler component.
+* Accessing rules and ruler configuration via the `LokiStack` gateway microservice.
+* Support for viewing/editing rules via a UI tool of choice.
+* Fully automated reconciliation of remote-write and AlertManager configuration on OpenShift.
+
+## Proposal
+
+The following enhancement proposal describes the required API additions and changes in the Loki Operator to add support for reconciling the Loki ruler component as an integral part of `LokiStack` resources. In addition to the component itself, it provides additional CRDs that enable users to declare alerting and recording rules. Furthermore, it describes a CRD that allows configuring the ruler's `remote_write` and `alertmanager` connectivity capabilities.
+
+In summary, the `LokiStack` ruler component will support at minimum the following capabilities by default:
+- Use the Write-Ahead-Log to enable higher availability and mitigate data loss for recording rules (See [here](https://grafana.com/docs/loki/latest/operations/recording-rules/#write-ahead-log-wal)).
+- Use a ring to allow horizontal scaling of the ruler per group (See [here](https://grafana.com/docs/loki/latest/operations/recording-rules/#scaling)).
+- Use sharding by default (See [enable_sharding](https://grafana.com/docs/loki/latest/configuration/#ruler)).
+- Re-use the same store configuration of an existing `LokiStack` instance.
+- Re-use the same ring configuration of an existing `LokiStack` instance (by default only memberlist is supported).
+
+### API Extensions
+
+#### LokiStack Changes: Support for the ruler component
+
+The following section describes a set of changes that enable reconciling the Loki ruler component as an optional StatefulSet. The Loki ruler component spec inherits the same node placement capabilities as the rest of the `LokiStack` components, i.e. currently node-selectors and tolerations (defined in `LokiComponentSpec`).
+
+```go
+// RulesSpec defines the spec for the ruler component.
+type RulesSpec struct {
+	// Enabled defines a flag to enable/disable the ruler component
+	//
+	// +required
+	// +kubebuilder:validation:Required
+	// +operator-sdk:csv:customresourcedefinitions:type=spec,xDescriptors="urn:alm:descriptor:com.tectonic.ui:booleanSwitch",displayName="Enable"
+	Enabled bool `json:"enabled"`
+
+	// A selector to select which Loki rules to mount for loading alerting/recording
+	// rules from.
+	//
+	// +optional
+	// +kubebuilder:validation:Optional
+	// +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Selector"
+	Selector *metav1.LabelSelector `json:"selector,omitempty"`
+
+	// Namespaces to be selected for AlertingRules/RecordingRules discovery. If unspecified,
+	// only the namespace of the LokiStack object is used.
+	//
+	// +optional
+	// +kubebuilder:validation:Optional
+	// +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Namespace Selector"
+	NamespaceSelector *metav1.LabelSelector `json:"namespaceSelector,omitempty"`
+}
+
+// LokiStackSpec defines the desired state of LokiStack
+type LokiStackSpec struct {
+...
+	// Rules defines the spec for the ruler component
+	//
+	// +optional
+	// +kubebuilder:validation:Optional
+	// +operator-sdk:csv:customresourcedefinitions:type=spec,xDescriptors="urn:alm:descriptor:com.tectonic.ui:advanced",displayName="Rules"
+	Rules *RulesSpec `json:"rules,omitempty"`
+...
+}
+
+// LokiTemplateSpec defines the template of all requirements to configure
+// scheduling of all Loki components to be deployed.
+type LokiTemplateSpec struct {
+...
+	// Ruler defines the ruler component spec.
+	//
+	// +optional
+	// +kubebuilder:validation:Optional
+	// +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Ruler pods"
+	Ruler *LokiComponentSpec `json:"ruler,omitempty"`
+}
+
+// LokiStackComponentStatus defines the map of per pod status per LokiStack component.
+// Each component is represented by a separate map of v1.Phase to a list of pods.
+type LokiStackComponentStatus struct {
+...
+	// Ruler is a map to the per pod status of the LokiStack ruler StatefulSet.
+	//
+	// +optional
+	// +kubebuilder:validation:Optional
+	// +operator-sdk:csv:customresourcedefinitions:type=status,xDescriptors="urn:alm:descriptor:com.tectonic.ui:podStatuses",displayName="Ruler",order=6
+	Ruler PodStatusMap `json:"ruler,omitempty"`
+}
+```
+
+#### AlertingRule definition
+
+The `AlertingRule` CRD comprises a set of specifications and webhook validation definitions to declare groups of alerting rules for a single `LokiStack` instance. The syntax for the rule groups resembles the official [Prometheus Rule syntax](https://prometheus.io/docs/prometheus/latest/configuration/recording_rules/#recording-rules). In addition, the webhook validation definition provides support for the following rule validation conditions:
+
+1. If an `AlertingRule` includes an invalid `interval` period, it is an invalid alerting rule.
+2. If an `AlertingRule` includes an invalid `for` period, it is an invalid alerting rule.
+3. If an `AlertingRule` includes an invalid LogQL `expr`, it is an invalid alerting rule.
+4. If an `AlertingRule` includes two groups with the same name, it is an invalid alerting rule.
+5. If none of the above applies, an `AlertingRule` is considered a valid alerting rule.
+
+```go
+// AlertingRuleSpec defines the desired state of AlertingRule
+type AlertingRuleSpec struct {
+	// Tenant to associate the alerting rule groups.
+	//
+	// +required
+	// +kubebuilder:validation:Required
+	// +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Tenant ID"
+	TenantID string `json:"tenantID"`
+
+	// List of groups for alerting rules.
+	//
+	// +optional
+	// +kubebuilder:validation:Optional
+	// +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Groups"
+	Groups []*AlertingRuleGroup `json:"groups"`
+}
+
+// AlertingRuleGroup defines a group of Loki alerting rules.
+type AlertingRuleGroup struct {
+	// Name defines a name of the present recording/alerting rule. Must be unique
+	// within all Loki rules.
+	//
+	// +required
+	// +kubebuilder:validation:Required
+	// +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Name"
+	Name string `json:"name"`
+
+	// Interval defines the time interval between evaluation of the given
+	// alerting rule.
+	//
+	// +optional
+	// +kubebuilder:validation:Optional
+	// +kubebuilder:default:="1m"
+	// +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Evaluation Interval"
+	Interval PrometheusDuration `json:"interval"`
+
+	// Limit defines the number of alerts an alerting rule can produce. 0 is no limit.
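+	// For example, `limit: 10` would cap the alerts produced by this group at
+	// ten per evaluation cycle (assumed to mirror the per-group `limit` field
+	// of Prometheus rule groups).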
+	//
+	// +optional
+	// +kubebuilder:validation:Optional
+	// +operator-sdk:csv:customresourcedefinitions:type=spec,xDescriptors="urn:alm:descriptor:com.tectonic.ui:number",displayName="Limit of firing alerts"
+	Limit int32 `json:"limit,omitempty"`
+
+	// Rules defines a list of alerting rules.
+	//
+	// +required
+	// +kubebuilder:validation:Required
+	// +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Rules"
+	Rules []*AlertingRuleGroupSpec `json:"rules"`
+}
+
+// AlertingRuleGroupSpec defines the spec for a Loki alerting rule.
+type AlertingRuleGroupSpec struct {
+	// The name of the alert. Must be a valid label value.
+	//
+	// +optional
+	// +kubebuilder:validation:Optional
+	// +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Name"
+	Alert string `json:"alert,omitempty"`
+
+	// The LogQL expression to evaluate. Every evaluation cycle this is
+	// evaluated at the current time, and all resultant time series become
+	// pending/firing alerts.
+	//
+	// +required
+	// +kubebuilder:validation:Required
+	// +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="LogQL Expression"
+	Expr string `json:"expr"`
+
+	// Alerts are considered firing once they have been returned for this long.
+	// Alerts which have not yet fired for long enough are considered pending.
+	//
+	// +optional
+	// +kubebuilder:validation:Optional
+	// +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Firing Threshold"
+	For PrometheusDuration `json:"for,omitempty"`
+
+	// Annotations to add to each alert.
+	//
+	// +optional
+	// +kubebuilder:validation:Optional
+	// +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Annotations"
+	Annotations map[string]string `json:"annotations,omitempty"`
+
+	// Labels to add to each alert.
+	//
+	// +optional
+	// +kubebuilder:validation:Optional
+	// +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Labels"
+	Labels map[string]string `json:"labels,omitempty"`
+}
+
+// AlertingRuleStatus defines the observed state of AlertingRule
+type AlertingRuleStatus struct {
+	// Conditions of the AlertingRule generation health.
+	//
+	// +optional
+	// +kubebuilder:validation:Optional
+	// +operator-sdk:csv:customresourcedefinitions:type=status,xDescriptors="urn:alm:descriptor:io.kubernetes.conditions"
+	Conditions []metav1.Condition `json:"conditions,omitempty"`
+}
+
+//+kubebuilder:object:root=true
+//+kubebuilder:subresource:status
+
+// AlertingRule is the Schema for the alertingrules API
+//
+// +operator-sdk:csv:customresourcedefinitions:displayName="AlertingRule",resources={{LokiStack,v1beta1}}
+type AlertingRule struct {
+	metav1.TypeMeta   `json:",inline"`
+	metav1.ObjectMeta `json:"metadata,omitempty"`
+
+	Spec   AlertingRuleSpec   `json:"spec,omitempty"`
+	Status AlertingRuleStatus `json:"status,omitempty"`
+}
+```
+
+#### RecordingRule definition
+
+The `RecordingRule` CRD comprises a set of specifications and webhook validation definitions to declare groups of recording rules for a single `LokiStack` instance. The syntax for the rule groups resembles the official [Prometheus Rule syntax](https://prometheus.io/docs/prometheus/latest/configuration/recording_rules/#recording-rules). In addition, the webhook validation definition provides support for the following rule validation conditions:
+
+1. If a `RecordingRule` includes an invalid `interval` period, it is an invalid recording rule.
+2. If a `RecordingRule` includes an invalid metric name for `record`, it is an invalid recording rule.
+3. If a `RecordingRule` includes an invalid LogQL `expr`, it is an invalid recording rule.
+4. If a `RecordingRule` includes two groups with the same name, it is an invalid recording rule.
+5. If none of the above applies, a `RecordingRule` is considered a valid recording rule.
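+
+An example `RecordingRule` that would pass these validations might look as follows (a sketch only; all names and the LogQL expression are illustrative, not taken from a real cluster):
+
+```yaml
+apiVersion: loki.grafana.com/v1beta1
+kind: RecordingRule
+metadata:
+  name: recording-rule-a
+  namespace: ns-a
+spec:
+  tenantID: application
+  groups:
+    - name: http-errors
+      interval: 1m
+      rules:
+        - record: myapp:error_logs:rate1m
+          expr: sum(rate({container="myapp"} |= "error" [1m]))
+```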
+
+```go
+// RecordingRuleSpec defines the desired state of RecordingRule
+type RecordingRuleSpec struct {
+	// Tenant to associate the recording rule groups.
+	//
+	// +required
+	// +kubebuilder:validation:Required
+	// +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Tenant ID"
+	TenantID string `json:"tenantID"`
+
+	// List of groups for recording rules.
+	//
+	// +optional
+	// +kubebuilder:validation:Optional
+	// +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Groups"
+	Groups []*RecordingRuleGroup `json:"groups"`
+}
+
+// RecordingRuleGroup defines a group of Loki recording rules.
+type RecordingRuleGroup struct {
+	// Name defines a name of the present recording rule. Must be unique
+	// within all Loki rules.
+	//
+	// +required
+	// +kubebuilder:validation:Required
+	// +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Name"
+	Name string `json:"name"`
+
+	// Interval defines the time interval between evaluation of the given
+	// recording rule.
+	//
+	// +optional
+	// +kubebuilder:validation:Optional
+	// +kubebuilder:default:="1m"
+	// +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Evaluation Interval"
+	Interval PrometheusDuration `json:"interval"`
+
+	// Limit defines the number of series a recording rule can produce. 0 is no limit.
+	//
+	// +optional
+	// +kubebuilder:validation:Optional
+	// +operator-sdk:csv:customresourcedefinitions:type=spec,xDescriptors="urn:alm:descriptor:com.tectonic.ui:number",displayName="Limit of produced series"
+	Limit int32 `json:"limit,omitempty"`
+
+	// Rules defines a list of recording rules.
+	//
+	// +required
+	// +kubebuilder:validation:Required
+	// +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Rules"
+	Rules []*RecordingRuleGroupSpec `json:"rules"`
+}
+
+// RecordingRuleGroupSpec defines the spec for a Loki recording rule.
+type RecordingRuleGroupSpec struct {
+	// The name of the time series to output to. Must be a valid metric name.
+	//
+	// +optional
+	// +kubebuilder:validation:Optional
+	// +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Metric Name"
+	Record string `json:"record,omitempty"`
+
+	// The LogQL expression to evaluate. Every evaluation cycle this is
+	// evaluated at the current time, and the result is recorded as a new
+	// set of time series with the metric name given by `record`.
+	//
+	// +required
+	// +kubebuilder:validation:Required
+	// +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="LogQL Expression"
+	Expr string `json:"expr"`
+}
+
+// RecordingRuleStatus defines the observed state of RecordingRule
+type RecordingRuleStatus struct {
+	// Conditions of the RecordingRule generation health.
+	//
+	// +optional
+	// +kubebuilder:validation:Optional
+	// +operator-sdk:csv:customresourcedefinitions:type=status,xDescriptors="urn:alm:descriptor:io.kubernetes.conditions"
+	Conditions []metav1.Condition `json:"conditions,omitempty"`
+}
+
+//+kubebuilder:object:root=true
+//+kubebuilder:subresource:status
+
+// RecordingRule is the Schema for the recordingrules API
+//
+// +operator-sdk:csv:customresourcedefinitions:displayName="RecordingRule",resources={{LokiStack,v1beta1}}
+type RecordingRule struct {
+	metav1.TypeMeta   `json:",inline"`
+	metav1.ObjectMeta `json:"metadata,omitempty"`
+
+	Spec   RecordingRuleSpec   `json:"spec,omitempty"`
+	Status RecordingRuleStatus `json:"status,omitempty"`
+}
+```
+
+#### RulerConfig definition
+
+The following CRD defines the ruler configuration to access AlertManager hosts and a global remote-write endpoint (e.g. Prometheus, Thanos, Cortex). The types are split into three groups:
+1. `RulerSpec`: This spec includes general settings like the evaluation and poll intervals.
+2. `AlertManagerSpec`: This spec includes all settings for pushing alert notifications to a list of AlertManager hosts.
+3. `RemoteWriteSpec`: This spec includes all settings to configure a single global remote write endpoint to send recorded metrics.
+
+**Note**: Sensitive authorization information is provided by a Kubernetes Secret resource, i.e. basic auth user/password or header authorization (see `AuthorizationSecretName`). The Secret is required to live in the same namespace as the `RulerConfig` resource.
+
+```go
+package v1beta1
+
+import (
+	monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// LokiDuration defines the type for Prometheus durations.
+//
+// +kubebuilder:validation:Pattern:="((([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?|0)"
+type LokiDuration string
+
+// AlertManagerDiscoverySpec defines the configuration to use DNS resolution for AlertManager hosts.
+type AlertManagerDiscoverySpec struct {
+	// Use DNS SRV records to discover Alertmanager hosts.
+	//
+	// +optional
+	// +kubebuilder:validation:Optional
+	Enabled bool `json:"enabled"`
+
+	// How long to wait between refreshing DNS resolutions of Alertmanager hosts.
+	//
+	// +optional
+	// +kubebuilder:validation:Optional
+	// +kubebuilder:default:="1m"
+	RefreshInterval LokiDuration `json:"refreshInterval"`
+}
+
+// AlertManagerNotificationSpec defines the configuration for AlertManager notification settings.
+type AlertManagerNotificationSpec struct {
+	// Capacity of the queue for notifications to be sent to the Alertmanager.
+	//
+	// +optional
+	// +kubebuilder:validation:Optional
+	// +kubebuilder:default:=10000
+	// +operator-sdk:csv:customresourcedefinitions:type=spec,xDescriptors="urn:alm:descriptor:com.tectonic.ui:number",displayName="Notification Queue Capacity"
+	QueueCapacity int32 `json:"queueCapacity"`
+
+	// HTTP timeout duration when sending notifications to the Alertmanager.
+	//
+	// +optional
+	// +kubebuilder:validation:Optional
+	// +kubebuilder:default:="10s"
+	Timeout LokiDuration `json:"timeout"`
+
+	// Max time to tolerate outage for restoring "for" state of alert.
+	//
+	// +optional
+	// +kubebuilder:validation:Optional
+	// +kubebuilder:default:="1h"
+	ForOutageTolerance LokiDuration `json:"forOutageTolerance"`
+
+	// Minimum duration between alert and restored "for" state. This is maintained
+	// only for alerts with a configured "for" time greater than the grace period.
+	//
+	// +optional
+	// +kubebuilder:validation:Optional
+	// +kubebuilder:default:="10m"
+	ForGracePeriod LokiDuration `json:"forGracePeriod"`
+
+	// Minimum amount of time to wait before resending an alert to Alertmanager.
+	//
+	// +optional
+	// +kubebuilder:validation:Optional
+	// +kubebuilder:default:="1m"
+	ResendDelay LokiDuration `json:"resendDelay"`
+}
+
+// AlertManagerSpec defines the configuration for the ruler's alertmanager connectivity.
+type AlertManagerSpec struct {
+	// URL for alerts return path.
+	//
+	// +optional
+	// +kubebuilder:validation:Optional
+	ExternalURL string `json:"external_url,omitempty"`
+
+	// Additional labels to add to all alerts.
+	//
+	// +optional
+	// +kubebuilder:validation:Optional
+	ExternalLabels map[string]string `json:"external_labels,omitempty"`
+
+	// If enabled, then requests to Alertmanager use the v2 API.
+	//
+	// +optional
+	// +kubebuilder:validation:Optional
+	// +operator-sdk:csv:customresourcedefinitions:type=spec,xDescriptors="urn:alm:descriptor:com.tectonic.ui:booleanSwitch",displayName="Enable AlertManager V2 API"
+	EnableV2 bool `json:"enableV2"`
+
+	// List of AlertManager URLs to send notifications to. Each Alertmanager URL is treated as
+	// a separate group in the configuration. Multiple Alertmanagers in HA per group can be
+	// supported by using DNS resolution (see DiscoverySpec).
+	//
+	// +required
+	// +kubebuilder:validation:Required
+	Endpoints []string `json:"endpoints"`
+
+	// Defines the configuration for DNS-based discovery of AlertManager hosts.
+	//
+	// +optional
+	// +kubebuilder:validation:Optional
+	// +operator-sdk:csv:customresourcedefinitions:type=spec,xDescriptors="urn:alm:descriptor:com.tectonic.ui:advanced",displayName="DNS Discovery"
+	DiscoverySpec *AlertManagerDiscoverySpec `json:"discoverySpec"`
+
+	// Defines the configuration for the notification queue to AlertManager hosts.
+	//
+	// +optional
+	// +kubebuilder:validation:Optional
+	// +operator-sdk:csv:customresourcedefinitions:type=spec,xDescriptors="urn:alm:descriptor:com.tectonic.ui:advanced",displayName="Notification Queue"
+	NotificationSpec *AlertManagerNotificationSpec `json:"notificationSpec"`
+}
+
+// RemoteWriteAuthType defines the type of authorization to use to access the remote write endpoint.
+//
+// +kubebuilder:validation:Enum=basic;header
+type RemoteWriteAuthType string
+
+const (
+	// BasicAuthorization defines the remote write client to use HTTP basic authorization.
+	BasicAuthorization RemoteWriteAuthType = "basic"
+	// HeaderAuthorization defines the remote write client to use HTTP header authorization.
+	HeaderAuthorization RemoteWriteAuthType = "header"
+)
+
+// RemoteWriteClientSpec defines the configuration of the remote write client.
+type RemoteWriteClientSpec struct {
+	// Name of the remote write config, which if specified must be unique among remote write configs.
+	//
+	// +required
+	// +kubebuilder:validation:Required
+	Name string `json:"name"`
+
+	// The URL of the endpoint to send samples to.
+	//
+	// +required
+	// +kubebuilder:validation:Required
+	URL string `json:"url"`
+
+	// Timeout for requests to the remote write endpoint.
+	//
+	// +optional
+	// +kubebuilder:validation:Optional
+	// +kubebuilder:default:="30s"
+	Timeout LokiDuration `json:"timeout"`
+
+	// Type of authorization to use to access the remote write endpoint.
+	//
+	// +required
+	// +kubebuilder:validation:Required
+	// +operator-sdk:csv:customresourcedefinitions:type=spec,xDescriptors={"urn:alm:descriptor:com.tectonic.ui:select:basic","urn:alm:descriptor:com.tectonic.ui:select:header"},displayName="Authorization Type"
+	AuthorizationType RemoteWriteAuthType `json:"authorization"`
+
+	// Name of a secret in the namespace configured for authorization secrets.
+	//
+	// +required
+	// +kubebuilder:validation:Required
+	// +operator-sdk:csv:customresourcedefinitions:type=spec,xDescriptors="urn:alm:descriptor:io.kubernetes:Secret",displayName="Authorization Secret Name"
+	AuthorizationSecretName string `json:"authorizationSecretName"`
+
+	// Additional HTTP headers to be sent along with each remote write request.
+	//
+	// +optional
+	// +kubebuilder:validation:Optional
+	AdditionalHeaders map[string]string `json:"additionalHeaders,omitempty"`
+
+	// List of remote write relabel configurations.
+	//
+	// +optional
+	// +kubebuilder:validation:Optional
+	RelabelConfigs []monitoringv1.RelabelConfig `json:"relabelConfigs,omitempty"`
+
+	// Optional proxy URL.
+	//
+	// +optional
+	// +kubebuilder:validation:Optional
+	ProxyURL string `json:"proxyUrl"`
+
+	// Configure whether HTTP requests follow HTTP 3xx redirects.
+	//
+	// +optional
+	// +kubebuilder:validation:Optional
+	// +kubebuilder:default:=true
+	// +operator-sdk:csv:customresourcedefinitions:type=spec,xDescriptors="urn:alm:descriptor:com.tectonic.ui:booleanSwitch",displayName="Follow HTTP Redirects"
+	FollowRedirects bool `json:"followRedirects"`
+}
+
+// RemoteWriteClientQueueSpec defines the configuration of the remote write client queue.
+type RemoteWriteClientQueueSpec struct {
+	// Number of samples to buffer per shard before blocking the reading of more samples.
+	//
+	// +optional
+	// +kubebuilder:validation:Optional
+	// +kubebuilder:default:=2500
+	// +operator-sdk:csv:customresourcedefinitions:type=spec,xDescriptors="urn:alm:descriptor:com.tectonic.ui:number",displayName="Queue Capacity"
+	Capacity int32 `json:"capacity"`
+
+	// Maximum number of shards, i.e. amount of concurrency.
+	//
+	// +optional
+	// +kubebuilder:validation:Optional
+	// +kubebuilder:default:=200
+	// +operator-sdk:csv:customresourcedefinitions:type=spec,xDescriptors="urn:alm:descriptor:com.tectonic.ui:number",displayName="Maximum Shards"
+	MaxShards int32 `json:"maxShards"`
+
+	// Minimum number of shards, i.e. amount of concurrency.
+	//
+	// +optional
+	// +kubebuilder:validation:Optional
+	// +kubebuilder:default:=200
+	// +operator-sdk:csv:customresourcedefinitions:type=spec,xDescriptors="urn:alm:descriptor:com.tectonic.ui:number",displayName="Minimum Shards"
+	MinShards int32 `json:"minShards"`
+
+	// Maximum number of samples per send.
+	//
+	// +optional
+	// +kubebuilder:validation:Optional
+	// +kubebuilder:default:=500
+	// +operator-sdk:csv:customresourcedefinitions:type=spec,xDescriptors="urn:alm:descriptor:com.tectonic.ui:number",displayName="Maximum Samples per Send"
+	MaxSamplesPerSend int32 `json:"maxSamplesPerSend"`
+
+	// Maximum time a sample will wait in buffer.
+	//
+	// +optional
+	// +kubebuilder:validation:Optional
+	// +kubebuilder:default:="5s"
+	BatchSendDeadline LokiDuration `json:"batchSendDeadline"`
+
+	// Initial retry delay. Gets doubled for every retry.
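+	// e.g. with the defaults below (30ms initial, 100ms maximum), successive
+	// retries back off 30ms, 60ms, 100ms, 100ms, ...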
+	//
+	// +optional
+	// +kubebuilder:validation:Optional
+	// +kubebuilder:default:="30ms"
+	MinBackOffPeriod LokiDuration `json:"minBackOffPeriod"`
+
+	// Maximum retry delay.
+	//
+	// +optional
+	// +kubebuilder:validation:Optional
+	// +kubebuilder:default:="100ms"
+	MaxBackOffPeriod LokiDuration `json:"maxBackOffPeriod"`
+}
+
+// RemoteWriteSpec defines the configuration for the ruler's remote_write connectivity.
+type RemoteWriteSpec struct {
+	// Enable remote-write functionality.
+	//
+	// +optional
+	// +kubebuilder:validation:Optional
+	// +operator-sdk:csv:customresourcedefinitions:type=spec,xDescriptors="urn:alm:descriptor:com.tectonic.ui:booleanSwitch",displayName="Enabled"
+	Enabled bool `json:"enabled"`
+
+	// Minimum period to wait between refreshing remote-write reconfigurations.
+	//
+	// +optional
+	// +kubebuilder:validation:Optional
+	// +kubebuilder:default:="10s"
+	RefreshPeriod LokiDuration `json:"refreshPeriod"`
+
+	// Defines the configuration for remote write client.
+	//
+	// +optional
+	// +kubebuilder:validation:Optional
+	// +operator-sdk:csv:customresourcedefinitions:type=spec,xDescriptors="urn:alm:descriptor:com.tectonic.ui:advanced",displayName="Client"
+	ClientSpec *RemoteWriteClientSpec `json:"clientSpec"`
+
+	// Defines the configuration for remote write client queue.
+	//
+	// +optional
+	// +kubebuilder:validation:Optional
+	// +operator-sdk:csv:customresourcedefinitions:type=spec,xDescriptors="urn:alm:descriptor:com.tectonic.ui:advanced",displayName="Client Queue"
+	QueueSpec *RemoteWriteClientQueueSpec `json:"queueSpec"`
+}
+
+// RulerSpec defines the desired state of Ruler
+type RulerSpec struct {
+	// Interval on how frequently to evaluate rules.
+	//
+	// +optional
+	// +kubebuilder:validation:Optional
+	// +kubebuilder:default:="1m"
+	EvaluationInterval LokiDuration `json:"evaluationInterval"`
+
+	// Interval on how frequently to poll for new rule definitions.
+	//
+	// +optional
+	// +kubebuilder:validation:Optional
+	// +kubebuilder:default:="1m"
+	PollInterval LokiDuration `json:"pollInterval"`
+
+	// Defines alert manager configuration to notify on firing alerts.
+	//
+	// +optional
+	// +kubebuilder:validation:Optional
+	// +operator-sdk:csv:customresourcedefinitions:type=spec,xDescriptors="urn:alm:descriptor:com.tectonic.ui:advanced",displayName="Alert Manager Configuration"
+	AlertManagerSpec *AlertManagerSpec `json:"alertmanager,omitempty"`
+
+	// Defines a remote write endpoint to write recording rule metrics.
+	//
+	// +optional
+	// +kubebuilder:validation:Optional
+	// +operator-sdk:csv:customresourcedefinitions:type=spec,xDescriptors="urn:alm:descriptor:com.tectonic.ui:advanced",displayName="Remote Write Configuration"
+	RemoteWriteSpec *RemoteWriteSpec `json:"remoteWrite,omitempty"`
+}
+
+// RulerStatus defines the observed state of Ruler
+type RulerStatus struct {
+}
+
+//+kubebuilder:object:root=true
+//+kubebuilder:subresource:status
+
+// Ruler is the Schema for the lokirulers API
+type Ruler struct {
+	metav1.TypeMeta   `json:",inline"`
+	metav1.ObjectMeta `json:"metadata,omitempty"`
+
+	Spec   RulerSpec   `json:"spec,omitempty"`
+	Status RulerStatus `json:"status,omitempty"`
+}
+```
+
+### Implementation Details/Notes/Constraints
+
+#### AlertingRule and RecordingRule reconciliation
+
+`AlertingRule` and `RecordingRule` custom resources are transformed into a single ruler configuration file for a given `LokiStack` per type (i.e. one for alerting and one for recording rules). The following approach is taken:
+1. The `LokiStackController` filters all available cluster namespaces by `RulesSpec.NamespaceSelector`.
+2. The `LokiStackController` first filters all available `AlertingRule` and `RecordingRule` custom resources by `RulesSpec.Selector` and further by the filtered list of namespaces from the previous step.
+3. For `AlertingRule` and `RecordingRule` it transforms the final list into a ConfigMap:
+```yaml
+apiVersion: loki.grafana.com/v1beta1
+kind: AlertingRule
+metadata:
+  name: alerting-rule-a
+  namespace: ns-a
+  uid: kube-uid-a
+spec:
+  tenantID: application
+  groups: ...
+---
+apiVersion: loki.grafana.com/v1beta1
+kind: AlertingRule
+metadata:
+  name: alerting-rule-b
+  namespace: ns-b
+  uid: kube-uid-b
+spec:
+  tenantID: infrastructure
+  groups: ...
+---
+apiVersion: loki.grafana.com/v1beta1
+kind: RecordingRule
+metadata:
+  name: recording-rule-a
+  namespace: ns-a
+  uid: kube-uid-c
+spec:
+  tenantID: application
+  groups: ...
+---
+apiVersion: loki.grafana.com/v1beta1
+kind: RecordingRule
+metadata:
+  name: recording-rule-b
+  namespace: ns-b
+  uid: kube-uid-d
+spec:
+  tenantID: infrastructure
+  groups: ...
+```
+
+results in:
+
+```yaml
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: lokistack-dev-alerting-rules
+  namespace: lokistack-ns
+data:
+  "ns-a-alerting-rule-a-kube-uid-a.yaml": |
+    ...
+  "ns-b-alerting-rule-b-kube-uid-b.yaml": |
+    ...
+  "ns-a-recording-rule-a-kube-uid-c.yaml": |
+    ...
+  "ns-b-recording-rule-b-kube-uid-d.yaml": |
+    ...
+```
+
+4. In addition, the ruler's volume spec is split into `v1.KeyToPath` items based on `AlertingRuleSpec.TenantID` and `RecordingRuleSpec.TenantID`, i.e.:
+```yaml
+spec:
+  template:
+    spec:
+      containers:
+        - name: "ruler"
+          volumeMounts:
+            - name: "rules"
+              mountPath: "/tmp/rules"
+      volumes:
+        - name: "rules"
+          configMap:
+            name: "lokistack-dev-alerting-rules"
+            items:
+              - key: "ns-a-alerting-rule-a-kube-uid-a.yaml"
+                path: "application/ns-a-alerting-rule-a-kube-uid-a.yaml"
+              - key: "ns-b-alerting-rule-b-kube-uid-b.yaml"
+                path: "infrastructure/ns-b-alerting-rule-b-kube-uid-b.yaml"
+              - key: "ns-a-recording-rule-a-kube-uid-c.yaml"
+                path: "application/ns-a-recording-rule-a-kube-uid-c.yaml"
+              - key: "ns-b-recording-rule-b-kube-uid-d.yaml"
+                path: "infrastructure/ns-b-recording-rule-b-kube-uid-d.yaml"
+```
+
+In turn the rules directory is outlined as such:
+
+```
+/tmp/rules/application/ns-a-alerting-rule-a-kube-uid-a.yaml
+          /application/ns-a-recording-rule-a-kube-uid-c.yaml
+          /infrastructure/ns-b-alerting-rule-b-kube-uid-b.yaml
+          /infrastructure/ns-b-recording-rule-b-kube-uid-d.yaml
+```
+
+5. The `AlertingRuleController` listens for `AlertingRule` create/update/delete events and applies the annotation `loki.grafana.com/rulesDiscoveredAt: time.Now().Format(time.RFC3339)` on each `LokiStack` instance on the cluster. This ensures that the `LokiStack` reconciliation loop starts anew.
+6. The `RecordingRuleController` listens for `RecordingRule` create/update/delete events and applies the annotation `loki.grafana.com/rulesDiscoveredAt: time.Now().Format(time.RFC3339)` on each `LokiStack` instance on the cluster. This ensures that the `LokiStack` reconciliation loop starts anew; a sketch of this pattern follows the summary below.
+
+In summary, this approach groups all `AlertingRule` and `RecordingRule` custom resources in the **same** namespace as the `LokiStack` into two dedicated ConfigMaps, i.e. where the stack runs is where the rules live.
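+
+A minimal sketch of the annotation-touch pattern from steps 5 and 6, assuming controller-runtime; the helper name, import path, and annotation handling are illustrative and not part of the proposed API:
+
+```go
+package controllers
+
+import (
+	"context"
+	"time"
+
+	lokiv1beta1 "github.com/grafana/loki/operator/api/v1beta1"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+)
+
+// touchLokiStacks stamps every LokiStack with a fresh discovery timestamp so
+// that the LokiStackController requeues and rebuilds the rules ConfigMaps.
+func touchLokiStacks(ctx context.Context, c client.Client) error {
+	var stacks lokiv1beta1.LokiStackList
+	if err := c.List(ctx, &stacks); err != nil {
+		return err
+	}
+	for i := range stacks.Items {
+		s := &stacks.Items[i]
+		if s.Annotations == nil {
+			s.Annotations = map[string]string{}
+		}
+		s.Annotations["loki.grafana.com/rulesDiscoveredAt"] = time.Now().UTC().Format(time.RFC3339)
+		if err := c.Update(ctx, s); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+```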
+
+#### RulerConfig reconciliation
+
+The `RulerConfig` custom resource is not transformed into a specific ruler configuration file per se. It is considered an optional companion to the `LokiStack` custom resource. Thus the existing `LokiStackController` listens for `RulerConfig` create/update/delete events in the same namespace as the `LokiStack` custom resource and reconciles the final ruler configuration.
+
+In detail, the separation of concerns between the two CRDs looks as follows:
+1. `LokiStack`: Defines all required aspects to spin up a Loki ruler component, i.e. size, node placement. In addition it controls the common config settings for the Write-Ahead-Log and ring configuration.
+2. `RulerConfig`: Defines only the global runtime settings for the ruler, i.e. evaluation/poll intervals, AlertManager configuration, remote write client configuration.
+
+#### General constraints
+
+1. The above `RulerConfig` is limited to a single global remote write endpoint for exporting metrics from recording rules. This leaves solving multi-tenancy issues to the remote write server side, which must provide means (e.g. headers) for splitting/amending metrics ingestion per tenant. On the other hand it simplifies ruler operations, as it does not require spinning up concurrent remote write clients per tenant.
+2. Additionally, the `RulerConfig` requires a user-provided Kubernetes Secret for sensitive information. This adds an extra dependency that requires validation inside the controller loop.
+
+### Risks and Mitigations
+
+#### Loki Ruler Configuration versioning
+
+**Risk**: The `RulerConfig` CRD exposes an almost identical [ruler config](https://grafana.com/docs/loki/latest/configuration/#ruler) to support AlertManager and Remote-Write. Both sub-specifications are subject to change with the API version of the server endpoint (i.e. AlertManager, Remote-Write). Although AlertManager uses a versioned approach, e.g. the `EnableV2` switch, we lack a similar approach across remote write endpoints.
+
+**Mitigation**: We require a support matrix mapping `RulerConfig` versions to AlertManager API versions and Remote Write implementations. For the latter we could use a list of releases (e.g. Prometheus 2.x, Thanos 0.20.z).
+
+#### Two ConfigMaps for all RecordingRule and AlertingRule custom resources
+
+**Risk**: The proposed reconciliation approach means that all `AlertingRule` instances are transformed into individual entries in a single ConfigMap. Alerting rules can become quite big (e.g. large LogQL expressions). Since ConfigMaps are Kubernetes resources stored in etcd, this might hit store limits (see the [1MiB limit](https://kubernetes.io/docs/concepts/configuration/configmap/#motivation)). Therefore storing all rules in a single ConfigMap might become impossible over time or on large clusters.
+
+**Mitigation**: Introduce some sort of sharding of `AlertingRule` custom resources into multiple ConfigMap resources.
+
+**Note**: The same applies to `RecordingRule`.
+
+## Design Details
+
+### Open Questions [optional]
+
+1. Do we need to add support for Remote Write Configuration per tenant? (See `ruler_remote_write_*` parameters in [limits_config](https://grafana.com/docs/loki/latest/configuration/#limits_config))
+2. Do we need to add support for the `ruler_evaluation_delay_duration`, `ruler_max_rules_per_rule_group` and `ruler_max_rule_groups_per_tenant` limits?
+
+## Implementation History
+
+* 2022-04-21: Initial draft proposal
+* 2022-04-21: Spike implementation for `LokiRule` reconciliation and `RulerConfig` types. (See [PR](https://github.com/grafana/loki/pull/5986))
+* 2022-04-26: Update draft to change `LokiRule` types to use webhook-based validation and `LokiRuleSpec.Selector`, `LokiRuleSpec.NamespaceSelector`. (See [PR](https://github.com/grafana/loki/pull/5986))
+* 2022-04-28: Update draft to split `LokiRule` into two distinct types `AlertingRule` and `RecordingRule`. (See [PR](https://github.com/grafana/loki/pull/5986))
+* 2022-04-28: Renamed `LokiRulerConfig` types to `RulerConfig`, since the `Loki` prefix is part of the API group `loki.grafana.com`, i.e. the fully qualified type is `ruler.loki.grafana.com`
+* 2022-05-02: Update rule configmap generation based on per tenant subdirectory structure.
+* 2022-05-03: Update rule configmap entry naming to use Kubernetes `Metadata.UID` as permanent suffix.
+
+## Drawbacks
+
+The proposed design and implementation for LokiStack Ruler support adds a significant amount of new APIs as well as complexity, exposing a declarative approach **only** for the ruler component. Despite the hard effort to minimize the amount of configuration settings in the proposed CRDs, it still remains a huge addition to the operator code base. In contrast to the existing `LokiStack` CRD, which is a very slim set of Loki settings (note: without considering the gateway tenant configuration), the `RulerConfig` is almost one-to-one identical to the [ruler config](https://grafana.com/docs/loki/latest/configuration/#ruler).
+
+## Alternatives
+
+Alternatives providing support for Loki alerting and recording rules:
+1. [opsgy/loki-rule-operator](https://github.com/opsgy/loki-rule-operator)
+2. Add here anything missed
From 756fc19577d869c12a9c91eb00080f181539b83e Mon Sep 17 00:00:00 2001
From: Owen Diehl
Date: Fri, 6 May 2022 12:43:17 -0400
Subject: [PATCH 081/223] fix mutability bug with labels during tsdb flushing
 (#6116)

---
 pkg/ingester/flush.go                   |  2 ++
 pkg/storage/stores/tsdb/head_manager.go | 11 +++++------
 2 files changed, 7 insertions(+), 6 deletions(-)

diff --git a/pkg/ingester/flush.go b/pkg/ingester/flush.go
index dcdb0f64226bc..44bd29b306193 100644
--- a/pkg/ingester/flush.go
+++ b/pkg/ingester/flush.go
@@ -354,6 +354,8 @@ func (i *Ingester) flushChunks(ctx context.Context, fp model.Fingerprint, labelP
 		return err
 	}
 
+	// NB(owen-d): No longer needed in TSDB (and is removed in that code path)
+	// It's required by historical index stores so we keep it for now.
 	labelsBuilder := labels.NewBuilder(labelPairs)
 	labelsBuilder.Set(nameLabel, logsValue)
 	metric := labelsBuilder.Labels()
diff --git a/pkg/storage/stores/tsdb/head_manager.go b/pkg/storage/stores/tsdb/head_manager.go
index 7ab5003cdb50e..936a6f3556e70 100644
--- a/pkg/storage/stores/tsdb/head_manager.go
+++ b/pkg/storage/stores/tsdb/head_manager.go
@@ -144,12 +144,11 @@ func (m *HeadManager) Stop() error {
 
 func (m *HeadManager) Append(userID string, ls labels.Labels, chks index.ChunkMetas) error {
 	// TSDB doesnt need the __name__="log" convention the old chunk store index used.
-	for i, l := range ls {
-		if l.Name == labels.MetricName {
-			ls = append(ls[:i], ls[i+1:]...)
-			break
-		}
-	}
+	// We must create a copy of the labels here to avoid mutating the existing
+	// labels when writing across index buckets.
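+	// (The removed in-place append shifted entries of the slice's shared
+	// backing array; the builder records the deletion and returns a fresh
+	// label set instead.)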
+ b := labels.NewBuilder(ls) + b.Del(labels.MetricName) + ls = b.Labels() m.mtx.RLock() now := time.Now() From 729bf3fc91e205d2de71775097cd6d5a63df59b0 Mon Sep 17 00:00:00 2001 From: Owen Diehl Date: Fri, 6 May 2022 13:56:47 -0400 Subject: [PATCH 082/223] loads previously created TSDBs into shipper on startup (#6117) * loads previously created TSDBs into shipper on startup * noopTSDBManager impl * info logs for initial tsdb loading --- .../stores/indexshipper/uploads/table.go | 18 ++-- pkg/storage/stores/tsdb/head_manager.go | 7 ++ pkg/storage/stores/tsdb/head_manager_test.go | 1 + pkg/storage/stores/tsdb/manager.go | 84 +++++++++++++++++++ 4 files changed, 105 insertions(+), 5 deletions(-) diff --git a/pkg/storage/stores/indexshipper/uploads/table.go b/pkg/storage/stores/indexshipper/uploads/table.go index f88283581eb9d..e782df18a47e8 100644 --- a/pkg/storage/stores/indexshipper/uploads/table.go +++ b/pkg/storage/stores/indexshipper/uploads/table.go @@ -82,12 +82,20 @@ func (lt *table) ForEach(userID string, callback index.ForEachIndexCallback) err lt.indexSetMtx.RLock() defer lt.indexSetMtx.RUnlock() - idxSet, ok := lt.indexSet[userID] - if !ok { - return nil - } + // TODO(owen-d): refactor? Uploads mgr never has user indices, + // only common (multitenant) ones. + // iterate through both user and common index + for _, uid := range []string{userID, ""} { + idxSet, ok := lt.indexSet[uid] + if !ok { + continue + } - return idxSet.ForEach(callback) + if err := idxSet.ForEach(callback); err != nil { + return err + } + } + return nil } // Upload uploads the index to object storage. diff --git a/pkg/storage/stores/tsdb/head_manager.go b/pkg/storage/stores/tsdb/head_manager.go index 936a6f3556e70..dc841012e3d95 100644 --- a/pkg/storage/stores/tsdb/head_manager.go +++ b/pkg/storage/stores/tsdb/head_manager.go @@ -182,9 +182,16 @@ func (m *HeadManager) Start() error { if err != nil { return err } + level.Info(m.log).Log("msg", "loaded wals by period", "groups", len(walsByPeriod)) m.activeHeads = newTenantHeads(now, m.shards, m.metrics, m.log) + // Load the shipper with any previously built TSDBs + if err := m.tsdbManager.Start(); err != nil { + return errors.Wrap(err, "failed to start tsdb manager") + } + + // Build any old WALs into TSDBs for the shipper for _, group := range walsByPeriod { if group.period < curPeriod { if err := m.tsdbManager.BuildFromWALs( diff --git a/pkg/storage/stores/tsdb/head_manager_test.go b/pkg/storage/stores/tsdb/head_manager_test.go index a0d41c2a351e1..0aca98d562743 100644 --- a/pkg/storage/stores/tsdb/head_manager_test.go +++ b/pkg/storage/stores/tsdb/head_manager_test.go @@ -22,6 +22,7 @@ import ( type noopTSDBManager struct{ NoopIndex } func (noopTSDBManager) BuildFromWALs(_ time.Time, _ []WALIdentifier) error { return nil } +func (noopTSDBManager) Start() error { return nil } func chunkMetasToChunkRefs(user string, fp uint64, xs index.ChunkMetas) (res []ChunkRef) { for _, x := range xs { diff --git a/pkg/storage/stores/tsdb/manager.go b/pkg/storage/stores/tsdb/manager.go index 1ee6e04ccfc66..5b9cffc4abe96 100644 --- a/pkg/storage/stores/tsdb/manager.go +++ b/pkg/storage/stores/tsdb/manager.go @@ -3,8 +3,10 @@ package tsdb import ( "context" "fmt" + "io/ioutil" "math" "path/filepath" + "strconv" "sync" "time" @@ -24,6 +26,7 @@ import ( // TSDBManager wraps the index shipper and writes/manages // TSDB files on disk type TSDBManager interface { + Start() error Index // Builds a new TSDB file from a set of WALs BuildFromWALs(time.Time, []WALIdentifier) error @@ 
-68,6 +71,88 @@ func NewTSDBManager(
 	}
 }
 
+func (m *tsdbManager) Start() (err error) {
+	var (
+		buckets, indices, loadingErrors int
+	)
+
+	defer func() {
+		level.Info(m.log).Log(
+			"msg", "loaded leftover local indices",
+			"err", err,
+			"successful", err == nil,
+			"buckets", buckets,
+			"indices", indices,
+			"failures", loadingErrors,
+		)
+	}()
+
+	// load list of multitenant tsdbs
+	multitenantDir := managerMultitenantDir(m.dir)
+	files, err := ioutil.ReadDir(multitenantDir)
+	if err != nil {
+		return err
+	}
+
+	for _, f := range files {
+		if !f.IsDir() {
+			continue
+		}
+
+		bucket, err := strconv.Atoi(f.Name())
+		if err != nil {
+			level.Warn(m.log).Log(
+				"msg", "failed to parse bucket in multitenant dir",
+				"err", err.Error(),
+			)
+			continue
+		}
+		buckets++
+
+		tsdbs, err := ioutil.ReadDir(filepath.Join(multitenantDir, f.Name()))
+		if err != nil {
+			level.Warn(m.log).Log(
+				"msg", "failed to open period bucket dir",
+				"bucket", bucket,
+				"err", err.Error(),
+			)
+			continue
+		}
+
+		for _, db := range tsdbs {
+			id, ok := parseMultitenantTSDBPath(db.Name())
+			if !ok {
+				continue
+			}
+			indices++
+
+			prefixed := newPrefixedIdentifier(id, filepath.Join(multitenantDir, f.Name()), "")
+			loaded, err := NewShippableTSDBFile(
+				prefixed,
+				false,
+			)
+
+			if err != nil {
+				level.Warn(m.log).Log(
+					"msg", "failed to load tsdb file",
+					"tsdbPath", prefixed.Path(),
+					"err", err.Error(),
+				)
+				loadingErrors++
+				continue
+			}
+
+			if err := m.shipper.AddIndex(f.Name(), "", loaded); err != nil {
+				loadingErrors++
+				return err
+			}
+		}
+
+	}
+
+	return nil
+}
+
 func (m *tsdbManager) BuildFromWALs(t time.Time, ids []WALIdentifier) (err error) {
 	level.Debug(m.log).Log("msg", "building WALs", "n", len(ids), "ts", t)
 	// get relevant wals
From 24f8b19e9106dfa02d224570b2b744dc51bb54b8 Mon Sep 17 00:00:00 2001
From: Kaviraj Kanagaraj
Date: Fri, 6 May 2022 20:14:00 +0200
Subject: [PATCH 083/223] Remove unused grpc health check endpoints (#6113)

We have a generic `grpc_healthcheck` endpoint that gets registered for all
components during the initialization stage. It looks like the health checks
that are part of the actual component structs themselves are legacy leftovers.

Signed-off-by: Kaviraj --- pkg/distributor/distributor.go | 10 ---------- pkg/ingester/ingester.go | 9 --------- 2 files changed, 19 deletions(-) diff --git a/pkg/distributor/distributor.go b/pkg/distributor/distributor.go index 369fe7a6b6334..577101438b9fd 100644 --- a/pkg/distributor/distributor.go +++ b/pkg/distributor/distributor.go @@ -19,7 +19,6 @@ import ( "github.com/weaveworks/common/httpgrpc" "github.com/weaveworks/common/user" "go.uber.org/atomic" - "google.golang.org/grpc/health/grpc_health_v1" "github.com/grafana/dskit/tenant" @@ -428,15 +427,6 @@ func (d *Distributor) sendSamplesErr(ctx context.Context, ingester ring.Instance return err } -// Check implements the grpc healthcheck -func (d *Distributor) Check(_ context.Context, _ *grpc_health_v1.HealthCheckRequest) (*grpc_health_v1.HealthCheckResponse, error) { - status := grpc_health_v1.HealthCheckResponse_SERVING - if (d.State() != services.Running) || (d.distributorsLifecycler.GetState() != ring.ACTIVE) { - status = grpc_health_v1.HealthCheckResponse_NOT_SERVING - } - return &grpc_health_v1.HealthCheckResponse{Status: status}, nil -} - func (d *Distributor) parseStreamLabels(vContext validationContext, key string, stream *logproto.Stream) (string, error) { labelVal, ok := d.labelCache.Get(key) if ok { diff --git a/pkg/ingester/ingester.go b/pkg/ingester/ingester.go index c6444d0c13c79..cb7cb70cca512 100644 --- a/pkg/ingester/ingester.go +++ b/pkg/ingester/ingester.go @@ -778,15 +778,6 @@ func (i *Ingester) Series(ctx context.Context, req *logproto.SeriesRequest) (*lo return instance.Series(ctx, req) } -// Check implements grpc_health_v1.HealthCheck. -func (i *Ingester) Check(ctx context.Context, req *grpc_health_v1.HealthCheckRequest) (*grpc_health_v1.HealthCheckResponse, error) { - status := grpc_health_v1.HealthCheckResponse_SERVING - if (i.State() != services.Running) || (i.lifecycler.GetState() != ring.ACTIVE) { - status = grpc_health_v1.HealthCheckResponse_NOT_SERVING - } - return &grpc_health_v1.HealthCheckResponse{Status: status}, nil -} - // Watch implements grpc_health_v1.HealthCheck. 
func (*Ingester) Watch(*grpc_health_v1.HealthCheckRequest, grpc_health_v1.Health_WatchServer) error { return nil From c2e43dd8f507dc51672866d5b3ae98ad7a7e647e Mon Sep 17 00:00:00 2001 From: Irina Date: Fri, 6 May 2022 19:15:39 +0100 Subject: [PATCH 084/223] Bump k8s.io/api, k8s.io/client-go, k8s.io/apimachinery from 0.22.7 to 0.23.6 (#6037) --- go.mod | 22 +- go.sum | 56 +- .../googleapis/gnostic/compiler/README.md | 3 +- .../googleapis/gnostic/compiler/context.go | 20 +- .../googleapis/gnostic/compiler/error.go | 15 +- .../gnostic/compiler/extension-handler.go | 101 - .../googleapis/gnostic/compiler/extensions.go | 85 + .../googleapis/gnostic/compiler/helpers.go | 347 +- .../googleapis/gnostic/compiler/main.go | 2 +- .../googleapis/gnostic/compiler/reader.go | 116 +- .../googleapis/gnostic/extensions/README.md | 12 +- .../gnostic/extensions/extension.pb.go | 531 +- .../gnostic/extensions/extension.proto | 30 +- .../gnostic/extensions/extensions.go | 68 +- .../googleapis/gnostic/jsonschema/README.md | 4 + .../googleapis/gnostic/jsonschema/base.go | 84 + .../googleapis/gnostic/jsonschema/display.go | 229 + .../googleapis/gnostic/jsonschema/models.go | 228 + .../gnostic/jsonschema/operations.go | 394 + .../googleapis/gnostic/jsonschema/reader.go | 442 + .../googleapis/gnostic/jsonschema/schema.json | 150 + .../googleapis/gnostic/jsonschema/writer.go | 369 + .../googleapis/gnostic/openapiv2/OpenAPIv2.go | 3615 ++++---- .../gnostic/openapiv2/OpenAPIv2.pb.go | 8200 +++++++++++------ .../gnostic/openapiv2/OpenAPIv2.proto | 7 +- .../googleapis/gnostic/openapiv2/README.md | 20 +- .../googleapis/gnostic/openapiv2/document.go | 41 + .../gnostic/openapiv2/openapi-2.0.json | 4 +- .../api/admissionregistration/v1/types.go | 6 + .../v1/zz_generated.deepcopy.go | 1 + .../v1beta1/zz_generated.deepcopy.go | 1 + .../zz_generated.prerelease-lifecycle.go | 1 + .../v1alpha1/generated.proto | 3 + .../api/apiserverinternal/v1alpha1/types.go | 5 +- .../v1alpha1/types_swagger_doc_generated.go | 4 +- .../v1alpha1/zz_generated.deepcopy.go | 1 + vendor/k8s.io/api/apps/v1/generated.pb.go | 573 +- vendor/k8s.io/api/apps/v1/generated.proto | 62 +- vendor/k8s.io/api/apps/v1/types.go | 89 +- .../apps/v1/types_swagger_doc_generated.go | 56 +- .../api/apps/v1/zz_generated.deepcopy.go | 22 + .../k8s.io/api/apps/v1beta1/generated.pb.go | 550 +- .../k8s.io/api/apps/v1beta1/generated.proto | 38 +- vendor/k8s.io/api/apps/v1beta1/types.go | 55 +- .../v1beta1/types_swagger_doc_generated.go | 35 +- .../api/apps/v1beta1/zz_generated.deepcopy.go | 22 + .../zz_generated.prerelease-lifecycle.go | 1 + .../k8s.io/api/apps/v1beta2/generated.pb.go | 590 +- .../k8s.io/api/apps/v1beta2/generated.proto | 49 +- vendor/k8s.io/api/apps/v1beta2/types.go | 67 +- .../v1beta2/types_swagger_doc_generated.go | 43 +- .../api/apps/v1beta2/zz_generated.deepcopy.go | 22 + .../zz_generated.prerelease-lifecycle.go | 1 + .../api/authentication/v1/generated.proto | 6 + vendor/k8s.io/api/authentication/v1/types.go | 10 + .../v1/types_swagger_doc_generated.go | 12 +- .../v1/zz_generated.deepcopy.go | 1 + .../authentication/v1beta1/generated.proto | 4 +- .../api/authentication/v1beta1/types.go | 4 +- .../v1beta1/types_swagger_doc_generated.go | 7 +- .../v1beta1/zz_generated.deepcopy.go | 1 + .../zz_generated.prerelease-lifecycle.go | 1 + .../api/authorization/v1/generated.proto | 9 + vendor/k8s.io/api/authorization/v1/types.go | 9 + .../v1/types_swagger_doc_generated.go | 29 +- .../authorization/v1/zz_generated.deepcopy.go | 1 + 
.../api/authorization/v1beta1/generated.proto | 9 + .../k8s.io/api/authorization/v1beta1/types.go | 9 + .../v1beta1/types_swagger_doc_generated.go | 29 +- .../v1beta1/zz_generated.deepcopy.go | 1 + .../zz_generated.prerelease-lifecycle.go | 1 + .../k8s.io/api/autoscaling/v1/generated.proto | 1 + vendor/k8s.io/api/autoscaling/v1/types.go | 2 + .../autoscaling/v1/zz_generated.deepcopy.go | 1 + vendor/k8s.io/api/autoscaling/v2/doc.go | 21 + .../k8s.io/api/autoscaling/v2/generated.pb.go | 6599 +++++++++++++ .../k8s.io/api/autoscaling/v2/generated.proto | 504 + vendor/k8s.io/api/autoscaling/v2/register.go | 50 + vendor/k8s.io/api/autoscaling/v2/types.go | 545 ++ .../v2/types_swagger_doc_generated.go | 299 + .../autoscaling/v2/zz_generated.deepcopy.go | 610 ++ .../api/autoscaling/v2beta1/generated.proto | 1 + .../k8s.io/api/autoscaling/v2beta1/types.go | 3 +- .../v2beta1/zz_generated.deepcopy.go | 1 + .../zz_generated.prerelease-lifecycle.go | 3 +- .../api/autoscaling/v2beta2/generated.proto | 1 + .../k8s.io/api/autoscaling/v2beta2/types.go | 4 +- .../v2beta2/zz_generated.deepcopy.go | 1 + .../zz_generated.prerelease-lifecycle.go | 15 +- vendor/k8s.io/api/batch/v1/generated.pb.go | 473 +- vendor/k8s.io/api/batch/v1/generated.proto | 58 +- vendor/k8s.io/api/batch/v1/types.go | 75 +- .../batch/v1/types_swagger_doc_generated.go | 34 +- .../api/batch/v1/zz_generated.deepcopy.go | 38 + .../batch/v1beta1/zz_generated.deepcopy.go | 1 + .../zz_generated.prerelease-lifecycle.go | 1 + .../api/certificates/v1/generated.pb.go | 143 +- .../api/certificates/v1/generated.proto | 26 +- vendor/k8s.io/api/certificates/v1/types.go | 27 +- .../v1/types_swagger_doc_generated.go | 19 +- .../certificates/v1/zz_generated.deepcopy.go | 6 + .../api/certificates/v1beta1/generated.pb.go | 143 +- .../api/certificates/v1beta1/generated.proto | 33 +- .../k8s.io/api/certificates/v1beta1/types.go | 35 +- .../v1beta1/types_swagger_doc_generated.go | 19 +- .../v1beta1/zz_generated.deepcopy.go | 6 + .../zz_generated.prerelease-lifecycle.go | 1 + .../coordination/v1/zz_generated.deepcopy.go | 1 + .../v1beta1/zz_generated.deepcopy.go | 1 + .../zz_generated.prerelease-lifecycle.go | 1 + .../api/core/v1/annotation_key_constants.go | 11 +- vendor/k8s.io/api/core/v1/generated.pb.go | 4618 ++++++---- vendor/k8s.io/api/core/v1/generated.proto | 435 +- vendor/k8s.io/api/core/v1/register.go | 1 - vendor/k8s.io/api/core/v1/types.go | 556 +- .../core/v1/types_swagger_doc_generated.go | 189 +- .../api/core/v1/zz_generated.deepcopy.go | 207 +- .../k8s.io/api/discovery/v1/generated.proto | 1 + vendor/k8s.io/api/discovery/v1/types.go | 2 + .../api/discovery/v1/zz_generated.deepcopy.go | 1 + .../v1beta1/zz_generated.deepcopy.go | 1 + .../zz_generated.prerelease-lifecycle.go | 1 + .../api/events/v1/zz_generated.deepcopy.go | 1 + .../events/v1beta1/zz_generated.deepcopy.go | 1 + .../zz_generated.prerelease-lifecycle.go | 1 + .../api/extensions/v1beta1/generated.proto | 17 +- vendor/k8s.io/api/extensions/v1beta1/types.go | 19 +- .../v1beta1/types_swagger_doc_generated.go | 8 +- .../v1beta1/zz_generated.deepcopy.go | 1 + .../zz_generated.prerelease-lifecycle.go | 1 + .../api/flowcontrol/v1alpha1/generated.proto | 10 +- .../k8s.io/api/flowcontrol/v1alpha1/types.go | 18 +- .../v1alpha1/types_swagger_doc_generated.go | 9 +- .../v1alpha1/zz_generated.deepcopy.go | 1 + .../zz_generated.prerelease-lifecycle.go | 9 +- .../api/flowcontrol/v1beta1/generated.proto | 10 +- .../k8s.io/api/flowcontrol/v1beta1/types.go | 58 +- 
.../v1beta1/types_swagger_doc_generated.go | 9 +- .../v1beta1/zz_generated.deepcopy.go | 1 + .../zz_generated.prerelease-lifecycle.go | 29 + vendor/k8s.io/api/flowcontrol/v1beta2/doc.go | 25 + .../api/flowcontrol/v1beta2/generated.pb.go | 5367 +++++++++++ .../api/flowcontrol/v1beta2/generated.proto | 440 + .../api/flowcontrol/v1beta2/register.go | 58 + .../k8s.io/api/flowcontrol/v1beta2/types.go | 579 ++ .../v1beta2/types_swagger_doc_generated.go | 261 + .../v1beta2/zz_generated.deepcopy.go | 542 ++ .../zz_generated.prerelease-lifecycle.go | 94 + .../k8s.io/api/networking/v1/generated.proto | 11 +- vendor/k8s.io/api/networking/v1/types.go | 15 +- .../v1/types_swagger_doc_generated.go | 6 +- .../networking/v1/well_known_annotations.go | 25 + .../networking/v1/zz_generated.deepcopy.go | 1 + .../api/networking/v1beta1/generated.proto | 8 +- vendor/k8s.io/api/networking/v1beta1/types.go | 8 +- .../v1beta1/types_swagger_doc_generated.go | 4 +- .../v1beta1/zz_generated.deepcopy.go | 1 + .../zz_generated.prerelease-lifecycle.go | 1 + vendor/k8s.io/api/node/v1/generated.proto | 1 + vendor/k8s.io/api/node/v1/types.go | 1 + .../api/node/v1/zz_generated.deepcopy.go | 1 + .../k8s.io/api/node/v1alpha1/generated.proto | 7 +- vendor/k8s.io/api/node/v1alpha1/types.go | 7 +- .../v1alpha1/types_swagger_doc_generated.go | 4 +- .../node/v1alpha1/zz_generated.deepcopy.go | 1 + .../k8s.io/api/node/v1beta1/generated.proto | 7 +- vendor/k8s.io/api/node/v1beta1/types.go | 7 +- .../v1beta1/types_swagger_doc_generated.go | 4 +- .../api/node/v1beta1/zz_generated.deepcopy.go | 1 + .../zz_generated.prerelease-lifecycle.go | 1 + vendor/k8s.io/api/policy/v1/generated.pb.go | 328 +- vendor/k8s.io/api/policy/v1/generated.proto | 13 + vendor/k8s.io/api/policy/v1/register.go | 1 + vendor/k8s.io/api/policy/v1/types.go | 22 + .../policy/v1/types_swagger_doc_generated.go | 10 + .../api/policy/v1/zz_generated.deepcopy.go | 32 + .../k8s.io/api/policy/v1beta1/generated.proto | 6 +- vendor/k8s.io/api/policy/v1beta1/types.go | 12 +- .../v1beta1/types_swagger_doc_generated.go | 11 +- .../policy/v1beta1/zz_generated.deepcopy.go | 1 + .../zz_generated.prerelease-lifecycle.go | 1 + vendor/k8s.io/api/rbac/v1/generated.proto | 6 +- vendor/k8s.io/api/rbac/v1/types.go | 6 +- .../rbac/v1/types_swagger_doc_generated.go | 4 +- .../api/rbac/v1/zz_generated.deepcopy.go | 1 + .../k8s.io/api/rbac/v1alpha1/generated.proto | 4 +- vendor/k8s.io/api/rbac/v1alpha1/types.go | 4 +- .../v1alpha1/types_swagger_doc_generated.go | 4 +- .../rbac/v1alpha1/zz_generated.deepcopy.go | 1 + .../k8s.io/api/rbac/v1beta1/generated.proto | 2 +- vendor/k8s.io/api/rbac/v1beta1/types.go | 2 +- .../v1beta1/types_swagger_doc_generated.go | 2 +- .../api/rbac/v1beta1/zz_generated.deepcopy.go | 1 + .../zz_generated.prerelease-lifecycle.go | 1 + .../scheduling/v1/zz_generated.deepcopy.go | 1 + .../v1alpha1/zz_generated.deepcopy.go | 1 + .../v1beta1/zz_generated.deepcopy.go | 1 + .../zz_generated.prerelease-lifecycle.go | 1 + vendor/k8s.io/api/storage/v1/generated.proto | 15 +- vendor/k8s.io/api/storage/v1/types.go | 23 +- .../storage/v1/types_swagger_doc_generated.go | 8 +- .../api/storage/v1/zz_generated.deepcopy.go | 1 + .../storage/v1alpha1/zz_generated.deepcopy.go | 1 + .../zz_generated.prerelease-lifecycle.go | 1 + .../api/storage/v1beta1/generated.proto | 15 +- vendor/k8s.io/api/storage/v1beta1/types.go | 21 +- .../v1beta1/types_swagger_doc_generated.go | 8 +- .../storage/v1beta1/zz_generated.deepcopy.go | 1 + .../zz_generated.prerelease-lifecycle.go | 1 + 
.../k8s.io/apimachinery/pkg/api/errors/OWNERS | 1 - .../apimachinery/pkg/api/errors/errors.go | 201 +- .../pkg/api/meta/firsthit_restmapper.go | 8 + .../apimachinery/pkg/api/meta/interfaces.go | 9 + .../k8s.io/apimachinery/pkg/api/meta/lazy.go | 12 +- .../pkg/api/meta/multirestmapper.go | 12 +- .../apimachinery/pkg/api/meta/priority.go | 8 + .../apimachinery/pkg/api/meta/restmapper.go | 9 + .../pkg/api/resource/generated.pb.go | 57 +- .../pkg/api/resource/generated.proto | 12 + .../apimachinery/pkg/api/resource/quantity.go | 48 +- .../pkg/api/resource/quantity_proto.go | 2 +- .../pkg/api/resource/zz_generated.deepcopy.go | 18 + .../zz_generated.conversion.go | 1 + .../internalversion/zz_generated.deepcopy.go | 1 + .../apimachinery/pkg/apis/meta/v1/OWNERS | 2 - .../pkg/apis/meta/v1/generated.pb.go | 417 +- .../pkg/apis/meta/v1/generated.proto | 55 +- .../pkg/apis/meta/v1/group_version.go | 4 +- .../apimachinery/pkg/apis/meta/v1/helpers.go | 10 +- .../pkg/apis/meta/v1/micro_time_fuzz.go | 1 + .../pkg/apis/meta/v1/time_fuzz.go | 1 + .../apimachinery/pkg/apis/meta/v1/types.go | 60 +- .../meta/v1/types_swagger_doc_generated.go | 40 +- .../pkg/apis/meta/v1/unstructured/helpers.go | 4 +- .../v1/unstructured/zz_generated.deepcopy.go | 1 + .../apis/meta/v1/zz_generated.conversion.go | 22 + .../pkg/apis/meta/v1/zz_generated.deepcopy.go | 1 + .../pkg/apis/meta/v1/zz_generated.defaults.go | 1 + .../meta/v1beta1/zz_generated.deepcopy.go | 1 + .../meta/v1beta1/zz_generated.defaults.go | 1 + .../apimachinery/pkg/conversion/converter.go | 12 +- .../apimachinery/pkg/labels/selector.go | 21 +- .../pkg/labels/zz_generated.deepcopy.go | 1 + .../apimachinery/pkg/runtime/converter.go | 210 +- .../k8s.io/apimachinery/pkg/runtime/error.go | 33 +- .../apimachinery/pkg/runtime/interfaces.go | 3 + .../k8s.io/apimachinery/pkg/runtime/scheme.go | 24 +- .../pkg/runtime/serializer/codec_factory.go | 16 + .../pkg/runtime/serializer/json/json.go | 162 +- .../serializer/recognizer/recognizer.go | 12 +- .../runtime/serializer/streaming/streaming.go | 1 - .../serializer/versioning/versioning.go | 15 +- .../pkg/runtime/zz_generated.deepcopy.go | 1 + .../apimachinery/pkg/util/cache/expiring.go | 8 +- .../pkg/util/cache/lruexpirecache.go | 94 +- .../apimachinery/pkg/util/clock/clock.go | 445 - .../apimachinery/pkg/util/framer/framer.go | 6 +- .../pkg/util/intstr/instr_fuzz.go | 1 + .../k8s.io/apimachinery/pkg/util/json/json.go | 50 +- .../pkg/util/managedfields/extract.go | 30 +- .../pkg/util/managedfields/gvkparser.go | 127 + .../k8s.io/apimachinery/pkg/util/net/http.go | 37 +- .../apimachinery/pkg/util/net/interface.go | 5 +- .../pkg/util/validation/field/errors.go | 5 +- .../pkg/util/validation/validation.go | 7 +- .../k8s.io/apimachinery/pkg/util/wait/wait.go | 244 +- .../apimachinery/pkg/util/yaml/decoder.go | 46 +- .../pkg/watch/zz_generated.deepcopy.go | 1 + .../third_party/forked/golang/LICENSE | 27 + .../third_party/forked/golang/PATENTS | 22 + .../v1/mutatingwebhookconfiguration.go | 15 +- .../v1/validatingwebhookconfiguration.go | 15 +- .../v1beta1/mutatingwebhookconfiguration.go | 15 +- .../v1beta1/validatingwebhookconfiguration.go | 15 +- .../v1alpha1/storageversion.go | 15 +- .../apps/v1/controllerrevision.go | 15 +- .../applyconfigurations/apps/v1/daemonset.go | 15 +- .../applyconfigurations/apps/v1/deployment.go | 15 +- .../applyconfigurations/apps/v1/replicaset.go | 15 +- .../apps/v1/statefulset.go | 15 +- ...setpersistentvolumeclaimretentionpolicy.go | 52 + .../apps/v1/statefulsetspec.go | 34 +- 
.../apps/v1/statefulsetstatus.go | 9 + .../apps/v1beta1/controllerrevision.go | 15 +- .../apps/v1beta1/deployment.go | 15 +- .../apps/v1beta1/statefulset.go | 15 +- ...setpersistentvolumeclaimretentionpolicy.go | 52 + .../apps/v1beta1/statefulsetspec.go | 34 +- .../apps/v1beta1/statefulsetstatus.go | 9 + .../apps/v1beta2/controllerrevision.go | 15 +- .../apps/v1beta2/daemonset.go | 15 +- .../apps/v1beta2/deployment.go | 15 +- .../apps/v1beta2/replicaset.go | 15 +- .../applyconfigurations/apps/v1beta2/scale.go | 233 + .../apps/v1beta2/statefulset.go | 15 +- ...setpersistentvolumeclaimretentionpolicy.go | 52 + .../apps/v1beta2/statefulsetspec.go | 34 +- .../apps/v1beta2/statefulsetstatus.go | 9 + .../autoscaling/v1/horizontalpodautoscaler.go | 15 +- .../autoscaling/v1/scale.go | 232 + .../autoscaling/v1/scalespec.go | 39 + .../autoscaling/v1/scalestatus.go | 48 + .../v2/containerresourcemetricsource.go | 61 + .../v2/containerresourcemetricstatus.go | 61 + .../v2/crossversionobjectreference.go | 57 + .../autoscaling/v2/externalmetricsource.go | 48 + .../autoscaling/v2/externalmetricstatus.go | 48 + .../autoscaling/v2/horizontalpodautoscaler.go | 276 + .../v2/horizontalpodautoscalerbehavior.go | 48 + .../v2/horizontalpodautoscalercondition.go | 81 + .../v2/horizontalpodautoscalerspec.go | 80 + .../v2/horizontalpodautoscalerstatus.go | 98 + .../autoscaling/v2/hpascalingpolicy.go | 61 + .../autoscaling/v2/hpascalingrules.go | 66 + .../autoscaling/v2/metricidentifier.go | 52 + .../autoscaling/v2/metricspec.go | 88 + .../autoscaling/v2/metricstatus.go | 88 + .../autoscaling/v2/metrictarget.go | 71 + .../autoscaling/v2/metricvaluestatus.go | 61 + .../autoscaling/v2/objectmetricsource.go | 57 + .../autoscaling/v2/objectmetricstatus.go | 57 + .../autoscaling/v2/podresourcemetricsource.go | 52 + .../autoscaling/v2/podsmetricsource.go | 48 + .../autoscaling/v2/podsmetricstatus.go | 48 + .../autoscaling/v2/resourcemetricsource.go | 52 + .../autoscaling/v2/resourcemetricstatus.go | 52 + .../v2beta1/horizontalpodautoscaler.go | 15 +- .../v2beta2/horizontalpodautoscaler.go | 15 +- .../applyconfigurations/batch/v1/cronjob.go | 15 +- .../applyconfigurations/batch/v1/job.go | 15 +- .../applyconfigurations/batch/v1/jobstatus.go | 32 +- .../batch/v1/uncountedterminatedpods.go | 56 + .../batch/v1beta1/cronjob.go | 15 +- .../v1/certificatesigningrequest.go | 15 +- .../v1/certificatesigningrequestspec.go | 23 +- .../v1beta1/certificatesigningrequest.go | 15 +- .../v1beta1/certificatesigningrequestspec.go | 23 +- .../coordination/v1/lease.go | 15 +- .../coordination/v1beta1/lease.go | 15 +- .../core/v1/componentstatus.go | 15 +- .../applyconfigurations/core/v1/configmap.go | 15 +- .../applyconfigurations/core/v1/endpoints.go | 15 +- .../applyconfigurations/core/v1/event.go | 15 +- .../applyconfigurations/core/v1/grpcaction.go | 48 + .../applyconfigurations/core/v1/lifecycle.go | 8 +- .../v1/{handler.go => lifecyclehandler.go} | 16 +- .../applyconfigurations/core/v1/limitrange.go | 15 +- .../applyconfigurations/core/v1/namespace.go | 15 +- .../applyconfigurations/core/v1/node.go | 15 +- .../core/v1/persistentvolume.go | 15 +- .../core/v1/persistentvolumeclaim.go | 15 +- .../core/v1/persistentvolumeclaimspec.go | 9 + .../core/v1/persistentvolumeclaimstatus.go | 26 +- .../applyconfigurations/core/v1/pod.go | 15 +- .../applyconfigurations/core/v1/podos.go | 43 + .../applyconfigurations/core/v1/podspec.go | 9 + .../core/v1/podtemplate.go | 15 +- .../applyconfigurations/core/v1/probe.go | 22 +- 
.../core/v1/probehandler.go | 66 + .../core/v1/replicationcontroller.go | 15 +- .../core/v1/resourcequota.go | 15 +- .../applyconfigurations/core/v1/secret.go | 15 +- .../applyconfigurations/core/v1/service.go | 15 +- .../core/v1/serviceaccount.go | 15 +- .../core/v1/servicespec.go | 11 - .../core/v1/windowssecuritycontextoptions.go | 9 + .../discovery/v1/endpointslice.go | 15 +- .../discovery/v1beta1/endpointslice.go | 15 +- .../applyconfigurations/events/v1/event.go | 15 +- .../events/v1beta1/event.go | 15 +- .../extensions/v1beta1/daemonset.go | 15 +- .../extensions/v1beta1/deployment.go | 15 +- .../extensions/v1beta1/ingress.go | 15 +- .../extensions/v1beta1/networkpolicy.go | 15 +- .../extensions/v1beta1/podsecuritypolicy.go | 15 +- .../extensions/v1beta1/replicaset.go | 15 +- .../extensions/v1beta1/scale.go | 233 + .../flowcontrol/v1alpha1/flowschema.go | 15 +- .../v1alpha1/prioritylevelconfiguration.go | 15 +- .../flowcontrol/v1beta1/flowschema.go | 15 +- .../v1beta1/prioritylevelconfiguration.go | 15 +- .../v1beta2/flowdistinguishermethod.go | 43 + .../flowcontrol/v1beta2/flowschema.go | 274 + .../v1beta2/flowschemacondition.go | 80 + .../flowcontrol/v1beta2/flowschemaspec.go | 71 + .../flowcontrol/v1beta2/flowschemastatus.go | 44 + .../flowcontrol/v1beta2/groupsubject.go | 39 + .../limitedprioritylevelconfiguration.go | 48 + .../flowcontrol/v1beta2/limitresponse.go | 52 + .../v1beta2/nonresourcepolicyrule.go | 52 + .../v1beta2/policyruleswithsubjects.go | 72 + .../v1beta2/prioritylevelconfiguration.go | 274 + .../prioritylevelconfigurationcondition.go | 80 + .../prioritylevelconfigurationreference.go | 39 + .../v1beta2/prioritylevelconfigurationspec.go | 52 + .../prioritylevelconfigurationstatus.go | 44 + .../v1beta2/queuingconfiguration.go | 57 + .../flowcontrol/v1beta2/resourcepolicyrule.go | 83 + .../v1beta2/serviceaccountsubject.go | 48 + .../flowcontrol/v1beta2/subject.go | 70 + .../flowcontrol/v1beta2/usersubject.go | 39 + .../applyconfigurations/internal/internal.go | 2517 +++-- .../meta/v1/managedfieldsentry.go | 21 +- .../meta/v1/unstructured.go | 137 + .../networking/v1/ingress.go | 15 +- .../networking/v1/ingressclass.go | 15 +- .../networking/v1/networkpolicy.go | 15 +- .../networking/v1beta1/ingress.go | 15 +- .../networking/v1beta1/ingressclass.go | 15 +- .../node/v1/runtimeclass.go | 15 +- .../node/v1alpha1/runtimeclass.go | 15 +- .../node/v1beta1/runtimeclass.go | 15 +- .../applyconfigurations/policy/v1/eviction.go | 267 + .../policy/v1/poddisruptionbudget.go | 15 +- .../policy/v1beta1/eviction.go | 15 +- .../policy/v1beta1/poddisruptionbudget.go | 15 +- .../policy/v1beta1/podsecuritypolicy.go | 15 +- .../rbac/v1/clusterrole.go | 15 +- .../rbac/v1/clusterrolebinding.go | 15 +- .../applyconfigurations/rbac/v1/role.go | 15 +- .../rbac/v1/rolebinding.go | 15 +- .../rbac/v1alpha1/clusterrole.go | 15 +- .../rbac/v1alpha1/clusterrolebinding.go | 15 +- .../applyconfigurations/rbac/v1alpha1/role.go | 15 +- .../rbac/v1alpha1/rolebinding.go | 15 +- .../rbac/v1beta1/clusterrole.go | 15 +- .../rbac/v1beta1/clusterrolebinding.go | 15 +- .../applyconfigurations/rbac/v1beta1/role.go | 15 +- .../rbac/v1beta1/rolebinding.go | 15 +- .../scheduling/v1/priorityclass.go | 15 +- .../scheduling/v1alpha1/priorityclass.go | 15 +- .../scheduling/v1beta1/priorityclass.go | 15 +- .../storage/v1/csidriver.go | 15 +- .../applyconfigurations/storage/v1/csinode.go | 15 +- .../storage/v1/storageclass.go | 15 +- .../storage/v1/volumeattachment.go | 15 +- .../storage/v1alpha1/csistoragecapacity.go | 
15 +- .../storage/v1alpha1/volumeattachment.go | 15 +- .../storage/v1beta1/csidriver.go | 15 +- .../storage/v1beta1/csinode.go | 15 +- .../storage/v1beta1/csistoragecapacity.go | 15 +- .../storage/v1beta1/storageclass.go | 15 +- .../storage/v1beta1/volumeattachment.go | 15 +- .../client-go/discovery/discovery_client.go | 25 +- .../k8s.io/client-go/kubernetes/clientset.go | 190 +- .../client-go/kubernetes/scheme/register.go | 4 + .../v1/admissionregistration_client.go | 20 +- .../v1beta1/admissionregistration_client.go | 20 +- .../v1alpha1/apiserverinternal_client.go | 20 +- .../kubernetes/typed/apps/v1/apps_client.go | 20 +- .../kubernetes/typed/apps/v1/deployment.go | 27 + .../kubernetes/typed/apps/v1/replicaset.go | 27 + .../kubernetes/typed/apps/v1/statefulset.go | 27 + .../typed/apps/v1beta1/apps_client.go | 20 +- .../typed/apps/v1beta2/apps_client.go | 20 +- .../typed/apps/v1beta2/statefulset.go | 26 + .../v1/authentication_client.go | 20 +- .../v1beta1/authentication_client.go | 20 +- .../authorization/v1/authorization_client.go | 20 +- .../v1beta1/authorization_client.go | 20 +- .../autoscaling/v1/autoscaling_client.go | 20 +- .../autoscaling/v2/autoscaling_client.go | 107 + .../kubernetes/typed/autoscaling/v2/doc.go | 20 + .../autoscaling/v2/generated_expansion.go | 21 + .../autoscaling/v2/horizontalpodautoscaler.go | 256 + .../autoscaling/v2beta1/autoscaling_client.go | 20 +- .../autoscaling/v2beta2/autoscaling_client.go | 20 +- .../kubernetes/typed/batch/v1/batch_client.go | 20 +- .../typed/batch/v1beta1/batch_client.go | 20 +- .../certificates/v1/certificates_client.go | 20 +- .../v1beta1/certificates_client.go | 20 +- .../coordination/v1/coordination_client.go | 20 +- .../v1beta1/coordination_client.go | 20 +- .../kubernetes/typed/core/v1/core_client.go | 20 +- .../typed/core/v1/event_expansion.go | 4 + .../client-go/kubernetes/typed/core/v1/pod.go | 25 +- .../kubernetes/typed/core/v1/pod_expansion.go | 27 +- .../typed/discovery/v1/discovery_client.go | 20 +- .../discovery/v1beta1/discovery_client.go | 20 +- .../typed/events/v1/events_client.go | 20 +- .../typed/events/v1beta1/event_expansion.go | 2 +- .../typed/events/v1beta1/events_client.go | 20 +- .../typed/extensions/v1beta1/deployment.go | 26 + .../extensions/v1beta1/extensions_client.go | 20 +- .../typed/extensions/v1beta1/replicaset.go | 26 + .../v1alpha1/flowcontrol_client.go | 20 +- .../flowcontrol/v1beta1/flowcontrol_client.go | 20 +- .../typed/flowcontrol/v1beta2/doc.go | 20 + .../flowcontrol/v1beta2/flowcontrol_client.go | 112 + .../typed/flowcontrol/v1beta2/flowschema.go | 243 + .../v1beta2/generated_expansion.go | 23 + .../v1beta2/prioritylevelconfiguration.go | 243 + .../typed/networking/v1/networking_client.go | 20 +- .../networking/v1beta1/networking_client.go | 20 +- .../kubernetes/typed/node/v1/node_client.go | 20 +- .../typed/node/v1alpha1/node_client.go | 20 +- .../typed/node/v1beta1/node_client.go | 20 +- .../kubernetes/typed/policy/v1/eviction.go | 48 + .../typed/policy/v1/eviction_expansion.go | 40 + .../typed/policy/v1/policy_client.go | 25 +- .../typed/policy/v1beta1/policy_client.go | 20 +- .../kubernetes/typed/rbac/v1/rbac_client.go | 20 +- .../typed/rbac/v1alpha1/rbac_client.go | 20 +- .../typed/rbac/v1beta1/rbac_client.go | 20 +- .../typed/scheduling/v1/scheduling_client.go | 20 +- .../scheduling/v1alpha1/scheduling_client.go | 20 +- .../scheduling/v1beta1/scheduling_client.go | 20 +- .../typed/storage/v1/storage_client.go | 20 +- .../typed/storage/v1alpha1/storage_client.go | 20 +- 
.../typed/storage/v1beta1/storage_client.go | 20 +- .../clientauthentication/install/install.go | 36 + .../pkg/apis/clientauthentication/types.go | 2 +- .../clientauthentication/v1/conversion.go | 28 + .../pkg/apis/clientauthentication/v1/doc.go | 24 + .../apis/clientauthentication/v1/register.go | 55 + .../pkg/apis/clientauthentication/v1/types.go | 122 + .../v1/zz_generated.conversion.go | 201 + .../v1/zz_generated.deepcopy.go | 120 + .../v1/zz_generated.defaults.go | 33 + .../v1alpha1/zz_generated.conversion.go | 1 + .../v1alpha1/zz_generated.deepcopy.go | 1 + .../v1alpha1/zz_generated.defaults.go | 1 + .../v1beta1/conversion.go | 2 +- .../clientauthentication/v1beta1/types.go | 3 + .../v1beta1/zz_generated.conversion.go | 4 +- .../v1beta1/zz_generated.deepcopy.go | 1 + .../v1beta1/zz_generated.defaults.go | 1 + .../zz_generated.deepcopy.go | 1 + .../client-go/pkg/version/.gitattributes | 1 - .../plugin/pkg/client/auth/exec/exec.go | 94 +- .../plugin/pkg/client/auth/exec/metrics.go | 4 +- vendor/k8s.io/client-go/rest/OWNERS | 1 - vendor/k8s.io/client-go/rest/config.go | 64 +- vendor/k8s.io/client-go/rest/request.go | 345 +- vendor/k8s.io/client-go/rest/transport.go | 22 + vendor/k8s.io/client-go/rest/with_retry.go | 232 + .../client-go/rest/zz_generated.deepcopy.go | 1 + vendor/k8s.io/client-go/tools/cache/OWNERS | 2 - .../client-go/tools/cache/controller.go | 84 +- .../client-go/tools/cache/delta_fifo.go | 46 +- .../client-go/tools/cache/expiration_cache.go | 2 +- .../tools/cache/expiration_cache_fakes.go | 2 +- vendor/k8s.io/client-go/tools/cache/fifo.go | 7 +- vendor/k8s.io/client-go/tools/cache/heap.go | 5 +- vendor/k8s.io/client-go/tools/cache/index.go | 4 +- .../tools/cache/mutation_detector.go | 2 +- .../k8s.io/client-go/tools/cache/reflector.go | 18 +- .../client-go/tools/cache/shared_informer.go | 16 +- vendor/k8s.io/client-go/tools/cache/store.go | 5 + .../tools/cache/thread_safe_store.go | 90 +- .../client-go/tools/clientcmd/api/types.go | 54 +- .../tools/clientcmd/api/v1/conversion.go | 2 +- .../tools/clientcmd/api/v1/defaults.go | 37 + .../client-go/tools/clientcmd/api/v1/doc.go | 1 + .../tools/clientcmd/api/v1/register.go | 2 +- .../client-go/tools/clientcmd/api/v1/types.go | 39 +- .../api/v1/zz_generated.conversion.go | 7 + .../clientcmd/api/v1/zz_generated.deepcopy.go | 1 + .../clientcmd/api/v1/zz_generated.defaults.go | 43 + .../clientcmd/api/zz_generated.deepcopy.go | 1 + .../tools/clientcmd/client_config.go | 2 + .../client-go/tools/clientcmd/config.go | 6 +- .../client-go/tools/clientcmd/overrides.go | 4 + .../client-go/tools/clientcmd/validation.go | 22 +- vendor/k8s.io/client-go/tools/pager/pager.go | 5 +- vendor/k8s.io/client-go/transport/config.go | 2 + .../client-go/transport/round_trippers.go | 123 +- .../client-go/transport/token_source.go | 5 + .../k8s.io/client-go/transport/transport.go | 40 +- vendor/k8s.io/client-go/util/cert/cert.go | 4 +- .../client-go/util/flowcontrol/backoff.go | 52 +- .../client-go/util/flowcontrol/throttle.go | 91 +- .../util/workqueue/default_rate_limiters.go | 29 +- .../util/workqueue/delaying_queue.go | 6 +- .../client-go/util/workqueue/metrics.go | 2 +- .../k8s.io/client-go/util/workqueue/queue.go | 81 +- vendor/k8s.io/kube-openapi/LICENSE | 202 + .../k8s.io/kube-openapi/pkg/schemaconv/smd.go | 457 + .../k8s.io/kube-openapi/pkg/util/proto/OWNERS | 2 + .../k8s.io/kube-openapi/pkg/util/proto/doc.go | 19 + .../kube-openapi/pkg/util/proto/document.go | 362 + .../kube-openapi/pkg/util/proto/openapi.go | 285 + 
vendor/k8s.io/utils/clock/README.md | 4 + vendor/k8s.io/utils/clock/clock.go | 168 + .../k8s.io/utils/clock/testing/fake_clock.go | 360 + .../clock/testing/simple_interval_clock.go | 44 + .../third_party/forked/golang/LICENSE | 27 + .../third_party/forked/golang/PATENTS | 22 + .../third_party/forked/golang/net/ip.go | 236 + .../third_party/forked/golang/net/parse.go | 59 + vendor/k8s.io/utils/net/ipnet.go | 221 + vendor/k8s.io/utils/net/net.go | 213 + vendor/k8s.io/utils/net/parse.go | 33 + vendor/k8s.io/utils/net/port.go | 137 + vendor/k8s.io/utils/trace/trace.go | 2 +- vendor/modules.txt | 40 +- vendor/sigs.k8s.io/json/CONTRIBUTING.md | 42 + vendor/sigs.k8s.io/json/LICENSE | 238 + vendor/sigs.k8s.io/json/Makefile | 35 + vendor/sigs.k8s.io/json/OWNERS | 6 + vendor/sigs.k8s.io/json/README.md | 40 + vendor/sigs.k8s.io/json/SECURITY.md | 22 + vendor/sigs.k8s.io/json/SECURITY_CONTACTS | 15 + vendor/sigs.k8s.io/json/code-of-conduct.md | 3 + vendor/sigs.k8s.io/json/doc.go | 17 + .../internal/golang/encoding/json/decode.go | 1402 +++ .../internal/golang/encoding/json/encode.go | 1419 +++ .../internal/golang/encoding/json/fold.go | 143 + .../internal/golang/encoding/json/fuzz.go | 43 + .../internal/golang/encoding/json/indent.go | 143 + .../golang/encoding/json/kubernetes_patch.go | 109 + .../internal/golang/encoding/json/scanner.go | 608 ++ .../internal/golang/encoding/json/stream.go | 519 ++ .../internal/golang/encoding/json/tables.go | 218 + .../internal/golang/encoding/json/tags.go | 44 + vendor/sigs.k8s.io/json/json.go | 139 + .../structured-merge-diff/v4/typed/merge.go | 180 +- .../v4/typed/reconcile_schema.go | 71 +- .../structured-merge-diff/v4/typed/remove.go | 37 +- 619 files changed, 55181 insertions(+), 11519 deletions(-) delete mode 100644 vendor/github.com/googleapis/gnostic/compiler/extension-handler.go create mode 100644 vendor/github.com/googleapis/gnostic/compiler/extensions.go create mode 100644 vendor/github.com/googleapis/gnostic/jsonschema/README.md create mode 100644 vendor/github.com/googleapis/gnostic/jsonschema/base.go create mode 100644 vendor/github.com/googleapis/gnostic/jsonschema/display.go create mode 100644 vendor/github.com/googleapis/gnostic/jsonschema/models.go create mode 100644 vendor/github.com/googleapis/gnostic/jsonschema/operations.go create mode 100644 vendor/github.com/googleapis/gnostic/jsonschema/reader.go create mode 100644 vendor/github.com/googleapis/gnostic/jsonschema/schema.json create mode 100644 vendor/github.com/googleapis/gnostic/jsonschema/writer.go create mode 100644 vendor/github.com/googleapis/gnostic/openapiv2/document.go create mode 100644 vendor/k8s.io/api/autoscaling/v2/doc.go create mode 100644 vendor/k8s.io/api/autoscaling/v2/generated.pb.go create mode 100644 vendor/k8s.io/api/autoscaling/v2/generated.proto create mode 100644 vendor/k8s.io/api/autoscaling/v2/register.go create mode 100644 vendor/k8s.io/api/autoscaling/v2/types.go create mode 100644 vendor/k8s.io/api/autoscaling/v2/types_swagger_doc_generated.go create mode 100644 vendor/k8s.io/api/autoscaling/v2/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/api/flowcontrol/v1beta2/doc.go create mode 100644 vendor/k8s.io/api/flowcontrol/v1beta2/generated.pb.go create mode 100644 vendor/k8s.io/api/flowcontrol/v1beta2/generated.proto create mode 100644 vendor/k8s.io/api/flowcontrol/v1beta2/register.go create mode 100644 vendor/k8s.io/api/flowcontrol/v1beta2/types.go create mode 100644 vendor/k8s.io/api/flowcontrol/v1beta2/types_swagger_doc_generated.go create mode 100644 
vendor/k8s.io/api/flowcontrol/v1beta2/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/api/flowcontrol/v1beta2/zz_generated.prerelease-lifecycle.go create mode 100644 vendor/k8s.io/api/networking/v1/well_known_annotations.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/util/clock/clock.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/managedfields/gvkparser.go create mode 100644 vendor/k8s.io/apimachinery/third_party/forked/golang/LICENSE create mode 100644 vendor/k8s.io/apimachinery/third_party/forked/golang/PATENTS create mode 100644 vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulsetpersistentvolumeclaimretentionpolicy.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulsetpersistentvolumeclaimretentionpolicy.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/scale.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulsetpersistentvolumeclaimretentionpolicy.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/autoscaling/v1/scale.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/autoscaling/v1/scalespec.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/autoscaling/v1/scalestatus.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/containerresourcemetricsource.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/containerresourcemetricstatus.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/crossversionobjectreference.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/externalmetricsource.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/externalmetricstatus.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/horizontalpodautoscaler.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/horizontalpodautoscalerbehavior.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/horizontalpodautoscalercondition.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/horizontalpodautoscalerspec.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/horizontalpodautoscalerstatus.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/hpascalingpolicy.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/hpascalingrules.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/metricidentifier.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/metricspec.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/metricstatus.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/metrictarget.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/metricvaluestatus.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/objectmetricsource.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/objectmetricstatus.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/podresourcemetricsource.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/podsmetricsource.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/podsmetricstatus.go create mode 
100644 vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/resourcemetricsource.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/resourcemetricstatus.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/batch/v1/uncountedterminatedpods.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/core/v1/grpcaction.go rename vendor/k8s.io/client-go/applyconfigurations/core/v1/{handler.go => lifecyclehandler.go} (69%) create mode 100644 vendor/k8s.io/client-go/applyconfigurations/core/v1/podos.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/core/v1/probehandler.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/scale.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/flowdistinguishermethod.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/flowschema.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/flowschemacondition.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/flowschemaspec.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/flowschemastatus.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/groupsubject.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/limitedprioritylevelconfiguration.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/limitresponse.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/nonresourcepolicyrule.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/policyruleswithsubjects.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/prioritylevelconfiguration.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/prioritylevelconfigurationcondition.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/prioritylevelconfigurationreference.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/prioritylevelconfigurationspec.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/prioritylevelconfigurationstatus.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/queuingconfiguration.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/resourcepolicyrule.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/serviceaccountsubject.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/subject.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/usersubject.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/meta/v1/unstructured.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/policy/v1/eviction.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2/autoscaling_client.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2/doc.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2/generated_expansion.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2/horizontalpodautoscaler.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2/doc.go create mode 100644 
vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2/flowcontrol_client.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2/flowschema.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2/generated_expansion.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2/prioritylevelconfiguration.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/policy/v1/eviction.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/policy/v1/eviction_expansion.go create mode 100644 vendor/k8s.io/client-go/pkg/apis/clientauthentication/install/install.go create mode 100644 vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1/conversion.go create mode 100644 vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1/doc.go create mode 100644 vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1/register.go create mode 100644 vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1/types.go create mode 100644 vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1/zz_generated.conversion.go create mode 100644 vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1/zz_generated.defaults.go delete mode 100644 vendor/k8s.io/client-go/pkg/version/.gitattributes create mode 100644 vendor/k8s.io/client-go/rest/with_retry.go create mode 100644 vendor/k8s.io/client-go/tools/clientcmd/api/v1/defaults.go create mode 100644 vendor/k8s.io/client-go/tools/clientcmd/api/v1/zz_generated.defaults.go create mode 100644 vendor/k8s.io/kube-openapi/LICENSE create mode 100644 vendor/k8s.io/kube-openapi/pkg/schemaconv/smd.go create mode 100644 vendor/k8s.io/kube-openapi/pkg/util/proto/OWNERS create mode 100644 vendor/k8s.io/kube-openapi/pkg/util/proto/doc.go create mode 100644 vendor/k8s.io/kube-openapi/pkg/util/proto/document.go create mode 100644 vendor/k8s.io/kube-openapi/pkg/util/proto/openapi.go create mode 100644 vendor/k8s.io/utils/clock/README.md create mode 100644 vendor/k8s.io/utils/clock/clock.go create mode 100644 vendor/k8s.io/utils/clock/testing/fake_clock.go create mode 100644 vendor/k8s.io/utils/clock/testing/simple_interval_clock.go create mode 100644 vendor/k8s.io/utils/internal/third_party/forked/golang/LICENSE create mode 100644 vendor/k8s.io/utils/internal/third_party/forked/golang/PATENTS create mode 100644 vendor/k8s.io/utils/internal/third_party/forked/golang/net/ip.go create mode 100644 vendor/k8s.io/utils/internal/third_party/forked/golang/net/parse.go create mode 100644 vendor/k8s.io/utils/net/ipnet.go create mode 100644 vendor/k8s.io/utils/net/net.go create mode 100644 vendor/k8s.io/utils/net/parse.go create mode 100644 vendor/k8s.io/utils/net/port.go create mode 100644 vendor/sigs.k8s.io/json/CONTRIBUTING.md create mode 100644 vendor/sigs.k8s.io/json/LICENSE create mode 100644 vendor/sigs.k8s.io/json/Makefile create mode 100644 vendor/sigs.k8s.io/json/OWNERS create mode 100644 vendor/sigs.k8s.io/json/README.md create mode 100644 vendor/sigs.k8s.io/json/SECURITY.md create mode 100644 vendor/sigs.k8s.io/json/SECURITY_CONTACTS create mode 100644 vendor/sigs.k8s.io/json/code-of-conduct.md create mode 100644 vendor/sigs.k8s.io/json/doc.go create mode 100644 vendor/sigs.k8s.io/json/internal/golang/encoding/json/decode.go create mode 100644 vendor/sigs.k8s.io/json/internal/golang/encoding/json/encode.go create mode 100644 vendor/sigs.k8s.io/json/internal/golang/encoding/json/fold.go create 
mode 100644 vendor/sigs.k8s.io/json/internal/golang/encoding/json/fuzz.go create mode 100644 vendor/sigs.k8s.io/json/internal/golang/encoding/json/indent.go create mode 100644 vendor/sigs.k8s.io/json/internal/golang/encoding/json/kubernetes_patch.go create mode 100644 vendor/sigs.k8s.io/json/internal/golang/encoding/json/scanner.go create mode 100644 vendor/sigs.k8s.io/json/internal/golang/encoding/json/stream.go create mode 100644 vendor/sigs.k8s.io/json/internal/golang/encoding/json/tables.go create mode 100644 vendor/sigs.k8s.io/json/internal/golang/encoding/json/tags.go create mode 100644 vendor/sigs.k8s.io/json/json.go diff --git a/go.mod b/go.mod index 44e60f196f834..78aaecdecd799 100644 --- a/go.mod +++ b/go.mod @@ -182,7 +182,7 @@ require ( github.com/google/gofuzz v1.1.0 // indirect github.com/google/pprof v0.0.0-20220218203455-0368bd9e19a7 // indirect github.com/googleapis/gax-go/v2 v2.1.1 // indirect - github.com/googleapis/gnostic v0.4.1 // indirect + github.com/googleapis/gnostic v0.5.5 // indirect github.com/gophercloud/gophercloud v0.24.0 // indirect github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.0.0-rc.2.0.20201207153454-9f6bf00c00a7 // indirect github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed // indirect @@ -272,13 +272,15 @@ require ( gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/ini.v1 v1.57.0 // indirect gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect - k8s.io/api v0.22.7 // indirect - k8s.io/apimachinery v0.22.7 // indirect - k8s.io/client-go v12.0.0+incompatible // indirect + k8s.io/api v0.23.6 // indirect + k8s.io/apimachinery v0.23.6 // indirect + k8s.io/client-go v0.23.6 // indirect k8s.io/klog/v2 v2.40.1 // indirect - k8s.io/utils v0.0.0-20201110183641-67b214c5f920 // indirect + k8s.io/kube-openapi v0.0.0-20211115234752-e816edb12b65 // indirect + k8s.io/utils v0.0.0-20211116205334-6203023598ed // indirect rsc.io/binaryregexp v0.2.0 // indirect - sigs.k8s.io/structured-merge-diff/v4 v4.1.0 // indirect + sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6 // indirect + sigs.k8s.io/structured-merge-diff/v4 v4.2.1 // indirect sigs.k8s.io/yaml v1.2.0 // indirect ) @@ -291,11 +293,11 @@ replace github.com/Azure/azure-sdk-for-go => github.com/Azure/azure-sdk-for-go v replace github.com/Azure/azure-storage-blob-go => github.com/MasslessParticle/azure-storage-blob-go v0.14.1-0.20220216145902-b5e698eff68e -replace k8s.io/client-go => k8s.io/client-go v0.21.0 +replace k8s.io/client-go => k8s.io/client-go v0.23.6 -replace k8s.io/api => k8s.io/api v0.21.0 +replace k8s.io/api => k8s.io/api v0.23.6 -replace k8s.io/apimachinery => k8s.io/apimachinery v0.21.0 +replace k8s.io/apimachinery => k8s.io/apimachinery v0.23.6 replace github.com/hashicorp/consul => github.com/hashicorp/consul v1.5.1 @@ -313,3 +315,5 @@ replace github.com/bradfitz/gomemcache => github.com/themihai/gomemcache v0.0.0- replace github.com/thanos-io/thanos v0.22.0 => github.com/thanos-io/thanos v0.19.1-0.20211126105533-c5505f5eaa7d replace github.com/cloudflare/cloudflare-go => github.com/cyriltovena/cloudflare-go v0.27.1-0.20211118103540-ff77400bcb93 + +exclude k8s.io/client-go v8.0.0+incompatible diff --git a/go.sum b/go.sum index 5752cdbf13979..cf40f5921ab33 100644 --- a/go.sum +++ b/go.sum @@ -89,8 +89,8 @@ github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+B github.com/Azure/go-autorest/autorest v0.9.3/go.mod h1:GsRuLYvwzLjjjRoWEIyMUaYq8GNUx2nRB378IPt/1p0= github.com/Azure/go-autorest/autorest v0.10.0/go.mod 
h1:/FALq9T/kS7b5J5qsQ+RSTUdAmGFqi0vUdVNNx8q630= github.com/Azure/go-autorest/autorest v0.11.9/go.mod h1:eipySxLmqSyC5s5k1CLupqet0PSENBEDP93LQ9a8QYw= -github.com/Azure/go-autorest/autorest v0.11.12/go.mod h1:eipySxLmqSyC5s5k1CLupqet0PSENBEDP93LQ9a8QYw= github.com/Azure/go-autorest/autorest v0.11.17/go.mod h1:eipySxLmqSyC5s5k1CLupqet0PSENBEDP93LQ9a8QYw= +github.com/Azure/go-autorest/autorest v0.11.18/go.mod h1:dSiJPy22c3u0OtOKDNttNgqpNFY/GeWa7GH/Pz56QRA= github.com/Azure/go-autorest/autorest v0.11.22/go.mod h1:BAWYUWGPEtKPzjVkp0Q6an0MJcJDsoh5Z1BFAEFs4Xs= github.com/Azure/go-autorest/autorest v0.11.24 h1:1fIGgHKqVm54KIPT+q8Zmd1QlVsmHqeUGso5qm2BqqE= github.com/Azure/go-autorest/autorest v0.11.24/go.mod h1:G6kyRlFnTuSbEYkQGawPfsCswgme4iYf6rfSKUDzbCc= @@ -632,8 +632,9 @@ github.com/envoyproxy/protoc-gen-validate v0.6.2/go.mod h1:2t7qjJNvHPx8IjnBOzl9E github.com/envoyproxy/protoc-gen-validate v0.6.6 h1:BApABShi05CepE340unZKC07YxY/I8KgnWPICc3U5yM= github.com/envoyproxy/protoc-gen-validate v0.6.6/go.mod h1:dyJXwwfPK2VSqiB9Klm1J6romD608Ba7Hij42vrOBCo= github.com/ericchiang/k8s v1.2.0/go.mod h1:/OmBgSq2cd9IANnsGHGlEz27nwMZV2YxlpXuQtU3Bz4= -github.com/evanphx/json-patch v4.9.0+incompatible h1:kLcOMZeuLAJvL2BPWLMIj5oaZQobrkAqrL+WFZwQses= github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/evanphx/json-patch v4.12.0+incompatible h1:4onqiflcdA9EOZ4RxV643DvftH5pOlLGNtQ5lPWQu84= +github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/facette/natsort v0.0.0-20181210072756-2cd4dd1e2dcb h1:IT4JYU7k4ikYg1SCxNI1/Tieq/NFvh6dzLdgi7eu0tM= github.com/facette/natsort v0.0.0-20181210072756-2cd4dd1e2dcb/go.mod h1:bH6Xx7IW64qjjJq8M2u4dxNaBiDfKK+z/3eGDpXEQhc= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= @@ -652,6 +653,7 @@ github.com/fluent/fluent-bit-go v0.0.0-20190925192703-ea13c021720c h1:QwbffUs/+p github.com/fluent/fluent-bit-go v0.0.0-20190925192703-ea13c021720c/go.mod h1:WQX+afhrekY9rGK+WT4xvKSlzmia9gDoLYu4GGYGASQ= github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= +github.com/form3tech-oss/jwt-go v3.2.3+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw= github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g= github.com/foxcpp/go-mockdns v0.0.0-20201212160233-ede2f9158d15/go.mod h1:tPg4cp4nseejPd+UKxtCVQ2hUxNTZ7qQZJa7CLriIeo= @@ -670,6 +672,7 @@ github.com/fsouza/fake-gcs-server v1.7.0/go.mod h1:5XIRs4YvwNbNoz+1JF8j6KLAyDh7R github.com/fullsailor/pkcs7 v0.0.0-20190404230743-d7302db945fa/go.mod h1:KnogPXtdwXqoenmZCw6S+25EAm2MkxbG0deNDu4cbSA= github.com/garyburd/redigo v0.0.0-20150301180006-535138d7bcd7/go.mod h1:NR3MbYisc3/PwhQ00EMzDiPmrwpPxAn5GI05/YaO1SY= github.com/getkin/kin-openapi v0.53.0/go.mod h1:7Yn5whZr5kJi6t+kShccXS8ae1APpYTW6yheSwk8Yi4= +github.com/getkin/kin-openapi v0.76.0/go.mod h1:660oXbgy5JFMKreazJaQTw7o+X00qeSyhcnluiMv+Xg= github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32/go.mod h1:GIjDIg/heH5DOkXY3YJ/wNhfHsQHoXGjl8G8amsYQ1I= @@ 
-702,7 +705,6 @@ github.com/go-logfmt/logfmt v0.5.1 h1:otpy5pqBCBZ1ng9RQ0dPu4PN7ba75Y/aA+UpowDyNV github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= -github.com/go-logr/logr v0.4.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= github.com/go-logr/logr v1.0.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.2.2 h1:ahHml/yUpnlb96Rp8HCvtYVPY8ZYpxq3g7UYchIYwbs= @@ -1008,8 +1010,10 @@ github.com/googleapis/gax-go/v2 v2.1.1 h1:dp3bWCh+PPO1zjRRiCSczJav13sBvG4UhNyVTa github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0eJc8R6ouapiM= github.com/googleapis/gnostic v0.2.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= github.com/googleapis/gnostic v0.4.0/go.mod h1:on+2t9HRStVgn95RSsFWFz+6Q0Snyqv1awfrALZdbtU= -github.com/googleapis/gnostic v0.4.1 h1:DLJCy1n/vrD4HPjOvYcT8aYQXpPIzoRZONaYwyycI+I= github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg= +github.com/googleapis/gnostic v0.5.1/go.mod h1:6U4PtQXGIEt/Z3h5MAT7FNofLnw9vXk2cUuW7uA/OeU= +github.com/googleapis/gnostic v0.5.5 h1:9fHAtK0uDfpveeqqo1hkEZJcFvYXAiCN3UutL8F9xHw= +github.com/googleapis/gnostic v0.5.5/go.mod h1:7+EbHbldMins07ALC74bsA81Ovc97DwqyJO1AENw9kA= github.com/gopcua/opcua v0.1.12/go.mod h1:a6QH4F9XeODklCmWuvaOdL8v9H0d73CEKUHWVZLQyE8= github.com/gophercloud/gophercloud v0.0.0-20180828235145-f29afc2cceca/go.mod h1:3WdhXV3rUYy9p6AUW8d94kr+HS62Y4VL9mBnFxsD8q4= github.com/gophercloud/gophercloud v0.10.0/go.mod h1:gmC5oQqMDOMO1t1gq5DquX/yAU808e/4mzjjDA76+Ss= @@ -1519,8 +1523,8 @@ github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+W github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.10.3/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= +github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= github.com/onsi/ginkgo v1.16.2/go.mod h1:CObGmKUOKaSC0RjmoAK7tKyn4Azo5P2IWuoMnvwxz1E= github.com/onsi/ginkgo v1.16.4 h1:29JGrr5oVBm5ulCWet69zQkzWipVXIol6ygQUe/EzNc= github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= @@ -1804,6 +1808,7 @@ github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE= github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980/go.mod h1:AO3tvPzVZ/ayst6UlUKUv6rcPQInYe3IknH3jYhAKu8= +github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= github.com/streadway/amqp v0.0.0-20180528204448-e5adc2ada8b8/go.mod h1:1WNBiOZtZQLpVAyu0iTduoJL9hEsMloAK5XWrtW0xdY= github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= @@ 
-2066,11 +2071,11 @@ golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20201112155050-0c6587e931a9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20201216223049-8b5274cf687f/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= -golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= golang.org/x/crypto v0.0.0-20210314154223-e6e6c4f2bb5b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8= golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20210817164053-32db794688a5/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20210915214749-c084706c2272/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20210920023735-84f357641f63/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= @@ -2184,7 +2189,6 @@ golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwY golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210224082022-3d97a244fca7/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= @@ -2202,6 +2206,7 @@ golang.org/x/net v0.0.0-20210917221730-978cfadd31cf/go.mod h1:9nx3DQGgdP8bBQD5qx golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211020060615-d418f374d309/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211209124913-491a49abca63/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd h1:O7DYs+zxREGLKzKoMQrtrEacpb0ZVXA5rIwylE2Xchk= golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/oauth2 v0.0.0-20170807180024-9a379c6b3e95/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -2314,6 +2319,7 @@ golang.org/x/sys v0.0.0-20200420163511-1957bb5e6d1f/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 
+golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200622214017-ed371f2e16b4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -2339,7 +2345,6 @@ golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -2363,6 +2368,7 @@ golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210816183151-1e6c022a8912/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210831042530-f4d43177bf5e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210917161153-d61c044b1678/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -2378,7 +2384,7 @@ golang.org/x/sys v0.0.0-20220222172238-00053529121e h1:AGLQ2aegkB2Y9RY8YdQk+7MDC golang.org/x/sys v0.0.0-20220222172238-00053529121e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 h1:JGgROgKl9N8DuW20oFS5gxc+lE67/N3FcwmBPMe7ArY= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -2464,6 +2470,7 @@ golang.org/x/tools v0.0.0-20200317043434-63da46f3035e/go.mod h1:Sl4aGygMT6LrqrWc golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= golang.org/x/tools v0.0.0-20200422205258-72e4a01eba43/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools 
v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200509030707-2212a7e161a5/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= @@ -2599,6 +2606,7 @@ google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6D google.golang.org/genproto v0.0.0-20200806141610-86f49bd18e98/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201110150050-8816d57aaa9a/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= @@ -2774,15 +2782,15 @@ honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9 howett.net/plist v0.0.0-20181124034731-591f970eefbb/go.mod h1:vMygbs4qMhSZSc4lCUl2OEE+rDiIIJAIdR4m7MiMcm0= inet.af/netaddr v0.0.0-20211027220019-c74959edd3b6 h1:acCzuUSQ79tGsM/O50VRFySfMm19IoMKL+sZztZkCxw= inet.af/netaddr v0.0.0-20211027220019-c74959edd3b6/go.mod h1:y3MGhcFMlh0KZPMuXXow8mpjxxAk3yoDNsp4cQz54i8= -k8s.io/api v0.21.0 h1:gu5iGF4V6tfVCQ/R+8Hc0h7H1JuEhzyEi9S4R5LM8+Y= -k8s.io/api v0.21.0/go.mod h1:+YbrhBBGgsxbF6o6Kj4KJPJnBmAKuXDeS3E18bgHNVU= -k8s.io/apimachinery v0.21.0 h1:3Fx+41if+IRavNcKOz09FwEXDBG6ORh6iMsTSelhkMA= -k8s.io/apimachinery v0.21.0/go.mod h1:jbreFvJo3ov9rj7eWT7+sYiRx+qZuCYXwWT1bcDswPY= +k8s.io/api v0.23.6 h1:yOK34wbYECH4RsJbQ9sfkFK3O7f/DUHRlzFehkqZyVw= +k8s.io/api v0.23.6/go.mod h1:1kFaYxGCFHYp3qd6a85DAj/yW8aVD6XLZMqJclkoi9g= +k8s.io/apimachinery v0.23.6 h1:RH1UweWJkWNTlFx0D8uxOpaU1tjIOvVVWV/bu5b3/NQ= +k8s.io/apimachinery v0.23.6/go.mod h1:BEuFMMBaIbcOqVIJqNZJXGFTP4W6AycEpb5+m/97hrM= k8s.io/apiserver v0.20.1/go.mod h1:ro5QHeQkgMS7ZGpvf4tSMx6bBOgPfE+f52KwvXfScaU= k8s.io/apiserver v0.20.4/go.mod h1:Mc80thBKOyy7tbvFtB4kJv1kbdD0eIH8k8vianJcbFM= k8s.io/apiserver v0.20.6/go.mod h1:QIJXNt6i6JB+0YQRNcS0hdRHJlMhflFmsBDeSgT1r8Q= -k8s.io/client-go v0.21.0 h1:n0zzzJsAQmJngpC0IhgFcApZyoGXPrDIAD601HD09ag= -k8s.io/client-go v0.21.0/go.mod h1:nNBytTF9qPFDEhoqgEPaarobC8QPae13bElIVHzIglA= +k8s.io/client-go v0.23.6 h1:7h4SctDVQAQbkHQnR4Kzi7EyUyvla5G1pFWf4+Od7hQ= +k8s.io/client-go v0.23.6/go.mod h1:Umt5icFOMLV/+qbtZ3PR0D+JA6lvvb3syzodv4irpK4= k8s.io/component-base v0.20.1/go.mod h1:guxkoJnNoh8LNrbtiQOlyp2Y2XFCZQmrcg2n/DeYNLk= k8s.io/component-base v0.20.4/go.mod h1:t4p9EdiagbVCJKrQ1RsA5/V4rFQNDfRlevJajlGwgjI= k8s.io/component-base v0.20.6/go.mod h1:6f1MPBAeI+mvuts3sIdtpjljHWBQ2cIy38oBIWMYnrM= @@ -2791,21 +2799,25 @@ k8s.io/cri-api v0.20.1/go.mod h1:2JRbKt+BFLTjtrILYVqQK5jqhI+XNdF6UiGMgczeBCI= k8s.io/cri-api v0.20.4/go.mod h1:2JRbKt+BFLTjtrILYVqQK5jqhI+XNdF6UiGMgczeBCI= k8s.io/cri-api v0.20.6/go.mod h1:ew44AjNXwyn1s0U4xCKGodU7J1HzBeZ1MpGrpa5r8Yc= k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= +k8s.io/gengo 
v0.0.0-20210813121822-485abfe95c7c/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8= k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= +k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= k8s.io/klog/v2 v2.4.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= -k8s.io/klog/v2 v2.8.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec= k8s.io/klog/v2 v2.20.0/go.mod h1:Gm8eSIfQN6457haJuPaMxZw4wyP5k+ykPFlrhQDvhvw= +k8s.io/klog/v2 v2.30.0/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= k8s.io/klog/v2 v2.40.1 h1:P4RRucWk/lFOlDdkAr3mc7iWFkgKrZY9qZMAgek06S4= k8s.io/klog/v2 v2.40.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd/go.mod h1:WOJ3KddDSol4tAGcJo0Tvi+dK12EcqSLqcWsryKMpfM= -k8s.io/kube-openapi v0.0.0-20210305001622-591a79e4bda7 h1:vEx13qjvaZ4yfObSSXW7BrMc/KQBBT/Jyee8XtLf4x0= -k8s.io/kube-openapi v0.0.0-20210305001622-591a79e4bda7/go.mod h1:wXW5VT87nVfh/iLV8FpR2uDvrFyomxbtb1KivDbvPTE= +k8s.io/kube-openapi v0.0.0-20211115234752-e816edb12b65 h1:E3J9oCLlaobFUqsjG9DfKbP2BmgwBL2p7pn0A3dG9W4= +k8s.io/kube-openapi v0.0.0-20211115234752-e816edb12b65/go.mod h1:sX9MT8g7NVZM5lVL/j8QyCCJe8YSMW30QvGZWaCIDIk= k8s.io/kubernetes v1.13.0/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk= k8s.io/utils v0.0.0-20200414100711-2df71ebbae66/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= -k8s.io/utils v0.0.0-20201110183641-67b214c5f920 h1:CbnUZsM497iRC5QMVkHwyl8s2tB3g7yaSHkYPkpgelw= k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +k8s.io/utils v0.0.0-20210802155522-efc7438f0176/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +k8s.io/utils v0.0.0-20211116205334-6203023598ed h1:ck1fRPWPJWsMd8ZRFsWc6mh/zHp5fZ/shhbrgPUxDAE= +k8s.io/utils v0.0.0-20211116205334-6203023598ed/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= modernc.org/httpfs v1.0.0/go.mod h1:BSkfoMUcahSijQD5J/Vu4UMOxzmEf5SNRwyXC4PJBEw= modernc.org/libc v1.3.1/go.mod h1:f8sp9GAfEyGYh3lsRIKtBh/XwACdFvGznxm6GJmQvXk= modernc.org/mathutil v1.1.1/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E= @@ -2819,10 +2831,12 @@ rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.14/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.15/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= +sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6 h1:fD1pz4yfdADVNfFmcP2aBEtudwUQ1AlLnRBALr33v3s= +sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6/go.mod h1:p4QtZmO4uMYipTQNzagwnNoseA6OxSUutVw05NhYDRs= sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= sigs.k8s.io/structured-merge-diff/v4 v4.0.3/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= -sigs.k8s.io/structured-merge-diff/v4 v4.1.0 h1:C4r9BgJ98vrKnnVCjwCSXcWjWe0NKcUQkmzDXZXGwH8= -sigs.k8s.io/structured-merge-diff/v4 v4.1.0/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= +sigs.k8s.io/structured-merge-diff/v4 v4.2.1 h1:bKCqE9GvQ5tiVHn5rfn1r+yao3aLQEaLzkkmAkf+A6Y= +sigs.k8s.io/structured-merge-diff/v4 v4.2.1/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4= sigs.k8s.io/yaml 
v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q= sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= diff --git a/vendor/github.com/googleapis/gnostic/compiler/README.md b/vendor/github.com/googleapis/gnostic/compiler/README.md index 848b16c69b80a..ee9783d232f47 100644 --- a/vendor/github.com/googleapis/gnostic/compiler/README.md +++ b/vendor/github.com/googleapis/gnostic/compiler/README.md @@ -1,3 +1,4 @@ # Compiler support code -This directory contains compiler support code used by Gnostic and Gnostic extensions. \ No newline at end of file +This directory contains compiler support code used by Gnostic and Gnostic +extensions. diff --git a/vendor/github.com/googleapis/gnostic/compiler/context.go b/vendor/github.com/googleapis/gnostic/compiler/context.go index a64c1b75db40f..1bfe9612194b5 100644 --- a/vendor/github.com/googleapis/gnostic/compiler/context.go +++ b/vendor/github.com/googleapis/gnostic/compiler/context.go @@ -1,4 +1,4 @@ -// Copyright 2017 Google Inc. All Rights Reserved. +// Copyright 2017 Google LLC. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -14,30 +14,36 @@ package compiler +import ( + yaml "gopkg.in/yaml.v3" +) + // Context contains state of the compiler as it traverses a document. type Context struct { Parent *Context Name string + Node *yaml.Node ExtensionHandlers *[]ExtensionHandler } // NewContextWithExtensions returns a new object representing the compiler state -func NewContextWithExtensions(name string, parent *Context, extensionHandlers *[]ExtensionHandler) *Context { - return &Context{Name: name, Parent: parent, ExtensionHandlers: extensionHandlers} +func NewContextWithExtensions(name string, node *yaml.Node, parent *Context, extensionHandlers *[]ExtensionHandler) *Context { + return &Context{Name: name, Node: node, Parent: parent, ExtensionHandlers: extensionHandlers} } // NewContext returns a new object representing the compiler state -func NewContext(name string, parent *Context) *Context { +func NewContext(name string, node *yaml.Node, parent *Context) *Context { if parent != nil { - return &Context{Name: name, Parent: parent, ExtensionHandlers: parent.ExtensionHandlers} + return &Context{Name: name, Node: node, Parent: parent, ExtensionHandlers: parent.ExtensionHandlers} } return &Context{Name: name, Parent: parent, ExtensionHandlers: nil} } // Description returns a text description of the compiler state func (context *Context) Description() string { + name := context.Name if context.Parent != nil { - return context.Parent.Description() + "." + context.Name + name = context.Parent.Description() + "." + name } - return context.Name + return name } diff --git a/vendor/github.com/googleapis/gnostic/compiler/error.go b/vendor/github.com/googleapis/gnostic/compiler/error.go index d8672c1008b72..6f40515d6b6e3 100644 --- a/vendor/github.com/googleapis/gnostic/compiler/error.go +++ b/vendor/github.com/googleapis/gnostic/compiler/error.go @@ -1,4 +1,4 @@ -// Copyright 2017 Google Inc. All Rights Reserved. +// Copyright 2017 Google LLC. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -14,6 +14,8 @@ package compiler +import "fmt" + // Error represents compiler errors and their location in the document. 
type Error struct { Context *Context @@ -25,12 +27,19 @@ func NewError(context *Context, message string) *Error { return &Error{Context: context, Message: message} } +func (err *Error) locationDescription() string { + if err.Context.Node != nil { + return fmt.Sprintf("[%d,%d] %s", err.Context.Node.Line, err.Context.Node.Column, err.Context.Description()) + } + return err.Context.Description() +} + // Error returns the string value of an Error. func (err *Error) Error() string { if err.Context == nil { - return "ERROR " + err.Message + return err.Message } - return "ERROR " + err.Context.Description() + " " + err.Message + return err.locationDescription() + " " + err.Message } // ErrorGroup is a container for groups of Error values. diff --git a/vendor/github.com/googleapis/gnostic/compiler/extension-handler.go b/vendor/github.com/googleapis/gnostic/compiler/extension-handler.go deleted file mode 100644 index 1f85b650e8172..0000000000000 --- a/vendor/github.com/googleapis/gnostic/compiler/extension-handler.go +++ /dev/null @@ -1,101 +0,0 @@ -// Copyright 2017 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package compiler - -import ( - "bytes" - "fmt" - "os/exec" - - "strings" - - "errors" - - "github.com/golang/protobuf/proto" - "github.com/golang/protobuf/ptypes/any" - ext_plugin "github.com/googleapis/gnostic/extensions" - yaml "gopkg.in/yaml.v2" -) - -// ExtensionHandler describes a binary that is called by the compiler to handle specification extensions. -type ExtensionHandler struct { - Name string -} - -// HandleExtension calls a binary extension handler. 
-func HandleExtension(context *Context, in interface{}, extensionName string) (bool, *any.Any, error) { - handled := false - var errFromPlugin error - var outFromPlugin *any.Any - - if context != nil && context.ExtensionHandlers != nil && len(*(context.ExtensionHandlers)) != 0 { - for _, customAnyProtoGenerator := range *(context.ExtensionHandlers) { - outFromPlugin, errFromPlugin = customAnyProtoGenerator.handle(in, extensionName) - if outFromPlugin == nil { - continue - } else { - handled = true - break - } - } - } - return handled, outFromPlugin, errFromPlugin -} - -func (extensionHandlers *ExtensionHandler) handle(in interface{}, extensionName string) (*any.Any, error) { - if extensionHandlers.Name != "" { - binary, _ := yaml.Marshal(in) - - request := &ext_plugin.ExtensionHandlerRequest{} - - version := &ext_plugin.Version{} - version.Major = 0 - version.Minor = 1 - version.Patch = 0 - request.CompilerVersion = version - - request.Wrapper = &ext_plugin.Wrapper{} - - request.Wrapper.Version = "v2" - request.Wrapper.Yaml = string(binary) - request.Wrapper.ExtensionName = extensionName - - requestBytes, _ := proto.Marshal(request) - cmd := exec.Command(extensionHandlers.Name) - cmd.Stdin = bytes.NewReader(requestBytes) - output, err := cmd.Output() - - if err != nil { - fmt.Printf("Error: %+v\n", err) - return nil, err - } - response := &ext_plugin.ExtensionHandlerResponse{} - err = proto.Unmarshal(output, response) - if err != nil { - fmt.Printf("Error: %+v\n", err) - fmt.Printf("%s\n", string(output)) - return nil, err - } - if !response.Handled { - return nil, nil - } - if len(response.Error) != 0 { - message := fmt.Sprintf("Errors when parsing: %+v for field %s by vendor extension handler %s. Details %+v", in, extensionName, extensionHandlers.Name, strings.Join(response.Error, ",")) - return nil, errors.New(message) - } - return response.Value, nil - } - return nil, nil -} diff --git a/vendor/github.com/googleapis/gnostic/compiler/extensions.go b/vendor/github.com/googleapis/gnostic/compiler/extensions.go new file mode 100644 index 0000000000000..20848a0a1636e --- /dev/null +++ b/vendor/github.com/googleapis/gnostic/compiler/extensions.go @@ -0,0 +1,85 @@ +// Copyright 2017 Google LLC. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package compiler + +import ( + "bytes" + "fmt" + "os/exec" + "strings" + + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes/any" + extensions "github.com/googleapis/gnostic/extensions" + yaml "gopkg.in/yaml.v3" +) + +// ExtensionHandler describes a binary that is called by the compiler to handle specification extensions. +type ExtensionHandler struct { + Name string +} + +// CallExtension calls a binary extension handler. 
+func CallExtension(context *Context, in *yaml.Node, extensionName string) (handled bool, response *any.Any, err error) { + if context == nil || context.ExtensionHandlers == nil { + return false, nil, nil + } + handled = false + for _, handler := range *(context.ExtensionHandlers) { + response, err = handler.handle(in, extensionName) + if response == nil { + continue + } else { + handled = true + break + } + } + return handled, response, err +} + +func (extensionHandlers *ExtensionHandler) handle(in *yaml.Node, extensionName string) (*any.Any, error) { + if extensionHandlers.Name != "" { + yamlData, _ := yaml.Marshal(in) + request := &extensions.ExtensionHandlerRequest{ + CompilerVersion: &extensions.Version{ + Major: 0, + Minor: 1, + Patch: 0, + }, + Wrapper: &extensions.Wrapper{ + Version: "unknown", // TODO: set this to the type/version of spec being parsed. + Yaml: string(yamlData), + ExtensionName: extensionName, + }, + } + requestBytes, _ := proto.Marshal(request) + cmd := exec.Command(extensionHandlers.Name) + cmd.Stdin = bytes.NewReader(requestBytes) + output, err := cmd.Output() + if err != nil { + return nil, err + } + response := &extensions.ExtensionHandlerResponse{} + err = proto.Unmarshal(output, response) + if err != nil || !response.Handled { + return nil, err + } + if len(response.Errors) != 0 { + return nil, fmt.Errorf("Errors when parsing: %+v for field %s by vendor extension handler %s. Details %+v", in, extensionName, extensionHandlers.Name, strings.Join(response.Errors, ",")) + } + return response.Value, nil + } + return nil, nil +} diff --git a/vendor/github.com/googleapis/gnostic/compiler/helpers.go b/vendor/github.com/googleapis/gnostic/compiler/helpers.go index 76df635ff9bf8..48f02f3950ec7 100644 --- a/vendor/github.com/googleapis/gnostic/compiler/helpers.go +++ b/vendor/github.com/googleapis/gnostic/compiler/helpers.go @@ -1,4 +1,4 @@ -// Copyright 2017 Google Inc. All Rights Reserved. +// Copyright 2017 Google LLC. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -16,56 +16,63 @@ package compiler import ( "fmt" - "gopkg.in/yaml.v2" "regexp" "sort" "strconv" + + "github.com/googleapis/gnostic/jsonschema" + "gopkg.in/yaml.v3" ) // compiler helper functions, usually called from generated code -// UnpackMap gets a yaml.MapSlice if possible. -func UnpackMap(in interface{}) (yaml.MapSlice, bool) { - m, ok := in.(yaml.MapSlice) - if ok { - return m, true - } - // do we have an empty array? - a, ok := in.([]interface{}) - if ok && len(a) == 0 { - // if so, return an empty map - return yaml.MapSlice{}, true +// UnpackMap gets a *yaml.Node if possible. +func UnpackMap(in *yaml.Node) (*yaml.Node, bool) { + if in == nil { + return nil, false } - return nil, false + return in, true } -// SortedKeysForMap returns the sorted keys of a yaml.MapSlice. -func SortedKeysForMap(m yaml.MapSlice) []string { +// SortedKeysForMap returns the sorted keys of a yamlv2.MapSlice. +func SortedKeysForMap(m *yaml.Node) []string { keys := make([]string, 0) - for _, item := range m { - keys = append(keys, item.Key.(string)) + if m.Kind == yaml.MappingNode { + for i := 0; i < len(m.Content); i += 2 { + keys = append(keys, m.Content[i].Value) + } } sort.Strings(keys) return keys } -// MapHasKey returns true if a yaml.MapSlice contains a specified key. 
-func MapHasKey(m yaml.MapSlice, key string) bool { - for _, item := range m { - itemKey, ok := item.Key.(string) - if ok && key == itemKey { - return true +// MapHasKey returns true if a yamlv2.MapSlice contains a specified key. +func MapHasKey(m *yaml.Node, key string) bool { + if m == nil { + return false + } + if m.Kind == yaml.MappingNode { + for i := 0; i < len(m.Content); i += 2 { + itemKey := m.Content[i].Value + if key == itemKey { + return true + } } } return false } // MapValueForKey gets the value of a map value for a specified key. -func MapValueForKey(m yaml.MapSlice, key string) interface{} { - for _, item := range m { - itemKey, ok := item.Key.(string) - if ok && key == itemKey { - return item.Value +func MapValueForKey(m *yaml.Node, key string) *yaml.Node { + if m == nil { + return nil + } + if m.Kind == yaml.MappingNode { + for i := 0; i < len(m.Content); i += 2 { + itemKey := m.Content[i].Value + if key == itemKey { + return m.Content[i+1] + } } } return nil @@ -83,8 +90,118 @@ func ConvertInterfaceArrayToStringArray(interfaceArray []interface{}) []string { return stringArray } +// SequenceNodeForNode returns a node if it is a SequenceNode. +func SequenceNodeForNode(node *yaml.Node) (*yaml.Node, bool) { + if node.Kind != yaml.SequenceNode { + return nil, false + } + return node, true +} + +// BoolForScalarNode returns the bool value of a node. +func BoolForScalarNode(node *yaml.Node) (bool, bool) { + if node == nil { + return false, false + } + if node.Kind == yaml.DocumentNode { + return BoolForScalarNode(node.Content[0]) + } + if node.Kind != yaml.ScalarNode { + return false, false + } + if node.Tag != "!!bool" { + return false, false + } + v, err := strconv.ParseBool(node.Value) + if err != nil { + return false, false + } + return v, true +} + +// IntForScalarNode returns the integer value of a node. +func IntForScalarNode(node *yaml.Node) (int64, bool) { + if node == nil { + return 0, false + } + if node.Kind == yaml.DocumentNode { + return IntForScalarNode(node.Content[0]) + } + if node.Kind != yaml.ScalarNode { + return 0, false + } + if node.Tag != "!!int" { + return 0, false + } + v, err := strconv.ParseInt(node.Value, 10, 64) + if err != nil { + return 0, false + } + return v, true +} + +// FloatForScalarNode returns the float value of a node. +func FloatForScalarNode(node *yaml.Node) (float64, bool) { + if node == nil { + return 0.0, false + } + if node.Kind == yaml.DocumentNode { + return FloatForScalarNode(node.Content[0]) + } + if node.Kind != yaml.ScalarNode { + return 0.0, false + } + if (node.Tag != "!!int") && (node.Tag != "!!float") { + return 0.0, false + } + v, err := strconv.ParseFloat(node.Value, 64) + if err != nil { + return 0.0, false + } + return v, true +} + +// StringForScalarNode returns the string value of a node. +func StringForScalarNode(node *yaml.Node) (string, bool) { + if node == nil { + return "", false + } + if node.Kind == yaml.DocumentNode { + return StringForScalarNode(node.Content[0]) + } + switch node.Kind { + case yaml.ScalarNode: + switch node.Tag { + case "!!int": + return node.Value, true + case "!!str": + return node.Value, true + case "!!timestamp": + return node.Value, true + case "!!null": + return "", true + default: + return "", false + } + default: + return "", false + } +} + +// StringArrayForSequenceNode converts a sequence node to an array of strings, if possible. 
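Every accessor above leans on the same yaml.v3 invariant: a MappingNode interleaves keys and values in Content, and resolved scalars carry a "!!"-style Tag, which is exactly what BoolForScalarNode and friends switch on. A standalone sketch of that layout (the sample YAML is invented):

package main

import (
	"fmt"

	yaml "gopkg.in/yaml.v3"
)

func main() {
	var doc yaml.Node
	if err := yaml.Unmarshal([]byte("name: petstore\ncount: 3\nready: true"), &doc); err != nil {
		panic(err)
	}
	m := doc.Content[0] // unwrap the DocumentNode, as the helpers do
	for i := 0; i < len(m.Content); i += 2 {
		key, value := m.Content[i], m.Content[i+1]
		fmt.Printf("%s %s %s\n", key.Value, value.Tag, value.Value)
	}
	// name !!str petstore
	// count !!int 3
	// ready !!bool true
}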
+func StringArrayForSequenceNode(node *yaml.Node) []string { + stringArray := make([]string, 0) + for _, item := range node.Content { + v, ok := StringForScalarNode(item) + if ok { + stringArray = append(stringArray, v) + } + } + return stringArray +} + // MissingKeysInMap identifies which keys from a list of required keys are not in a map. -func MissingKeysInMap(m yaml.MapSlice, requiredKeys []string) []string { +func MissingKeysInMap(m *yaml.Node, requiredKeys []string) []string { missingKeys := make([]string, 0) for _, k := range requiredKeys { if !MapHasKey(m, k) { @@ -95,64 +212,109 @@ func MissingKeysInMap(m yaml.MapSlice, requiredKeys []string) []string { } // InvalidKeysInMap returns keys in a map that don't match a list of allowed keys and patterns. -func InvalidKeysInMap(m yaml.MapSlice, allowedKeys []string, allowedPatterns []*regexp.Regexp) []string { +func InvalidKeysInMap(m *yaml.Node, allowedKeys []string, allowedPatterns []*regexp.Regexp) []string { invalidKeys := make([]string, 0) - for _, item := range m { - itemKey, ok := item.Key.(string) - if ok { - key := itemKey - found := false - // does the key match an allowed key? - for _, allowedKey := range allowedKeys { - if key == allowedKey { + if m == nil || m.Kind != yaml.MappingNode { + return invalidKeys + } + for i := 0; i < len(m.Content); i += 2 { + key := m.Content[i].Value + found := false + // does the key match an allowed key? + for _, allowedKey := range allowedKeys { + if key == allowedKey { + found = true + break + } + } + if !found { + // does the key match an allowed pattern? + for _, allowedPattern := range allowedPatterns { + if allowedPattern.MatchString(key) { found = true break } } if !found { - // does the key match an allowed pattern? - for _, allowedPattern := range allowedPatterns { - if allowedPattern.MatchString(key) { - found = true - break - } - } - if !found { - invalidKeys = append(invalidKeys, key) - } + invalidKeys = append(invalidKeys, key) } } } return invalidKeys } -// DescribeMap describes a map (for debugging purposes). -func DescribeMap(in interface{}, indent string) string { - description := "" - m, ok := in.(map[string]interface{}) - if ok { - keys := make([]string, 0) - for k := range m { - keys = append(keys, k) - } - sort.Strings(keys) - for _, k := range keys { - v := m[k] - description += fmt.Sprintf("%s%s:\n", indent, k) - description += DescribeMap(v, indent+" ") - } - return description +// NewNullNode creates a new Null node. +func NewNullNode() *yaml.Node { + node := &yaml.Node{ + Kind: yaml.ScalarNode, + Tag: "!!null", } - a, ok := in.([]interface{}) - if ok { - for i, v := range a { - description += fmt.Sprintf("%s%d:\n", indent, i) - description += DescribeMap(v, indent+" ") - } - return description + return node +} + +// NewMappingNode creates a new Mapping node. +func NewMappingNode() *yaml.Node { + return &yaml.Node{ + Kind: yaml.MappingNode, + Content: make([]*yaml.Node, 0), + } +} + +// NewSequenceNode creates a new Sequence node. +func NewSequenceNode() *yaml.Node { + node := &yaml.Node{ + Kind: yaml.SequenceNode, + Content: make([]*yaml.Node, 0), + } + return node +} + +// NewScalarNodeForString creates a new node to hold a string. +func NewScalarNodeForString(s string) *yaml.Node { + return &yaml.Node{ + Kind: yaml.ScalarNode, + Tag: "!!str", + Value: s, + } +} + +// NewSequenceNodeForStringArray creates a new node to hold an array of strings. 
+func NewSequenceNodeForStringArray(strings []string) *yaml.Node {
+	node := &yaml.Node{
+		Kind:    yaml.SequenceNode,
+		Content: make([]*yaml.Node, 0),
+	}
+	for _, s := range strings {
+		node.Content = append(node.Content, NewScalarNodeForString(s))
+	}
+	return node
+}
+
+// NewScalarNodeForBool creates a new node to hold a bool.
+func NewScalarNodeForBool(b bool) *yaml.Node {
+	return &yaml.Node{
+		Kind:  yaml.ScalarNode,
+		Tag:   "!!bool",
+		Value: fmt.Sprintf("%t", b),
+	}
+}
+
+// NewScalarNodeForFloat creates a new node to hold a float.
+func NewScalarNodeForFloat(f float64) *yaml.Node {
+	return &yaml.Node{
+		Kind:  yaml.ScalarNode,
+		Tag:   "!!float",
+		Value: fmt.Sprintf("%g", f),
+	}
+}
+
+// NewScalarNodeForInt creates a new node to hold an integer.
+func NewScalarNodeForInt(i int64) *yaml.Node {
+	return &yaml.Node{
+		Kind:  yaml.ScalarNode,
+		Tag:   "!!int",
+		Value: fmt.Sprintf("%d", i),
 	}
-	description += fmt.Sprintf("%s%+v\n", indent, in)
-	return description
 }
 
 // PluralProperties returns the string "properties" pluralized.
@@ -195,3 +357,40 @@ func StringValue(item interface{}) (value string, ok bool) {
 	}
 	return "", false
 }
+
+// Description returns a human-readable representation of an item.
+func Description(item interface{}) string {
+	value, ok := item.(*yaml.Node)
+	if ok {
+		return jsonschema.Render(value)
+	}
+	return fmt.Sprintf("%+v", item)
+}
+
+// Display returns a description of a node for use in error messages.
+func Display(node *yaml.Node) string {
+	switch node.Kind {
+	case yaml.ScalarNode:
+		switch node.Tag {
+		case "!!str":
+			return fmt.Sprintf("%s (string)", node.Value)
+		}
+	}
+	return fmt.Sprintf("%+v (%T)", node, node)
+}
+
+// Marshal creates a yaml version of a structure in our preferred style.
+func Marshal(in *yaml.Node) []byte {
+	clearStyle(in)
+	//bytes, _ := yaml.Marshal(&yaml.Node{Kind: yaml.DocumentNode, Content: []*yaml.Node{in}})
+	bytes, _ := yaml.Marshal(in)
+
+	return bytes
+}
+
+func clearStyle(node *yaml.Node) {
+	node.Style = 0
+	for _, c := range node.Content {
+		clearStyle(c)
+	}
+}
diff --git a/vendor/github.com/googleapis/gnostic/compiler/main.go b/vendor/github.com/googleapis/gnostic/compiler/main.go
index 9713a21cca264..ce9fcc456cc48 100644
--- a/vendor/github.com/googleapis/gnostic/compiler/main.go
+++ b/vendor/github.com/googleapis/gnostic/compiler/main.go
@@ -1,4 +1,4 @@
-// Copyright 2017 Google Inc. All Rights Reserved.
+// Copyright 2017 Google LLC. All Rights Reserved.
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
diff --git a/vendor/github.com/googleapis/gnostic/compiler/reader.go b/vendor/github.com/googleapis/gnostic/compiler/reader.go
index 955b985b80feb..be0e8b40c8cd4 100644
--- a/vendor/github.com/googleapis/gnostic/compiler/reader.go
+++ b/vendor/github.com/googleapis/gnostic/compiler/reader.go
@@ -1,4 +1,4 @@
-// Copyright 2017 Google Inc. All Rights Reserved.
+// Copyright 2017 Google LLC. All Rights Reserved.
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
@@ -15,7 +15,6 @@ package compiler import ( - "errors" "fmt" "io/ioutil" "log" @@ -23,18 +22,30 @@ import ( "net/url" "path/filepath" "strings" + "sync" - yaml "gopkg.in/yaml.v2" + yaml "gopkg.in/yaml.v3" ) +var verboseReader = false + var fileCache map[string][]byte -var infoCache map[string]interface{} -var count int64 +var infoCache map[string]*yaml.Node -var verboseReader = false var fileCacheEnable = true var infoCacheEnable = true +// These locks are used to synchronize accesses to the fileCache and infoCache +// maps (above). They are global state and can throw thread-related errors +// when modified from separate goroutines. The general strategy is to protect +// all public functions in this file with mutex Lock() calls. As a result, to +// avoid deadlock, these public functions should not call other public +// functions, so some public functions have private equivalents. +// In the future, we might consider replacing the maps with sync.Map and +// eliminating these mutexes. +var fileCacheMutex sync.Mutex +var infoCacheMutex sync.Mutex + func initializeFileCache() { if fileCache == nil { fileCache = make(map[string][]byte, 0) @@ -43,27 +54,42 @@ func initializeFileCache() { func initializeInfoCache() { if infoCache == nil { - infoCache = make(map[string]interface{}, 0) + infoCache = make(map[string]*yaml.Node, 0) } } +// EnableFileCache turns on file caching. func EnableFileCache() { + fileCacheMutex.Lock() + defer fileCacheMutex.Unlock() fileCacheEnable = true } +// EnableInfoCache turns on parsed info caching. func EnableInfoCache() { + infoCacheMutex.Lock() + defer infoCacheMutex.Unlock() infoCacheEnable = true } +// DisableFileCache turns off file caching. func DisableFileCache() { + fileCacheMutex.Lock() + defer fileCacheMutex.Unlock() fileCacheEnable = false } +// DisableInfoCache turns off parsed info caching. func DisableInfoCache() { + infoCacheMutex.Lock() + defer infoCacheMutex.Unlock() infoCacheEnable = false } +// RemoveFromFileCache removes an entry from the file cache. func RemoveFromFileCache(fileurl string) { + fileCacheMutex.Lock() + defer fileCacheMutex.Unlock() if !fileCacheEnable { return } @@ -71,7 +97,10 @@ func RemoveFromFileCache(fileurl string) { delete(fileCache, fileurl) } +// RemoveFromInfoCache removes an entry from the info cache. func RemoveFromInfoCache(filename string) { + infoCacheMutex.Lock() + defer infoCacheMutex.Unlock() if !infoCacheEnable { return } @@ -79,21 +108,31 @@ func RemoveFromInfoCache(filename string) { delete(infoCache, filename) } -func GetInfoCache() map[string]interface{} { +// GetInfoCache returns the info cache map. +func GetInfoCache() map[string]*yaml.Node { + infoCacheMutex.Lock() + defer infoCacheMutex.Unlock() if infoCache == nil { initializeInfoCache() } return infoCache } +// ClearFileCache clears the file cache. func ClearFileCache() { + fileCacheMutex.Lock() + defer fileCacheMutex.Unlock() fileCache = make(map[string][]byte, 0) } +// ClearInfoCache clears the info cache. func ClearInfoCache() { - infoCache = make(map[string]interface{}) + infoCacheMutex.Lock() + defer infoCacheMutex.Unlock() + infoCache = make(map[string]*yaml.Node) } +// ClearCaches clears all caches. func ClearCaches() { ClearFileCache() ClearInfoCache() @@ -101,6 +140,12 @@ func ClearCaches() { // FetchFile gets a specified file from the local filesystem or a remote location. 
func FetchFile(fileurl string) ([]byte, error) { + fileCacheMutex.Lock() + defer fileCacheMutex.Unlock() + return fetchFile(fileurl) +} + +func fetchFile(fileurl string) ([]byte, error) { var bytes []byte initializeFileCache() if fileCacheEnable { @@ -121,7 +166,7 @@ func FetchFile(fileurl string) ([]byte, error) { } defer response.Body.Close() if response.StatusCode != 200 { - return nil, errors.New(fmt.Sprintf("Error downloading %s: %s", fileurl, response.Status)) + return nil, fmt.Errorf("Error downloading %s: %s", fileurl, response.Status) } bytes, err = ioutil.ReadAll(response.Body) if fileCacheEnable && err == nil { @@ -132,11 +177,17 @@ func FetchFile(fileurl string) ([]byte, error) { // ReadBytesForFile reads the bytes of a file. func ReadBytesForFile(filename string) ([]byte, error) { + fileCacheMutex.Lock() + defer fileCacheMutex.Unlock() + return readBytesForFile(filename) +} + +func readBytesForFile(filename string) ([]byte, error) { // is the filename a url? fileurl, _ := url.Parse(filename) if fileurl.Scheme != "" { // yes, fetch it - bytes, err := FetchFile(filename) + bytes, err := fetchFile(filename) if err != nil { return nil, err } @@ -150,8 +201,14 @@ func ReadBytesForFile(filename string) ([]byte, error) { return bytes, nil } -// ReadInfoFromBytes unmarshals a file as a yaml.MapSlice. -func ReadInfoFromBytes(filename string, bytes []byte) (interface{}, error) { +// ReadInfoFromBytes unmarshals a file as a *yaml.Node. +func ReadInfoFromBytes(filename string, bytes []byte) (*yaml.Node, error) { + infoCacheMutex.Lock() + defer infoCacheMutex.Unlock() + return readInfoFromBytes(filename, bytes) +} + +func readInfoFromBytes(filename string, bytes []byte) (*yaml.Node, error) { initializeInfoCache() if infoCacheEnable { cachedInfo, ok := infoCache[filename] @@ -165,19 +222,23 @@ func ReadInfoFromBytes(filename string, bytes []byte) (interface{}, error) { log.Printf("Reading info for file %s", filename) } } - var info yaml.MapSlice + var info yaml.Node err := yaml.Unmarshal(bytes, &info) if err != nil { return nil, err } if infoCacheEnable && len(filename) > 0 { - infoCache[filename] = info + infoCache[filename] = &info } - return info, nil + return &info, nil } // ReadInfoForRef reads a file and return the fragment needed to resolve a $ref. 
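The comment block that introduces these mutexes is worth distilling: every exported entry point takes the lock and immediately delegates to an unexported twin, and only the twins call each other, so no code path ever acquires the same mutex twice. The same pattern in a generic, self-contained sketch (names invented):

package main

import (
	"fmt"
	"sync"
)

var (
	cache      = map[string]string{}
	cacheMutex sync.Mutex
)

// Lookup is the public entry point: lock, then delegate.
func Lookup(key string) (string, bool) {
	cacheMutex.Lock()
	defer cacheMutex.Unlock()
	return lookup(key)
}

// lookup assumes the caller already holds cacheMutex, so other
// lock-holding functions (the analogue of ReadInfoForRef below)
// can reuse it without deadlocking.
func lookup(key string) (string, bool) {
	v, ok := cache[key]
	return v, ok
}

func main() {
	cache["a"] = "1"
	fmt.Println(Lookup("a")) // 1 true
}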
-func ReadInfoForRef(basefile string, ref string) (interface{}, error) {
+func ReadInfoForRef(basefile string, ref string) (*yaml.Node, error) {
+	fileCacheMutex.Lock()
+	defer fileCacheMutex.Unlock()
+	infoCacheMutex.Lock()
+	defer infoCacheMutex.Unlock()
 	initializeInfoCache()
 	if infoCacheEnable {
 		info, ok := infoCache[ref]
@@ -191,7 +252,6 @@ func ReadInfoForRef(basefile string, ref string) (interface{}, error) {
 			log.Printf("Reading info for ref %s#%s", basefile, ref)
 		}
 	}
-	count = count + 1
 	basedir, _ := filepath.Split(basefile)
 	parts := strings.Split(ref, "#")
 	var filename string
@@ -204,24 +264,30 @@ func ReadInfoForRef(basefile string, ref string) (interface{}, error) {
 	} else {
 		filename = basefile
 	}
-	bytes, err := ReadBytesForFile(filename)
+	bytes, err := readBytesForFile(filename)
 	if err != nil {
 		return nil, err
 	}
-	info, err := ReadInfoFromBytes(filename, bytes)
+	info, err := readInfoFromBytes(filename, bytes)
+	if info != nil && info.Kind == yaml.DocumentNode {
+		info = info.Content[0]
+	}
 	if err != nil {
 		log.Printf("File error: %v\n", err)
 	} else {
+		if info == nil {
+			return nil, NewError(nil, fmt.Sprintf("could not resolve %s", ref))
+		}
 		if len(parts) > 1 {
 			path := strings.Split(parts[1], "/")
 			for i, key := range path {
 				if i > 0 {
-					m, ok := info.(yaml.MapSlice)
-					if ok {
+					m := info
+					if true {
 						found := false
-						for _, section := range m {
-							if section.Key == key {
-								info = section.Value
+						for i := 0; i < len(m.Content); i += 2 {
+							if m.Content[i].Value == key {
+								info = m.Content[i+1]
 								found = true
 							}
 						}
diff --git a/vendor/github.com/googleapis/gnostic/extensions/README.md b/vendor/github.com/googleapis/gnostic/extensions/README.md
index ff1c2eb1e3ad5..4b5d63e5856d7 100644
--- a/vendor/github.com/googleapis/gnostic/extensions/README.md
+++ b/vendor/github.com/googleapis/gnostic/extensions/README.md
@@ -1,5 +1,13 @@
 # Extensions
 
-This directory contains support code for building Gnostic extensions and associated examples.
+**Extension Support is experimental.**
 
-Extensions are used to compile vendor or specification extensions into protocol buffer structures.
+This directory contains support code for building Gnostic extension handlers and
+associated examples.
+
+Extension handlers can be used to compile vendor or specification extensions
+into protocol buffer structures.
+
+Like plugins, extension handlers are built as separate executables. Extension
+bodies are written to extension handlers as serialized
+ExtensionHandlerRequests.
diff --git a/vendor/github.com/googleapis/gnostic/extensions/extension.pb.go b/vendor/github.com/googleapis/gnostic/extensions/extension.pb.go
index 432dc06e6d08c..5aab58ebfbe43 100644
--- a/vendor/github.com/googleapis/gnostic/extensions/extension.pb.go
+++ b/vendor/github.com/googleapis/gnostic/extensions/extension.pb.go
@@ -1,148 +1,181 @@
+// Copyright 2017 Google LLC. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
 // Code generated by protoc-gen-go. DO NOT EDIT.
+// versions: +// protoc-gen-go v1.26.0 +// protoc v3.15.5 // source: extensions/extension.proto -package openapiextension_v1 +package gnostic_extension_v1 import ( - fmt "fmt" - proto "github.com/golang/protobuf/proto" - any "github.com/golang/protobuf/ptypes/any" - math "math" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + anypb "google.golang.org/protobuf/types/known/anypb" + reflect "reflect" + sync "sync" ) -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) -// The version number of OpenAPI compiler. +// The version number of Gnostic. type Version struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + Major int32 `protobuf:"varint,1,opt,name=major,proto3" json:"major,omitempty"` Minor int32 `protobuf:"varint,2,opt,name=minor,proto3" json:"minor,omitempty"` Patch int32 `protobuf:"varint,3,opt,name=patch,proto3" json:"patch,omitempty"` // A suffix for alpha, beta or rc release, e.g., "alpha-1", "rc2". It should // be empty for mainline stable releases. - Suffix string `protobuf:"bytes,4,opt,name=suffix,proto3" json:"suffix,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Suffix string `protobuf:"bytes,4,opt,name=suffix,proto3" json:"suffix,omitempty"` } -func (m *Version) Reset() { *m = Version{} } -func (m *Version) String() string { return proto.CompactTextString(m) } -func (*Version) ProtoMessage() {} -func (*Version) Descriptor() ([]byte, []int) { - return fileDescriptor_661e47e790f76671, []int{0} +func (x *Version) Reset() { + *x = Version{} + if protoimpl.UnsafeEnabled { + mi := &file_extensions_extension_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *Version) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Version.Unmarshal(m, b) -} -func (m *Version) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Version.Marshal(b, m, deterministic) +func (x *Version) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *Version) XXX_Merge(src proto.Message) { - xxx_messageInfo_Version.Merge(m, src) -} -func (m *Version) XXX_Size() int { - return xxx_messageInfo_Version.Size(m) -} -func (m *Version) XXX_DiscardUnknown() { - xxx_messageInfo_Version.DiscardUnknown(m) + +func (*Version) ProtoMessage() {} + +func (x *Version) ProtoReflect() protoreflect.Message { + mi := &file_extensions_extension_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_Version proto.InternalMessageInfo +// Deprecated: Use 
Version.ProtoReflect.Descriptor instead. +func (*Version) Descriptor() ([]byte, []int) { + return file_extensions_extension_proto_rawDescGZIP(), []int{0} +} -func (m *Version) GetMajor() int32 { - if m != nil { - return m.Major +func (x *Version) GetMajor() int32 { + if x != nil { + return x.Major } return 0 } -func (m *Version) GetMinor() int32 { - if m != nil { - return m.Minor +func (x *Version) GetMinor() int32 { + if x != nil { + return x.Minor } return 0 } -func (m *Version) GetPatch() int32 { - if m != nil { - return m.Patch +func (x *Version) GetPatch() int32 { + if x != nil { + return x.Patch } return 0 } -func (m *Version) GetSuffix() string { - if m != nil { - return m.Suffix +func (x *Version) GetSuffix() string { + if x != nil { + return x.Suffix } return "" } // An encoded Request is written to the ExtensionHandler's stdin. type ExtensionHandlerRequest struct { - // The OpenAPI descriptions that were explicitly listed on the command line. - // The specifications will appear in the order they are specified to gnostic. + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The extension to process. Wrapper *Wrapper `protobuf:"bytes,1,opt,name=wrapper,proto3" json:"wrapper,omitempty"` - // The version number of openapi compiler. - CompilerVersion *Version `protobuf:"bytes,3,opt,name=compiler_version,json=compilerVersion,proto3" json:"compiler_version,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + // The version number of Gnostic. + CompilerVersion *Version `protobuf:"bytes,2,opt,name=compiler_version,json=compilerVersion,proto3" json:"compiler_version,omitempty"` } -func (m *ExtensionHandlerRequest) Reset() { *m = ExtensionHandlerRequest{} } -func (m *ExtensionHandlerRequest) String() string { return proto.CompactTextString(m) } -func (*ExtensionHandlerRequest) ProtoMessage() {} -func (*ExtensionHandlerRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_661e47e790f76671, []int{1} +func (x *ExtensionHandlerRequest) Reset() { + *x = ExtensionHandlerRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_extensions_extension_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *ExtensionHandlerRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ExtensionHandlerRequest.Unmarshal(m, b) -} -func (m *ExtensionHandlerRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ExtensionHandlerRequest.Marshal(b, m, deterministic) -} -func (m *ExtensionHandlerRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_ExtensionHandlerRequest.Merge(m, src) -} -func (m *ExtensionHandlerRequest) XXX_Size() int { - return xxx_messageInfo_ExtensionHandlerRequest.Size(m) +func (x *ExtensionHandlerRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *ExtensionHandlerRequest) XXX_DiscardUnknown() { - xxx_messageInfo_ExtensionHandlerRequest.DiscardUnknown(m) + +func (*ExtensionHandlerRequest) ProtoMessage() {} + +func (x *ExtensionHandlerRequest) ProtoReflect() protoreflect.Message { + mi := &file_extensions_extension_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_ExtensionHandlerRequest proto.InternalMessageInfo +// 
Deprecated: Use ExtensionHandlerRequest.ProtoReflect.Descriptor instead. +func (*ExtensionHandlerRequest) Descriptor() ([]byte, []int) { + return file_extensions_extension_proto_rawDescGZIP(), []int{1} +} -func (m *ExtensionHandlerRequest) GetWrapper() *Wrapper { - if m != nil { - return m.Wrapper +func (x *ExtensionHandlerRequest) GetWrapper() *Wrapper { + if x != nil { + return x.Wrapper } return nil } -func (m *ExtensionHandlerRequest) GetCompilerVersion() *Version { - if m != nil { - return m.CompilerVersion +func (x *ExtensionHandlerRequest) GetCompilerVersion() *Version { + if x != nil { + return x.CompilerVersion } return nil } // The extensions writes an encoded ExtensionHandlerResponse to stdout. type ExtensionHandlerResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // true if the extension is handled by the extension handler; false otherwise Handled bool `protobuf:"varint,1,opt,name=handled,proto3" json:"handled,omitempty"` - // Error message. If non-empty, the extension handling failed. + // Error message(s). If non-empty, the extension handling failed. // The extension handler process should exit with status code zero // even if it reports an error in this way. // @@ -151,150 +184,278 @@ type ExtensionHandlerResponse struct { // itself -- such as the input Document being unparseable -- should be // reported by writing a message to stderr and exiting with a non-zero // status code. - Error []string `protobuf:"bytes,2,rep,name=error,proto3" json:"error,omitempty"` + Errors []string `protobuf:"bytes,2,rep,name=errors,proto3" json:"errors,omitempty"` // text output - Value *any.Any `protobuf:"bytes,3,opt,name=value,proto3" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Value *anypb.Any `protobuf:"bytes,3,opt,name=value,proto3" json:"value,omitempty"` } -func (m *ExtensionHandlerResponse) Reset() { *m = ExtensionHandlerResponse{} } -func (m *ExtensionHandlerResponse) String() string { return proto.CompactTextString(m) } -func (*ExtensionHandlerResponse) ProtoMessage() {} -func (*ExtensionHandlerResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_661e47e790f76671, []int{2} +func (x *ExtensionHandlerResponse) Reset() { + *x = ExtensionHandlerResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_extensions_extension_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *ExtensionHandlerResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ExtensionHandlerResponse.Unmarshal(m, b) -} -func (m *ExtensionHandlerResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ExtensionHandlerResponse.Marshal(b, m, deterministic) -} -func (m *ExtensionHandlerResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_ExtensionHandlerResponse.Merge(m, src) +func (x *ExtensionHandlerResponse) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *ExtensionHandlerResponse) XXX_Size() int { - return xxx_messageInfo_ExtensionHandlerResponse.Size(m) -} -func (m *ExtensionHandlerResponse) XXX_DiscardUnknown() { - xxx_messageInfo_ExtensionHandlerResponse.DiscardUnknown(m) + +func (*ExtensionHandlerResponse) ProtoMessage() {} + +func (x *ExtensionHandlerResponse) ProtoReflect() protoreflect.Message { + mi := &file_extensions_extension_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := 
protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_ExtensionHandlerResponse proto.InternalMessageInfo +// Deprecated: Use ExtensionHandlerResponse.ProtoReflect.Descriptor instead. +func (*ExtensionHandlerResponse) Descriptor() ([]byte, []int) { + return file_extensions_extension_proto_rawDescGZIP(), []int{2} +} -func (m *ExtensionHandlerResponse) GetHandled() bool { - if m != nil { - return m.Handled +func (x *ExtensionHandlerResponse) GetHandled() bool { + if x != nil { + return x.Handled } return false } -func (m *ExtensionHandlerResponse) GetError() []string { - if m != nil { - return m.Error +func (x *ExtensionHandlerResponse) GetErrors() []string { + if x != nil { + return x.Errors } return nil } -func (m *ExtensionHandlerResponse) GetValue() *any.Any { - if m != nil { - return m.Value +func (x *ExtensionHandlerResponse) GetValue() *anypb.Any { + if x != nil { + return x.Value } return nil } type Wrapper struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // version of the OpenAPI specification in which this extension was written. Version string `protobuf:"bytes,1,opt,name=version,proto3" json:"version,omitempty"` - // Name of the extension + // Name of the extension. ExtensionName string `protobuf:"bytes,2,opt,name=extension_name,json=extensionName,proto3" json:"extension_name,omitempty"` - // Must be a valid yaml for the proto - Yaml string `protobuf:"bytes,3,opt,name=yaml,proto3" json:"yaml,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + // YAML-formatted extension value. + Yaml string `protobuf:"bytes,3,opt,name=yaml,proto3" json:"yaml,omitempty"` } -func (m *Wrapper) Reset() { *m = Wrapper{} } -func (m *Wrapper) String() string { return proto.CompactTextString(m) } -func (*Wrapper) ProtoMessage() {} -func (*Wrapper) Descriptor() ([]byte, []int) { - return fileDescriptor_661e47e790f76671, []int{3} +func (x *Wrapper) Reset() { + *x = Wrapper{} + if protoimpl.UnsafeEnabled { + mi := &file_extensions_extension_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *Wrapper) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Wrapper.Unmarshal(m, b) +func (x *Wrapper) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *Wrapper) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Wrapper.Marshal(b, m, deterministic) -} -func (m *Wrapper) XXX_Merge(src proto.Message) { - xxx_messageInfo_Wrapper.Merge(m, src) -} -func (m *Wrapper) XXX_Size() int { - return xxx_messageInfo_Wrapper.Size(m) -} -func (m *Wrapper) XXX_DiscardUnknown() { - xxx_messageInfo_Wrapper.DiscardUnknown(m) + +func (*Wrapper) ProtoMessage() {} + +func (x *Wrapper) ProtoReflect() protoreflect.Message { + mi := &file_extensions_extension_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_Wrapper proto.InternalMessageInfo +// Deprecated: Use Wrapper.ProtoReflect.Descriptor instead. 
+func (*Wrapper) Descriptor() ([]byte, []int) { + return file_extensions_extension_proto_rawDescGZIP(), []int{3} +} -func (m *Wrapper) GetVersion() string { - if m != nil { - return m.Version +func (x *Wrapper) GetVersion() string { + if x != nil { + return x.Version } return "" } -func (m *Wrapper) GetExtensionName() string { - if m != nil { - return m.ExtensionName +func (x *Wrapper) GetExtensionName() string { + if x != nil { + return x.ExtensionName } return "" } -func (m *Wrapper) GetYaml() string { - if m != nil { - return m.Yaml +func (x *Wrapper) GetYaml() string { + if x != nil { + return x.Yaml } return "" } -func init() { - proto.RegisterType((*Version)(nil), "openapiextension.v1.Version") - proto.RegisterType((*ExtensionHandlerRequest)(nil), "openapiextension.v1.ExtensionHandlerRequest") - proto.RegisterType((*ExtensionHandlerResponse)(nil), "openapiextension.v1.ExtensionHandlerResponse") - proto.RegisterType((*Wrapper)(nil), "openapiextension.v1.Wrapper") -} - -func init() { proto.RegisterFile("extensions/extension.proto", fileDescriptor_661e47e790f76671) } - -var fileDescriptor_661e47e790f76671 = []byte{ - // 362 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x91, 0x4d, 0x4b, 0xeb, 0x40, - 0x18, 0x85, 0x49, 0xbf, 0x72, 0x33, 0x97, 0xdb, 0x2b, 0x63, 0xd1, 0x58, 0x5c, 0x94, 0x80, 0x50, - 0x44, 0xa6, 0x54, 0xc1, 0x7d, 0x0b, 0x45, 0xdd, 0xd8, 0x32, 0x8b, 0xba, 0xb3, 0x4c, 0xd3, 0xb7, - 0x69, 0x24, 0x99, 0x19, 0x27, 0x1f, 0xb6, 0x7f, 0xc5, 0xa5, 0xbf, 0x54, 0x32, 0x93, 0xc4, 0x85, - 0xba, 0x9b, 0xf3, 0x70, 0xda, 0xf7, 0x9c, 0x13, 0xd4, 0x87, 0x7d, 0x0a, 0x3c, 0x09, 0x05, 0x4f, - 0x46, 0xf5, 0x93, 0x48, 0x25, 0x52, 0x81, 0x8f, 0x85, 0x04, 0xce, 0x64, 0xf8, 0xc5, 0xf3, 0x71, - 0xff, 0x2c, 0x10, 0x22, 0x88, 0x60, 0xa4, 0x2d, 0xeb, 0x6c, 0x3b, 0x62, 0xfc, 0x60, 0xfc, 0x9e, - 0x8f, 0xec, 0x25, 0xa8, 0xc2, 0x88, 0x7b, 0xa8, 0x1d, 0xb3, 0x17, 0xa1, 0x5c, 0x6b, 0x60, 0x0d, - 0xdb, 0xd4, 0x08, 0x4d, 0x43, 0x2e, 0x94, 0xdb, 0x28, 0x69, 0x21, 0x0a, 0x2a, 0x59, 0xea, 0xef, - 0xdc, 0xa6, 0xa1, 0x5a, 0xe0, 0x13, 0xd4, 0x49, 0xb2, 0xed, 0x36, 0xdc, 0xbb, 0xad, 0x81, 0x35, - 0x74, 0x68, 0xa9, 0xbc, 0x77, 0x0b, 0x9d, 0xce, 0xaa, 0x40, 0xf7, 0x8c, 0x6f, 0x22, 0x50, 0x14, - 0x5e, 0x33, 0x48, 0x52, 0x7c, 0x8b, 0xec, 0x37, 0xc5, 0xa4, 0x04, 0x73, 0xf7, 0xef, 0xf5, 0x39, - 0xf9, 0xa1, 0x02, 0x79, 0x32, 0x1e, 0x5a, 0x99, 0xf1, 0x1d, 0x3a, 0xf2, 0x45, 0x2c, 0xc3, 0x08, - 0xd4, 0x2a, 0x37, 0x0d, 0x74, 0x98, 0xdf, 0xfe, 0xa0, 0x6c, 0x49, 0xff, 0x57, 0xbf, 0x2a, 0x81, - 0x97, 0x23, 0xf7, 0x7b, 0xb6, 0x44, 0x0a, 0x9e, 0x00, 0x76, 0x91, 0xbd, 0xd3, 0x68, 0xa3, 0xc3, - 0xfd, 0xa1, 0x95, 0x2c, 0x06, 0x00, 0xa5, 0xf4, 0x2c, 0xcd, 0xa1, 0x43, 0x8d, 0xc0, 0x97, 0xa8, - 0x9d, 0xb3, 0x28, 0x83, 0x32, 0x49, 0x8f, 0x98, 0xe1, 0x49, 0x35, 0x3c, 0x99, 0xf0, 0x03, 0x35, - 0x16, 0xef, 0x19, 0xd9, 0x65, 0xa9, 0xe2, 0x4c, 0x55, 0xc1, 0xd2, 0xc3, 0x55, 0x12, 0x5f, 0xa0, - 0x6e, 0xdd, 0x62, 0xc5, 0x59, 0x0c, 0xfa, 0x33, 0x38, 0xf4, 0x5f, 0x4d, 0x1f, 0x59, 0x0c, 0x18, - 0xa3, 0xd6, 0x81, 0xc5, 0x91, 0x3e, 0xeb, 0x50, 0xfd, 0x9e, 0x5e, 0xa1, 0xae, 0x50, 0x01, 0x09, - 0xb8, 0x48, 0xd2, 0xd0, 0x27, 0xf9, 0x78, 0x8a, 0xe7, 0x12, 0xf8, 0x64, 0xf1, 0x50, 0xd7, 0x5d, - 0x8e, 0x17, 0xd6, 0x47, 0xa3, 0x39, 0x9f, 0xcc, 0xd6, 0x1d, 0x1d, 0xf1, 0xe6, 0x33, 0x00, 0x00, - 0xff, 0xff, 0xeb, 0xf3, 0xfa, 0x65, 0x5c, 0x02, 0x00, 0x00, +var File_extensions_extension_proto protoreflect.FileDescriptor + +var file_extensions_extension_proto_rawDesc = []byte{ + 0x0a, 0x1a, 0x65, 0x78, 
0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x65, 0x78, 0x74, + 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x14, 0x67, 0x6e, + 0x6f, 0x73, 0x74, 0x69, 0x63, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x2e, + 0x76, 0x31, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x63, 0x0a, + 0x07, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x14, 0x0a, 0x05, 0x6d, 0x61, 0x6a, 0x6f, + 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x6d, 0x61, 0x6a, 0x6f, 0x72, 0x12, 0x14, + 0x0a, 0x05, 0x6d, 0x69, 0x6e, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x6d, + 0x69, 0x6e, 0x6f, 0x72, 0x12, 0x14, 0x0a, 0x05, 0x70, 0x61, 0x74, 0x63, 0x68, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x05, 0x52, 0x05, 0x70, 0x61, 0x74, 0x63, 0x68, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x75, + 0x66, 0x66, 0x69, 0x78, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x75, 0x66, 0x66, + 0x69, 0x78, 0x22, 0x9c, 0x01, 0x0a, 0x17, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, + 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x37, + 0x0a, 0x07, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x1d, 0x2e, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, + 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x57, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x52, 0x07, + 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x12, 0x48, 0x0a, 0x10, 0x63, 0x6f, 0x6d, 0x70, 0x69, + 0x6c, 0x65, 0x72, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x2e, 0x65, 0x78, 0x74, 0x65, + 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, + 0x52, 0x0f, 0x63, 0x6f, 0x6d, 0x70, 0x69, 0x6c, 0x65, 0x72, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, + 0x6e, 0x22, 0x78, 0x0a, 0x18, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x48, 0x61, + 0x6e, 0x64, 0x6c, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, + 0x07, 0x68, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, + 0x68, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x65, 0x72, 0x72, 0x6f, 0x72, + 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x73, 0x12, + 0x2a, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x5e, 0x0a, 0x07, 0x57, + 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, + 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, + 0x12, 0x25, 0x0a, 0x0e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x61, + 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, + 0x69, 0x6f, 0x6e, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x79, 0x61, 0x6d, 0x6c, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x79, 0x61, 0x6d, 0x6c, 0x42, 0x4d, 0x0a, 0x0e, 0x6f, + 0x72, 0x67, 0x2e, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x2e, 0x76, 0x31, 0x42, 0x10, 0x47, + 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x45, 0x78, 
0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x50, + 0x01, 0x5a, 0x21, 0x2e, 0x2f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x3b, + 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, + 0x6e, 0x5f, 0x76, 0x31, 0xa2, 0x02, 0x03, 0x47, 0x4e, 0x58, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x33, +} + +var ( + file_extensions_extension_proto_rawDescOnce sync.Once + file_extensions_extension_proto_rawDescData = file_extensions_extension_proto_rawDesc +) + +func file_extensions_extension_proto_rawDescGZIP() []byte { + file_extensions_extension_proto_rawDescOnce.Do(func() { + file_extensions_extension_proto_rawDescData = protoimpl.X.CompressGZIP(file_extensions_extension_proto_rawDescData) + }) + return file_extensions_extension_proto_rawDescData +} + +var file_extensions_extension_proto_msgTypes = make([]protoimpl.MessageInfo, 4) +var file_extensions_extension_proto_goTypes = []interface{}{ + (*Version)(nil), // 0: gnostic.extension.v1.Version + (*ExtensionHandlerRequest)(nil), // 1: gnostic.extension.v1.ExtensionHandlerRequest + (*ExtensionHandlerResponse)(nil), // 2: gnostic.extension.v1.ExtensionHandlerResponse + (*Wrapper)(nil), // 3: gnostic.extension.v1.Wrapper + (*anypb.Any)(nil), // 4: google.protobuf.Any +} +var file_extensions_extension_proto_depIdxs = []int32{ + 3, // 0: gnostic.extension.v1.ExtensionHandlerRequest.wrapper:type_name -> gnostic.extension.v1.Wrapper + 0, // 1: gnostic.extension.v1.ExtensionHandlerRequest.compiler_version:type_name -> gnostic.extension.v1.Version + 4, // 2: gnostic.extension.v1.ExtensionHandlerResponse.value:type_name -> google.protobuf.Any + 3, // [3:3] is the sub-list for method output_type + 3, // [3:3] is the sub-list for method input_type + 3, // [3:3] is the sub-list for extension type_name + 3, // [3:3] is the sub-list for extension extendee + 0, // [0:3] is the sub-list for field type_name +} + +func init() { file_extensions_extension_proto_init() } +func file_extensions_extension_proto_init() { + if File_extensions_extension_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_extensions_extension_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Version); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_extensions_extension_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ExtensionHandlerRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_extensions_extension_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ExtensionHandlerResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_extensions_extension_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Wrapper); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_extensions_extension_proto_rawDesc, + NumEnums: 0, + NumMessages: 4, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_extensions_extension_proto_goTypes, + DependencyIndexes: 
file_extensions_extension_proto_depIdxs, + MessageInfos: file_extensions_extension_proto_msgTypes, + }.Build() + File_extensions_extension_proto = out.File + file_extensions_extension_proto_rawDesc = nil + file_extensions_extension_proto_goTypes = nil + file_extensions_extension_proto_depIdxs = nil } diff --git a/vendor/github.com/googleapis/gnostic/extensions/extension.proto b/vendor/github.com/googleapis/gnostic/extensions/extension.proto index 04856f913b176..875137c1a8606 100644 --- a/vendor/github.com/googleapis/gnostic/extensions/extension.proto +++ b/vendor/github.com/googleapis/gnostic/extensions/extension.proto @@ -1,4 +1,4 @@ -// Copyright 2017 Google Inc. All Rights Reserved. +// Copyright 2017 Google LLC. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -14,8 +14,9 @@ syntax = "proto3"; +package gnostic.extension.v1; + import "google/protobuf/any.proto"; -package openapiextension.v1; // This option lets the proto compiler generate Java code inside the package // name (see below) instead of inside an outer class. It creates a simpler @@ -26,7 +27,7 @@ option java_multiple_files = true; // The Java outer classname should be the filename in UpperCamelCase. This // class is only used to hold proto descriptor, so developers don't need to // work with it directly. -option java_outer_classname = "OpenAPIExtensionV1"; +option java_outer_classname = "GnosticExtension"; // The Java package name must be proto package name with proper prefix. option java_package = "org.gnostic.v1"; @@ -37,9 +38,13 @@ option java_package = "org.gnostic.v1"; // hopefully unique enough to not conflict with things that may come along in // the future. 'GPB' is reserved for the protocol buffer implementation itself. // -option objc_class_prefix = "OAE"; // "OpenAPI Extension" +// "Gnostic Extension" +option objc_class_prefix = "GNX"; + +// The Go package name. +option go_package = "./extensions;gnostic_extension_v1"; -// The version number of OpenAPI compiler. +// The version number of Gnostic. message Version { int32 major = 1; int32 minor = 2; @@ -52,12 +57,11 @@ message Version { // An encoded Request is written to the ExtensionHandler's stdin. message ExtensionHandlerRequest { - // The OpenAPI descriptions that were explicitly listed on the command line. - // The specifications will appear in the order they are specified to gnostic. + // The extension to process. Wrapper wrapper = 1; - // The version number of openapi compiler. - Version compiler_version = 3; + // The version number of Gnostic. + Version compiler_version = 2; } // The extensions writes an encoded ExtensionHandlerResponse to stdout. @@ -66,7 +70,7 @@ message ExtensionHandlerResponse { // true if the extension is handled by the extension handler; false otherwise bool handled = 1; - // Error message. If non-empty, the extension handling failed. + // Error message(s). If non-empty, the extension handling failed. // The extension handler process should exit with status code zero // even if it reports an error in this way. // @@ -75,7 +79,7 @@ message ExtensionHandlerResponse { // itself -- such as the input Document being unparseable -- should be // reported by writing a message to stderr and exiting with a non-zero // status code. 
- repeated string error = 2; + repeated string errors = 2; // text output google.protobuf.Any value = 3; @@ -85,9 +89,9 @@ message Wrapper { // version of the OpenAPI specification in which this extension was written. string version = 1; - // Name of the extension + // Name of the extension. string extension_name = 2; - // Must be a valid yaml for the proto + // YAML-formatted extension value. string yaml = 3; } diff --git a/vendor/github.com/googleapis/gnostic/extensions/extensions.go b/vendor/github.com/googleapis/gnostic/extensions/extensions.go index 94a8e62a776e7..ec8afd009239e 100644 --- a/vendor/github.com/googleapis/gnostic/extensions/extensions.go +++ b/vendor/github.com/googleapis/gnostic/extensions/extensions.go @@ -1,4 +1,4 @@ -// Copyright 2017 Google Inc. All Rights Reserved. +// Copyright 2017 Google LLC. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -12,71 +12,53 @@ // See the License for the specific language governing permissions and // limitations under the License. -package openapiextension_v1 +package gnostic_extension_v1 import ( - "fmt" "io/ioutil" + "log" "os" "github.com/golang/protobuf/proto" "github.com/golang/protobuf/ptypes" ) -type documentHandler func(version string, extensionName string, document string) type extensionHandler func(name string, yamlInput string) (bool, proto.Message, error) -func forInputYamlFromOpenapic(handler documentHandler) { +// Main implements the main program of an extension handler. +func Main(handler extensionHandler) { + // unpack the request data, err := ioutil.ReadAll(os.Stdin) if err != nil { - fmt.Println("File error:", err.Error()) + log.Println("File error:", err.Error()) os.Exit(1) } if len(data) == 0 { - fmt.Println("No input data.") + log.Println("No input data.") os.Exit(1) } request := &ExtensionHandlerRequest{} err = proto.Unmarshal(data, request) if err != nil { - fmt.Println("Input error:", err.Error()) + log.Println("Input error:", err.Error()) os.Exit(1) } - handler(request.Wrapper.Version, request.Wrapper.ExtensionName, request.Wrapper.Yaml) -} - -// ProcessExtension calles the handler for a specified extension. 
-func ProcessExtension(handleExtension extensionHandler) { - response := &ExtensionHandlerResponse{} - forInputYamlFromOpenapic( - func(version string, extensionName string, yamlInput string) { - var newObject proto.Message - var err error - - handled, newObject, err := handleExtension(extensionName, yamlInput) - if !handled { - responseBytes, _ := proto.Marshal(response) - os.Stdout.Write(responseBytes) - os.Exit(0) - } - - // If we reach here, then the extension is handled - response.Handled = true - if err != nil { - response.Error = append(response.Error, err.Error()) - responseBytes, _ := proto.Marshal(response) - os.Stdout.Write(responseBytes) - os.Exit(0) - } - response.Value, err = ptypes.MarshalAny(newObject) - if err != nil { - response.Error = append(response.Error, err.Error()) - responseBytes, _ := proto.Marshal(response) - os.Stdout.Write(responseBytes) - os.Exit(0) - } - }) - + // call the handler + handled, output, err := handler(request.Wrapper.ExtensionName, request.Wrapper.Yaml) + // respond with the output of the handler + response := &ExtensionHandlerResponse{ + Handled: false, // default assumption + Errors: make([]string, 0), + } + if err != nil { + response.Errors = append(response.Errors, err.Error()) + } else if handled { + response.Handled = true + response.Value, err = ptypes.MarshalAny(output) + if err != nil { + response.Errors = append(response.Errors, err.Error()) + } + } responseBytes, _ := proto.Marshal(response) os.Stdout.Write(responseBytes) } diff --git a/vendor/github.com/googleapis/gnostic/jsonschema/README.md b/vendor/github.com/googleapis/gnostic/jsonschema/README.md new file mode 100644 index 0000000000000..6793c5179c807 --- /dev/null +++ b/vendor/github.com/googleapis/gnostic/jsonschema/README.md @@ -0,0 +1,4 @@ +# jsonschema + +This directory contains code for reading, writing, and manipulating JSON +schemas. diff --git a/vendor/github.com/googleapis/gnostic/jsonschema/base.go b/vendor/github.com/googleapis/gnostic/jsonschema/base.go new file mode 100644 index 0000000000000..0af8b148b9c0d --- /dev/null +++ b/vendor/github.com/googleapis/gnostic/jsonschema/base.go @@ -0,0 +1,84 @@ + +// THIS FILE IS AUTOMATICALLY GENERATED. 
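With the rewritten Main above, a complete extension-handler binary reduces to supplying the callback. A hedged sketch; the "x-sample" extension name and the StringValue payload are illustrative choices, not part of this patch:

package main

import (
	"github.com/golang/protobuf/proto"
	"github.com/golang/protobuf/ptypes/wrappers"
	extensions "github.com/googleapis/gnostic/extensions"
)

// handle claims a single extension name and echoes its YAML body back
// as a wrapped string; anything else is reported as unhandled.
func handle(extensionName string, yamlInput string) (bool, proto.Message, error) {
	if extensionName != "x-sample" {
		return false, nil, nil
	}
	return true, &wrappers.StringValue{Value: yamlInput}, nil
}

func main() {
	// Main reads the serialized ExtensionHandlerRequest from stdin and
	// writes the ExtensionHandlerResponse to stdout, as described above.
	extensions.Main(handle)
}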
+ +package jsonschema + +import ( + "encoding/base64" +) + +func baseSchemaBytes() ([]byte, error){ + return base64.StdEncoding.DecodeString( +`ewogICAgImlkIjogImh0dHA6Ly9qc29uLXNjaGVtYS5vcmcvZHJhZnQtMDQvc2NoZW1hIyIsCiAgICAi +JHNjaGVtYSI6ICJodHRwOi8vanNvbi1zY2hlbWEub3JnL2RyYWZ0LTA0L3NjaGVtYSMiLAogICAgImRl +c2NyaXB0aW9uIjogIkNvcmUgc2NoZW1hIG1ldGEtc2NoZW1hIiwKICAgICJkZWZpbml0aW9ucyI6IHsK +ICAgICAgICAic2NoZW1hQXJyYXkiOiB7CiAgICAgICAgICAgICJ0eXBlIjogImFycmF5IiwKICAgICAg +ICAgICAgIm1pbkl0ZW1zIjogMSwKICAgICAgICAgICAgIml0ZW1zIjogeyAiJHJlZiI6ICIjIiB9CiAg +ICAgICAgfSwKICAgICAgICAicG9zaXRpdmVJbnRlZ2VyIjogewogICAgICAgICAgICAidHlwZSI6ICJp +bnRlZ2VyIiwKICAgICAgICAgICAgIm1pbmltdW0iOiAwCiAgICAgICAgfSwKICAgICAgICAicG9zaXRp +dmVJbnRlZ2VyRGVmYXVsdDAiOiB7CiAgICAgICAgICAgICJhbGxPZiI6IFsgeyAiJHJlZiI6ICIjL2Rl +ZmluaXRpb25zL3Bvc2l0aXZlSW50ZWdlciIgfSwgeyAiZGVmYXVsdCI6IDAgfSBdCiAgICAgICAgfSwK +ICAgICAgICAic2ltcGxlVHlwZXMiOiB7CiAgICAgICAgICAgICJlbnVtIjogWyAiYXJyYXkiLCAiYm9v +bGVhbiIsICJpbnRlZ2VyIiwgIm51bGwiLCAibnVtYmVyIiwgIm9iamVjdCIsICJzdHJpbmciIF0KICAg +ICAgICB9LAogICAgICAgICJzdHJpbmdBcnJheSI6IHsKICAgICAgICAgICAgInR5cGUiOiAiYXJyYXki +LAogICAgICAgICAgICAiaXRlbXMiOiB7ICJ0eXBlIjogInN0cmluZyIgfSwKICAgICAgICAgICAgIm1p +bkl0ZW1zIjogMSwKICAgICAgICAgICAgInVuaXF1ZUl0ZW1zIjogdHJ1ZQogICAgICAgIH0KICAgIH0s +CiAgICAidHlwZSI6ICJvYmplY3QiLAogICAgInByb3BlcnRpZXMiOiB7CiAgICAgICAgImlkIjogewog +ICAgICAgICAgICAidHlwZSI6ICJzdHJpbmciLAogICAgICAgICAgICAiZm9ybWF0IjogInVyaSIKICAg +ICAgICB9LAogICAgICAgICIkc2NoZW1hIjogewogICAgICAgICAgICAidHlwZSI6ICJzdHJpbmciLAog +ICAgICAgICAgICAiZm9ybWF0IjogInVyaSIKICAgICAgICB9LAogICAgICAgICJ0aXRsZSI6IHsKICAg +ICAgICAgICAgInR5cGUiOiAic3RyaW5nIgogICAgICAgIH0sCiAgICAgICAgImRlc2NyaXB0aW9uIjog +ewogICAgICAgICAgICAidHlwZSI6ICJzdHJpbmciCiAgICAgICAgfSwKICAgICAgICAiZGVmYXVsdCI6 +IHt9LAogICAgICAgICJtdWx0aXBsZU9mIjogewogICAgICAgICAgICAidHlwZSI6ICJudW1iZXIiLAog +ICAgICAgICAgICAibWluaW11bSI6IDAsCiAgICAgICAgICAgICJleGNsdXNpdmVNaW5pbXVtIjogdHJ1 +ZQogICAgICAgIH0sCiAgICAgICAgIm1heGltdW0iOiB7CiAgICAgICAgICAgICJ0eXBlIjogIm51bWJl +ciIKICAgICAgICB9LAogICAgICAgICJleGNsdXNpdmVNYXhpbXVtIjogewogICAgICAgICAgICAidHlw +ZSI6ICJib29sZWFuIiwKICAgICAgICAgICAgImRlZmF1bHQiOiBmYWxzZQogICAgICAgIH0sCiAgICAg +ICAgIm1pbmltdW0iOiB7CiAgICAgICAgICAgICJ0eXBlIjogIm51bWJlciIKICAgICAgICB9LAogICAg +ICAgICJleGNsdXNpdmVNaW5pbXVtIjogewogICAgICAgICAgICAidHlwZSI6ICJib29sZWFuIiwKICAg +ICAgICAgICAgImRlZmF1bHQiOiBmYWxzZQogICAgICAgIH0sCiAgICAgICAgIm1heExlbmd0aCI6IHsg +IiRyZWYiOiAiIy9kZWZpbml0aW9ucy9wb3NpdGl2ZUludGVnZXIiIH0sCiAgICAgICAgIm1pbkxlbmd0 +aCI6IHsgIiRyZWYiOiAiIy9kZWZpbml0aW9ucy9wb3NpdGl2ZUludGVnZXJEZWZhdWx0MCIgfSwKICAg +ICAgICAicGF0dGVybiI6IHsKICAgICAgICAgICAgInR5cGUiOiAic3RyaW5nIiwKICAgICAgICAgICAg +ImZvcm1hdCI6ICJyZWdleCIKICAgICAgICB9LAogICAgICAgICJhZGRpdGlvbmFsSXRlbXMiOiB7CiAg +ICAgICAgICAgICJhbnlPZiI6IFsKICAgICAgICAgICAgICAgIHsgInR5cGUiOiAiYm9vbGVhbiIgfSwK +ICAgICAgICAgICAgICAgIHsgIiRyZWYiOiAiIyIgfQogICAgICAgICAgICBdLAogICAgICAgICAgICAi +ZGVmYXVsdCI6IHt9CiAgICAgICAgfSwKICAgICAgICAiaXRlbXMiOiB7CiAgICAgICAgICAgICJhbnlP +ZiI6IFsKICAgICAgICAgICAgICAgIHsgIiRyZWYiOiAiIyIgfSwKICAgICAgICAgICAgICAgIHsgIiRy +ZWYiOiAiIy9kZWZpbml0aW9ucy9zY2hlbWFBcnJheSIgfQogICAgICAgICAgICBdLAogICAgICAgICAg +ICAiZGVmYXVsdCI6IHt9CiAgICAgICAgfSwKICAgICAgICAibWF4SXRlbXMiOiB7ICIkcmVmIjogIiMv +ZGVmaW5pdGlvbnMvcG9zaXRpdmVJbnRlZ2VyIiB9LAogICAgICAgICJtaW5JdGVtcyI6IHsgIiRyZWYi +OiAiIy9kZWZpbml0aW9ucy9wb3NpdGl2ZUludGVnZXJEZWZhdWx0MCIgfSwKICAgICAgICAidW5pcXVl +SXRlbXMiOiB7CiAgICAgICAgICAgICJ0eXBlIjogImJvb2xlYW4iLAogICAgICAgICAgICAiZGVmYXVs 
+dCI6IGZhbHNlCiAgICAgICAgfSwKICAgICAgICAibWF4UHJvcGVydGllcyI6IHsgIiRyZWYiOiAiIy9k +ZWZpbml0aW9ucy9wb3NpdGl2ZUludGVnZXIiIH0sCiAgICAgICAgIm1pblByb3BlcnRpZXMiOiB7ICIk +cmVmIjogIiMvZGVmaW5pdGlvbnMvcG9zaXRpdmVJbnRlZ2VyRGVmYXVsdDAiIH0sCiAgICAgICAgInJl +cXVpcmVkIjogeyAiJHJlZiI6ICIjL2RlZmluaXRpb25zL3N0cmluZ0FycmF5IiB9LAogICAgICAgICJh +ZGRpdGlvbmFsUHJvcGVydGllcyI6IHsKICAgICAgICAgICAgImFueU9mIjogWwogICAgICAgICAgICAg +ICAgeyAidHlwZSI6ICJib29sZWFuIiB9LAogICAgICAgICAgICAgICAgeyAiJHJlZiI6ICIjIiB9CiAg +ICAgICAgICAgIF0sCiAgICAgICAgICAgICJkZWZhdWx0Ijoge30KICAgICAgICB9LAogICAgICAgICJk +ZWZpbml0aW9ucyI6IHsKICAgICAgICAgICAgInR5cGUiOiAib2JqZWN0IiwKICAgICAgICAgICAgImFk +ZGl0aW9uYWxQcm9wZXJ0aWVzIjogeyAiJHJlZiI6ICIjIiB9LAogICAgICAgICAgICAiZGVmYXVsdCI6 +IHt9CiAgICAgICAgfSwKICAgICAgICAicHJvcGVydGllcyI6IHsKICAgICAgICAgICAgInR5cGUiOiAi +b2JqZWN0IiwKICAgICAgICAgICAgImFkZGl0aW9uYWxQcm9wZXJ0aWVzIjogeyAiJHJlZiI6ICIjIiB9 +LAogICAgICAgICAgICAiZGVmYXVsdCI6IHt9CiAgICAgICAgfSwKICAgICAgICAicGF0dGVyblByb3Bl +cnRpZXMiOiB7CiAgICAgICAgICAgICJ0eXBlIjogIm9iamVjdCIsCiAgICAgICAgICAgICJhZGRpdGlv +bmFsUHJvcGVydGllcyI6IHsgIiRyZWYiOiAiIyIgfSwKICAgICAgICAgICAgImRlZmF1bHQiOiB7fQog +ICAgICAgIH0sCiAgICAgICAgImRlcGVuZGVuY2llcyI6IHsKICAgICAgICAgICAgInR5cGUiOiAib2Jq +ZWN0IiwKICAgICAgICAgICAgImFkZGl0aW9uYWxQcm9wZXJ0aWVzIjogewogICAgICAgICAgICAgICAg +ImFueU9mIjogWwogICAgICAgICAgICAgICAgICAgIHsgIiRyZWYiOiAiIyIgfSwKICAgICAgICAgICAg +ICAgICAgICB7ICIkcmVmIjogIiMvZGVmaW5pdGlvbnMvc3RyaW5nQXJyYXkiIH0KICAgICAgICAgICAg +ICAgIF0KICAgICAgICAgICAgfQogICAgICAgIH0sCiAgICAgICAgImVudW0iOiB7CiAgICAgICAgICAg +ICJ0eXBlIjogImFycmF5IiwKICAgICAgICAgICAgIm1pbkl0ZW1zIjogMSwKICAgICAgICAgICAgInVu +aXF1ZUl0ZW1zIjogdHJ1ZQogICAgICAgIH0sCiAgICAgICAgInR5cGUiOiB7CiAgICAgICAgICAgICJh +bnlPZiI6IFsKICAgICAgICAgICAgICAgIHsgIiRyZWYiOiAiIy9kZWZpbml0aW9ucy9zaW1wbGVUeXBl +cyIgfSwKICAgICAgICAgICAgICAgIHsKICAgICAgICAgICAgICAgICAgICAidHlwZSI6ICJhcnJheSIs +CiAgICAgICAgICAgICAgICAgICAgIml0ZW1zIjogeyAiJHJlZiI6ICIjL2RlZmluaXRpb25zL3NpbXBs +ZVR5cGVzIiB9LAogICAgICAgICAgICAgICAgICAgICJtaW5JdGVtcyI6IDEsCiAgICAgICAgICAgICAg +ICAgICAgInVuaXF1ZUl0ZW1zIjogdHJ1ZQogICAgICAgICAgICAgICAgfQogICAgICAgICAgICBdCiAg +ICAgICAgfSwKICAgICAgICAiYWxsT2YiOiB7ICIkcmVmIjogIiMvZGVmaW5pdGlvbnMvc2NoZW1hQXJy +YXkiIH0sCiAgICAgICAgImFueU9mIjogeyAiJHJlZiI6ICIjL2RlZmluaXRpb25zL3NjaGVtYUFycmF5 +IiB9LAogICAgICAgICJvbmVPZiI6IHsgIiRyZWYiOiAiIy9kZWZpbml0aW9ucy9zY2hlbWFBcnJheSIg +fSwKICAgICAgICAibm90IjogeyAiJHJlZiI6ICIjIiB9CiAgICB9LAogICAgImRlcGVuZGVuY2llcyI6 +IHsKICAgICAgICAiZXhjbHVzaXZlTWF4aW11bSI6IFsgIm1heGltdW0iIF0sCiAgICAgICAgImV4Y2x1 +c2l2ZU1pbmltdW0iOiBbICJtaW5pbXVtIiBdCiAgICB9LAogICAgImRlZmF1bHQiOiB7fQp9Cg==`)} \ No newline at end of file diff --git a/vendor/github.com/googleapis/gnostic/jsonschema/display.go b/vendor/github.com/googleapis/gnostic/jsonschema/display.go new file mode 100644 index 0000000000000..028a760a91b59 --- /dev/null +++ b/vendor/github.com/googleapis/gnostic/jsonschema/display.go @@ -0,0 +1,229 @@ +// Copyright 2017 Google LLC. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package jsonschema + +import ( + "fmt" + "strings" +) + +// +// DISPLAY +// The following methods display Schemas. +// + +// Description returns a string representation of a string or string array. +func (s *StringOrStringArray) Description() string { + if s.String != nil { + return *s.String + } + if s.StringArray != nil { + return strings.Join(*s.StringArray, ", ") + } + return "" +} + +// Returns a string representation of a Schema. +func (schema *Schema) String() string { + return schema.describeSchema("") +} + +// Helper: Returns a string representation of a Schema indented by a specified string. +func (schema *Schema) describeSchema(indent string) string { + result := "" + if schema.Schema != nil { + result += indent + "$schema: " + *(schema.Schema) + "\n" + } + if schema.ID != nil { + result += indent + "id: " + *(schema.ID) + "\n" + } + if schema.MultipleOf != nil { + result += indent + fmt.Sprintf("multipleOf: %+v\n", *(schema.MultipleOf)) + } + if schema.Maximum != nil { + result += indent + fmt.Sprintf("maximum: %+v\n", *(schema.Maximum)) + } + if schema.ExclusiveMaximum != nil { + result += indent + fmt.Sprintf("exclusiveMaximum: %+v\n", *(schema.ExclusiveMaximum)) + } + if schema.Minimum != nil { + result += indent + fmt.Sprintf("minimum: %+v\n", *(schema.Minimum)) + } + if schema.ExclusiveMinimum != nil { + result += indent + fmt.Sprintf("exclusiveMinimum: %+v\n", *(schema.ExclusiveMinimum)) + } + if schema.MaxLength != nil { + result += indent + fmt.Sprintf("maxLength: %+v\n", *(schema.MaxLength)) + } + if schema.MinLength != nil { + result += indent + fmt.Sprintf("minLength: %+v\n", *(schema.MinLength)) + } + if schema.Pattern != nil { + result += indent + fmt.Sprintf("pattern: %+v\n", *(schema.Pattern)) + } + if schema.AdditionalItems != nil { + s := schema.AdditionalItems.Schema + if s != nil { + result += indent + "additionalItems:\n" + result += s.describeSchema(indent + " ") + } else { + b := *(schema.AdditionalItems.Boolean) + result += indent + fmt.Sprintf("additionalItems: %+v\n", b) + } + } + if schema.Items != nil { + result += indent + "items:\n" + items := schema.Items + if items.SchemaArray != nil { + for i, s := range *(items.SchemaArray) { + result += indent + " " + fmt.Sprintf("%d", i) + ":\n" + result += s.describeSchema(indent + " " + " ") + } + } else if items.Schema != nil { + result += items.Schema.describeSchema(indent + " " + " ") + } + } + if schema.MaxItems != nil { + result += indent + fmt.Sprintf("maxItems: %+v\n", *(schema.MaxItems)) + } + if schema.MinItems != nil { + result += indent + fmt.Sprintf("minItems: %+v\n", *(schema.MinItems)) + } + if schema.UniqueItems != nil { + result += indent + fmt.Sprintf("uniqueItems: %+v\n", *(schema.UniqueItems)) + } + if schema.MaxProperties != nil { + result += indent + fmt.Sprintf("maxProperties: %+v\n", *(schema.MaxProperties)) + } + if schema.MinProperties != nil { + result += indent + fmt.Sprintf("minProperties: %+v\n", *(schema.MinProperties)) + } + if schema.Required != nil { + result += indent + fmt.Sprintf("required: %+v\n", *(schema.Required)) + } + if schema.AdditionalProperties != nil { + s := schema.AdditionalProperties.Schema + if s != nil { + result += indent + "additionalProperties:\n" + result += s.describeSchema(indent + " ") + } else { + b := *(schema.AdditionalProperties.Boolean) + result += indent + fmt.Sprintf("additionalProperties: %+v\n", b) + } + } + if schema.Properties != nil { + result += indent + "properties:\n" + for _, pair := range *(schema.Properties) { + name := pair.Name 
+ s := pair.Value + result += indent + " " + name + ":\n" + result += s.describeSchema(indent + " " + " ") + } + } + if schema.PatternProperties != nil { + result += indent + "patternProperties:\n" + for _, pair := range *(schema.PatternProperties) { + name := pair.Name + s := pair.Value + result += indent + " " + name + ":\n" + result += s.describeSchema(indent + " " + " ") + } + } + if schema.Dependencies != nil { + result += indent + "dependencies:\n" + for _, pair := range *(schema.Dependencies) { + name := pair.Name + schemaOrStringArray := pair.Value + s := schemaOrStringArray.Schema + if s != nil { + result += indent + " " + name + ":\n" + result += s.describeSchema(indent + " " + " ") + } else { + a := schemaOrStringArray.StringArray + if a != nil { + result += indent + " " + name + ":\n" + for _, s2 := range *a { + result += indent + " " + " " + s2 + "\n" + } + } + } + + } + } + if schema.Enumeration != nil { + result += indent + "enumeration:\n" + for _, value := range *(schema.Enumeration) { + if value.String != nil { + result += indent + " " + fmt.Sprintf("%+v\n", *value.String) + } else { + result += indent + " " + fmt.Sprintf("%+v\n", *value.Bool) + } + } + } + if schema.Type != nil { + result += indent + fmt.Sprintf("type: %+v\n", schema.Type.Description()) + } + if schema.AllOf != nil { + result += indent + "allOf:\n" + for _, s := range *(schema.AllOf) { + result += s.describeSchema(indent + " ") + result += indent + "-\n" + } + } + if schema.AnyOf != nil { + result += indent + "anyOf:\n" + for _, s := range *(schema.AnyOf) { + result += s.describeSchema(indent + " ") + result += indent + "-\n" + } + } + if schema.OneOf != nil { + result += indent + "oneOf:\n" + for _, s := range *(schema.OneOf) { + result += s.describeSchema(indent + " ") + result += indent + "-\n" + } + } + if schema.Not != nil { + result += indent + "not:\n" + result += schema.Not.describeSchema(indent + " ") + } + if schema.Definitions != nil { + result += indent + "definitions:\n" + for _, pair := range *(schema.Definitions) { + name := pair.Name + s := pair.Value + result += indent + " " + name + ":\n" + result += s.describeSchema(indent + " " + " ") + } + } + if schema.Title != nil { + result += indent + "title: " + *(schema.Title) + "\n" + } + if schema.Description != nil { + result += indent + "description: " + *(schema.Description) + "\n" + } + if schema.Default != nil { + result += indent + "default:\n" + result += indent + fmt.Sprintf(" %+v\n", *(schema.Default)) + } + if schema.Format != nil { + result += indent + "format: " + *(schema.Format) + "\n" + } + if schema.Ref != nil { + result += indent + "$ref: " + *(schema.Ref) + "\n" + } + return result +} diff --git a/vendor/github.com/googleapis/gnostic/jsonschema/models.go b/vendor/github.com/googleapis/gnostic/jsonschema/models.go new file mode 100644 index 0000000000000..4781bdc5f500c --- /dev/null +++ b/vendor/github.com/googleapis/gnostic/jsonschema/models.go @@ -0,0 +1,228 @@ +// Copyright 2017 Google LLC. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +// Package jsonschema supports the reading, writing, and manipulation +// of JSON Schemas. +package jsonschema + +import "gopkg.in/yaml.v3" + +// The Schema struct models a JSON Schema and, because schemas are +// defined hierarchically, contains many references to itself. +// All fields are pointers and are nil if the associated values +// are not specified. +type Schema struct { + Schema *string // $schema + ID *string // id keyword used for $ref resolution scope + Ref *string // $ref, i.e. JSON Pointers + + // http://json-schema.org/latest/json-schema-validation.html + // 5.1. Validation keywords for numeric instances (number and integer) + MultipleOf *SchemaNumber + Maximum *SchemaNumber + ExclusiveMaximum *bool + Minimum *SchemaNumber + ExclusiveMinimum *bool + + // 5.2. Validation keywords for strings + MaxLength *int64 + MinLength *int64 + Pattern *string + + // 5.3. Validation keywords for arrays + AdditionalItems *SchemaOrBoolean + Items *SchemaOrSchemaArray + MaxItems *int64 + MinItems *int64 + UniqueItems *bool + + // 5.4. Validation keywords for objects + MaxProperties *int64 + MinProperties *int64 + Required *[]string + AdditionalProperties *SchemaOrBoolean + Properties *[]*NamedSchema + PatternProperties *[]*NamedSchema + Dependencies *[]*NamedSchemaOrStringArray + + // 5.5. Validation keywords for any instance type + Enumeration *[]SchemaEnumValue + Type *StringOrStringArray + AllOf *[]*Schema + AnyOf *[]*Schema + OneOf *[]*Schema + Not *Schema + Definitions *[]*NamedSchema + + // 6. Metadata keywords + Title *string + Description *string + Default *yaml.Node + + // 7. Semantic validation with "format" + Format *string +} + +// These helper structs represent "combination" types that generally can +// have values of one type or another. All are used to represent parts +// of Schemas. + +// SchemaNumber represents a value that can be either an Integer or a Float. +type SchemaNumber struct { + Integer *int64 + Float *float64 +} + +// NewSchemaNumberWithInteger creates and returns a new object +func NewSchemaNumberWithInteger(i int64) *SchemaNumber { + result := &SchemaNumber{} + result.Integer = &i + return result +} + +// NewSchemaNumberWithFloat creates and returns a new object +func NewSchemaNumberWithFloat(f float64) *SchemaNumber { + result := &SchemaNumber{} + result.Float = &f + return result +} + +// SchemaOrBoolean represents a value that can be either a Schema or a Boolean. +type SchemaOrBoolean struct { + Schema *Schema + Boolean *bool +} + +// NewSchemaOrBooleanWithSchema creates and returns a new object +func NewSchemaOrBooleanWithSchema(s *Schema) *SchemaOrBoolean { + result := &SchemaOrBoolean{} + result.Schema = s + return result +} + +// NewSchemaOrBooleanWithBoolean creates and returns a new object +func NewSchemaOrBooleanWithBoolean(b bool) *SchemaOrBoolean { + result := &SchemaOrBoolean{} + result.Boolean = &b + return result +} + +// StringOrStringArray represents a value that can be either +// a String or an Array of Strings. 
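+// JSON Schema uses this form for the "type" keyword, which accepts either a
+// single type name or an array of them, e.g. "string" or ["string", "null"].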
+type StringOrStringArray struct { + String *string + StringArray *[]string +} + +// NewStringOrStringArrayWithString creates and returns a new object +func NewStringOrStringArrayWithString(s string) *StringOrStringArray { + result := &StringOrStringArray{} + result.String = &s + return result +} + +// NewStringOrStringArrayWithStringArray creates and returns a new object +func NewStringOrStringArrayWithStringArray(a []string) *StringOrStringArray { + result := &StringOrStringArray{} + result.StringArray = &a + return result +} + +// SchemaOrStringArray represents a value that can be either +// a Schema or an Array of Strings. +type SchemaOrStringArray struct { + Schema *Schema + StringArray *[]string +} + +// SchemaOrSchemaArray represents a value that can be either +// a Schema or an Array of Schemas. +type SchemaOrSchemaArray struct { + Schema *Schema + SchemaArray *[]*Schema +} + +// NewSchemaOrSchemaArrayWithSchema creates and returns a new object +func NewSchemaOrSchemaArrayWithSchema(s *Schema) *SchemaOrSchemaArray { + result := &SchemaOrSchemaArray{} + result.Schema = s + return result +} + +// NewSchemaOrSchemaArrayWithSchemaArray creates and returns a new object +func NewSchemaOrSchemaArrayWithSchemaArray(a []*Schema) *SchemaOrSchemaArray { + result := &SchemaOrSchemaArray{} + result.SchemaArray = &a + return result +} + +// SchemaEnumValue represents a value that can be part of an +// enumeration in a Schema. +type SchemaEnumValue struct { + String *string + Bool *bool +} + +// NamedSchema is a name-value pair that is used to emulate maps +// with ordered keys. +type NamedSchema struct { + Name string + Value *Schema +} + +// NewNamedSchema creates and returns a new object +func NewNamedSchema(name string, value *Schema) *NamedSchema { + return &NamedSchema{Name: name, Value: value} +} + +// NamedSchemaOrStringArray is a name-value pair that is used +// to emulate maps with ordered keys. +type NamedSchemaOrStringArray struct { + Name string + Value *SchemaOrStringArray +} + +// Access named subschemas by name + +func namedSchemaArrayElementWithName(array *[]*NamedSchema, name string) *Schema { + if array == nil { + return nil + } + for _, pair := range *array { + if pair.Name == name { + return pair.Value + } + } + return nil +} + +// PropertyWithName returns the selected element. +func (s *Schema) PropertyWithName(name string) *Schema { + return namedSchemaArrayElementWithName(s.Properties, name) +} + +// PatternPropertyWithName returns the selected element. +func (s *Schema) PatternPropertyWithName(name string) *Schema { + return namedSchemaArrayElementWithName(s.PatternProperties, name) +} + +// DefinitionWithName returns the selected element. +func (s *Schema) DefinitionWithName(name string) *Schema { + return namedSchemaArrayElementWithName(s.Definitions, name) +} + +// AddProperty adds a named property. +func (s *Schema) AddProperty(name string, property *Schema) { + *s.Properties = append(*s.Properties, NewNamedSchema(name, property)) +} diff --git a/vendor/github.com/googleapis/gnostic/jsonschema/operations.go b/vendor/github.com/googleapis/gnostic/jsonschema/operations.go new file mode 100644 index 0000000000000..ba8dd4a91b9e6 --- /dev/null +++ b/vendor/github.com/googleapis/gnostic/jsonschema/operations.go @@ -0,0 +1,394 @@ +// Copyright 2017 Google LLC. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package jsonschema + +import ( + "fmt" + "log" + "strings" +) + +// +// OPERATIONS +// The following methods perform operations on Schemas. +// + +// IsEmpty returns true if no members of the Schema are specified. +func (schema *Schema) IsEmpty() bool { + return (schema.Schema == nil) && + (schema.ID == nil) && + (schema.MultipleOf == nil) && + (schema.Maximum == nil) && + (schema.ExclusiveMaximum == nil) && + (schema.Minimum == nil) && + (schema.ExclusiveMinimum == nil) && + (schema.MaxLength == nil) && + (schema.MinLength == nil) && + (schema.Pattern == nil) && + (schema.AdditionalItems == nil) && + (schema.Items == nil) && + (schema.MaxItems == nil) && + (schema.MinItems == nil) && + (schema.UniqueItems == nil) && + (schema.MaxProperties == nil) && + (schema.MinProperties == nil) && + (schema.Required == nil) && + (schema.AdditionalProperties == nil) && + (schema.Properties == nil) && + (schema.PatternProperties == nil) && + (schema.Dependencies == nil) && + (schema.Enumeration == nil) && + (schema.Type == nil) && + (schema.AllOf == nil) && + (schema.AnyOf == nil) && + (schema.OneOf == nil) && + (schema.Not == nil) && + (schema.Definitions == nil) && + (schema.Title == nil) && + (schema.Description == nil) && + (schema.Default == nil) && + (schema.Format == nil) && + (schema.Ref == nil) +} + +// IsEqual returns true if two schemas are equal. +func (schema *Schema) IsEqual(schema2 *Schema) bool { + return schema.String() == schema2.String() +} + +// SchemaOperation represents a function that can be applied to a Schema. +type SchemaOperation func(schema *Schema, context string) + +// Applies a specified function to a Schema and all of the Schemas that it contains. 
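+// Subschemas are visited before the schema that contains them (a post-order
+// traversal), so the operation always sees fully-visited children.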
+func (schema *Schema) applyToSchemas(operation SchemaOperation, context string) {
+
+	if schema.AdditionalItems != nil {
+		s := schema.AdditionalItems.Schema
+		if s != nil {
+			s.applyToSchemas(operation, "AdditionalItems")
+		}
+	}
+
+	if schema.Items != nil {
+		if schema.Items.SchemaArray != nil {
+			for _, s := range *(schema.Items.SchemaArray) {
+				s.applyToSchemas(operation, "Items.SchemaArray")
+			}
+		} else if schema.Items.Schema != nil {
+			schema.Items.Schema.applyToSchemas(operation, "Items.Schema")
+		}
+	}
+
+	if schema.AdditionalProperties != nil {
+		s := schema.AdditionalProperties.Schema
+		if s != nil {
+			s.applyToSchemas(operation, "AdditionalProperties")
+		}
+	}
+
+	if schema.Properties != nil {
+		for _, pair := range *(schema.Properties) {
+			s := pair.Value
+			s.applyToSchemas(operation, "Properties")
+		}
+	}
+	if schema.PatternProperties != nil {
+		for _, pair := range *(schema.PatternProperties) {
+			s := pair.Value
+			s.applyToSchemas(operation, "PatternProperties")
+		}
+	}
+
+	if schema.Dependencies != nil {
+		for _, pair := range *(schema.Dependencies) {
+			schemaOrStringArray := pair.Value
+			s := schemaOrStringArray.Schema
+			if s != nil {
+				s.applyToSchemas(operation, "Dependencies")
+			}
+		}
+	}
+
+	if schema.AllOf != nil {
+		for _, s := range *(schema.AllOf) {
+			s.applyToSchemas(operation, "AllOf")
+		}
+	}
+	if schema.AnyOf != nil {
+		for _, s := range *(schema.AnyOf) {
+			s.applyToSchemas(operation, "AnyOf")
+		}
+	}
+	if schema.OneOf != nil {
+		for _, s := range *(schema.OneOf) {
+			s.applyToSchemas(operation, "OneOf")
+		}
+	}
+	if schema.Not != nil {
+		schema.Not.applyToSchemas(operation, "Not")
+	}
+
+	if schema.Definitions != nil {
+		for _, pair := range *(schema.Definitions) {
+			s := pair.Value
+			s.applyToSchemas(operation, "Definitions")
+		}
+	}
+
+	operation(schema, context)
+}
+
+// CopyProperties copies all non-nil properties from the source Schema to the receiving Schema.
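+// Note that the copy is shallow: pointer fields are copied as-is, so the
+// receiver shares the underlying values with the source Schema.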
+func (schema *Schema) CopyProperties(source *Schema) { + if source.Schema != nil { + schema.Schema = source.Schema + } + if source.ID != nil { + schema.ID = source.ID + } + if source.MultipleOf != nil { + schema.MultipleOf = source.MultipleOf + } + if source.Maximum != nil { + schema.Maximum = source.Maximum + } + if source.ExclusiveMaximum != nil { + schema.ExclusiveMaximum = source.ExclusiveMaximum + } + if source.Minimum != nil { + schema.Minimum = source.Minimum + } + if source.ExclusiveMinimum != nil { + schema.ExclusiveMinimum = source.ExclusiveMinimum + } + if source.MaxLength != nil { + schema.MaxLength = source.MaxLength + } + if source.MinLength != nil { + schema.MinLength = source.MinLength + } + if source.Pattern != nil { + schema.Pattern = source.Pattern + } + if source.AdditionalItems != nil { + schema.AdditionalItems = source.AdditionalItems + } + if source.Items != nil { + schema.Items = source.Items + } + if source.MaxItems != nil { + schema.MaxItems = source.MaxItems + } + if source.MinItems != nil { + schema.MinItems = source.MinItems + } + if source.UniqueItems != nil { + schema.UniqueItems = source.UniqueItems + } + if source.MaxProperties != nil { + schema.MaxProperties = source.MaxProperties + } + if source.MinProperties != nil { + schema.MinProperties = source.MinProperties + } + if source.Required != nil { + schema.Required = source.Required + } + if source.AdditionalProperties != nil { + schema.AdditionalProperties = source.AdditionalProperties + } + if source.Properties != nil { + schema.Properties = source.Properties + } + if source.PatternProperties != nil { + schema.PatternProperties = source.PatternProperties + } + if source.Dependencies != nil { + schema.Dependencies = source.Dependencies + } + if source.Enumeration != nil { + schema.Enumeration = source.Enumeration + } + if source.Type != nil { + schema.Type = source.Type + } + if source.AllOf != nil { + schema.AllOf = source.AllOf + } + if source.AnyOf != nil { + schema.AnyOf = source.AnyOf + } + if source.OneOf != nil { + schema.OneOf = source.OneOf + } + if source.Not != nil { + schema.Not = source.Not + } + if source.Definitions != nil { + schema.Definitions = source.Definitions + } + if source.Title != nil { + schema.Title = source.Title + } + if source.Description != nil { + schema.Description = source.Description + } + if source.Default != nil { + schema.Default = source.Default + } + if source.Format != nil { + schema.Format = source.Format + } + if source.Ref != nil { + schema.Ref = source.Ref + } +} + +// TypeIs returns true if the Type of a Schema includes the specified type +func (schema *Schema) TypeIs(typeName string) bool { + if schema.Type != nil { + // the schema Type is either a string or an array of strings + if schema.Type.String != nil { + return (*(schema.Type.String) == typeName) + } else if schema.Type.StringArray != nil { + for _, n := range *(schema.Type.StringArray) { + if n == typeName { + return true + } + } + } + } + return false +} + +// ResolveRefs resolves "$ref" elements in a Schema and its children. +// But if a reference refers to an object type, is inside a oneOf, or contains a oneOf, +// the reference is kept and we expect downstream tools to separately model these +// referenced schemas. 
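+// Resolution runs to a fixed point: passes repeat until a pass performs no
+// substitutions, so chains of references are expanded completely.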
+func (schema *Schema) ResolveRefs() {
+	rootSchema := schema
+	count := 1
+	for count > 0 {
+		count = 0
+		schema.applyToSchemas(
+			func(schema *Schema, context string) {
+				if schema.Ref != nil {
+					resolvedRef, err := rootSchema.resolveJSONPointer(*(schema.Ref))
+					if err != nil {
+						log.Printf("%+v", err)
+					} else if resolvedRef.TypeIs("object") {
+						// don't substitute for objects, we'll model the referenced schema with a class
+					} else if context == "OneOf" {
+						// don't substitute for references inside oneOf declarations
+					} else if resolvedRef.OneOf != nil {
+						// don't substitute for references that contain oneOf declarations
+					} else if resolvedRef.AdditionalProperties != nil {
+						// don't substitute for references that look like objects
+					} else {
+						schema.Ref = nil
+						schema.CopyProperties(resolvedRef)
+						count++
+					}
+				}
+			}, "")
+	}
+}
+
+// resolveJSONPointer resolves JSON pointers.
+// The current implementation is very crude and custom-built for OpenAPI 2.0 schemas.
+// It returns an error for any pointer that it is unable to resolve.
+func (schema *Schema) resolveJSONPointer(ref string) (result *Schema, err error) {
+	parts := strings.Split(ref, "#")
+	if len(parts) == 2 {
+		documentName := parts[0] + "#"
+		if documentName == "#" && schema.ID != nil {
+			documentName = *(schema.ID)
+		}
+		path := parts[1]
+		document := schemas[documentName]
+		pathParts := strings.Split(path, "/")
+
+		// we currently do a very limited (hard-coded) resolution of certain paths and log errors for missed cases
+		if len(pathParts) == 1 {
+			return document, nil
+		} else if len(pathParts) == 3 {
+			switch pathParts[1] {
+			case "definitions":
+				dictionary := document.Definitions
+				for _, pair := range *dictionary {
+					if pair.Name == pathParts[2] {
+						result = pair.Value
+					}
+				}
+			case "properties":
+				dictionary := document.Properties
+				for _, pair := range *dictionary {
+					if pair.Name == pathParts[2] {
+						result = pair.Value
+					}
+				}
+			default:
+				break
+			}
+		}
+	}
+	if result == nil {
+		return nil, fmt.Errorf("unresolved pointer: %+v", ref)
+	}
+	return result, nil
+}
+
+// ResolveAllOfs replaces "allOf" elements by merging their properties into the parent Schema.
+func (schema *Schema) ResolveAllOfs() {
+	schema.applyToSchemas(
+		func(schema *Schema, context string) {
+			if schema.AllOf != nil {
+				for _, allOf := range *(schema.AllOf) {
+					schema.CopyProperties(allOf)
+				}
+				schema.AllOf = nil
+			}
+		}, "resolveAllOfs")
+}
+
+// ResolveAnyOfs replaces all "anyOf" elements with "oneOf".
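+// Note that this is a simplification: "anyOf" allows several branches to
+// match while "oneOf" requires exactly one, but the distinction is dropped here.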
+func (schema *Schema) ResolveAnyOfs() { + schema.applyToSchemas( + func(schema *Schema, context string) { + if schema.AnyOf != nil { + schema.OneOf = schema.AnyOf + schema.AnyOf = nil + } + }, "resolveAnyOfs") +} + +// return a pointer to a copy of a passed-in string +func stringptr(input string) (output *string) { + return &input +} + +// CopyOfficialSchemaProperty copies a named property from the official JSON Schema definition +func (schema *Schema) CopyOfficialSchemaProperty(name string) { + *schema.Properties = append(*schema.Properties, + NewNamedSchema(name, + &Schema{Ref: stringptr("http://json-schema.org/draft-04/schema#/properties/" + name)})) +} + +// CopyOfficialSchemaProperties copies named properties from the official JSON Schema definition +func (schema *Schema) CopyOfficialSchemaProperties(names []string) { + for _, name := range names { + schema.CopyOfficialSchemaProperty(name) + } +} diff --git a/vendor/github.com/googleapis/gnostic/jsonschema/reader.go b/vendor/github.com/googleapis/gnostic/jsonschema/reader.go new file mode 100644 index 0000000000000..b8583d4660234 --- /dev/null +++ b/vendor/github.com/googleapis/gnostic/jsonschema/reader.go @@ -0,0 +1,442 @@ +// Copyright 2017 Google LLC. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:generate go run generate-base.go + +package jsonschema + +import ( + "fmt" + "io/ioutil" + "strconv" + + "gopkg.in/yaml.v3" +) + +// This is a global map of all known Schemas. +// It is initialized when the first Schema is created and inserted. +var schemas map[string]*Schema + +// NewBaseSchema builds a schema object from an embedded json representation. +func NewBaseSchema() (schema *Schema, err error) { + b, err := baseSchemaBytes() + if err != nil { + return nil, err + } + var node yaml.Node + err = yaml.Unmarshal(b, &node) + if err != nil { + return nil, err + } + return NewSchemaFromObject(&node), nil +} + +// NewSchemaFromFile reads a schema from a file. +// Currently this assumes that schemas are stored in the source distribution of this project. +func NewSchemaFromFile(filename string) (schema *Schema, err error) { + file, err := ioutil.ReadFile(filename) + if err != nil { + return nil, err + } + var node yaml.Node + err = yaml.Unmarshal(file, &node) + if err != nil { + return nil, err + } + return NewSchemaFromObject(&node), nil +} + +// NewSchemaFromObject constructs a schema from a parsed JSON object. +// Due to the complexity of the schema representation, this is a +// custom reader and not the standard Go JSON reader (encoding/json). 
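+// Note that the yaml.v3 parser also accepts JSON input (JSON documents are
+// valid YAML), which is why schemas arrive here as yaml.Node trees.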
+func NewSchemaFromObject(jsonData *yaml.Node) *Schema {
+	switch jsonData.Kind {
+	case yaml.DocumentNode:
+		return NewSchemaFromObject(jsonData.Content[0])
+	case yaml.MappingNode:
+		schema := &Schema{}
+
+		for i := 0; i < len(jsonData.Content); i += 2 {
+			k := jsonData.Content[i].Value
+			v := jsonData.Content[i+1]
+
+			switch k {
+			case "$schema":
+				schema.Schema = schema.stringValue(v)
+			case "id":
+				schema.ID = schema.stringValue(v)
+
+			case "multipleOf":
+				schema.MultipleOf = schema.numberValue(v)
+			case "maximum":
+				schema.Maximum = schema.numberValue(v)
+			case "exclusiveMaximum":
+				schema.ExclusiveMaximum = schema.boolValue(v)
+			case "minimum":
+				schema.Minimum = schema.numberValue(v)
+			case "exclusiveMinimum":
+				schema.ExclusiveMinimum = schema.boolValue(v)
+
+			case "maxLength":
+				schema.MaxLength = schema.intValue(v)
+			case "minLength":
+				schema.MinLength = schema.intValue(v)
+			case "pattern":
+				schema.Pattern = schema.stringValue(v)
+
+			case "additionalItems":
+				schema.AdditionalItems = schema.schemaOrBooleanValue(v)
+			case "items":
+				schema.Items = schema.schemaOrSchemaArrayValue(v)
+			case "maxItems":
+				schema.MaxItems = schema.intValue(v)
+			case "minItems":
+				schema.MinItems = schema.intValue(v)
+			case "uniqueItems":
+				schema.UniqueItems = schema.boolValue(v)
+
+			case "maxProperties":
+				schema.MaxProperties = schema.intValue(v)
+			case "minProperties":
+				schema.MinProperties = schema.intValue(v)
+			case "required":
+				schema.Required = schema.arrayOfStringsValue(v)
+			case "additionalProperties":
+				schema.AdditionalProperties = schema.schemaOrBooleanValue(v)
+			case "properties":
+				schema.Properties = schema.mapOfSchemasValue(v)
+			case "patternProperties":
+				schema.PatternProperties = schema.mapOfSchemasValue(v)
+			case "dependencies":
+				schema.Dependencies = schema.mapOfSchemasOrStringArraysValue(v)
+
+			case "enum":
+				schema.Enumeration = schema.arrayOfEnumValuesValue(v)
+
+			case "type":
+				schema.Type = schema.stringOrStringArrayValue(v)
+			case "allOf":
+				schema.AllOf = schema.arrayOfSchemasValue(v)
+			case "anyOf":
+				schema.AnyOf = schema.arrayOfSchemasValue(v)
+			case "oneOf":
+				schema.OneOf = schema.arrayOfSchemasValue(v)
+			case "not":
+				schema.Not = NewSchemaFromObject(v)
+			case "definitions":
+				schema.Definitions = schema.mapOfSchemasValue(v)
+
+			case "title":
+				schema.Title = schema.stringValue(v)
+			case "description":
+				schema.Description = schema.stringValue(v)
+
+			case "default":
+				schema.Default = v
+
+			case "format":
+				schema.Format = schema.stringValue(v)
+			case "$ref":
+				schema.Ref = schema.stringValue(v)
+			default:
+				fmt.Printf("UNSUPPORTED (%s)\n", k)
+			}
+		}
+
+		// insert schema in global map
+		if schema.ID != nil {
+			if schemas == nil {
+				schemas = make(map[string]*Schema)
+			}
+			schemas[*(schema.ID)] = schema
+		}
+		return schema
+
+	default:
+		fmt.Printf("NewSchemaFromObject: unexpected node %+v\n", jsonData)
+		return nil
+	}
+}
+
+//
+// BUILDERS
+// The following methods build elements of Schemas from yaml.Node values.
+// Each returns nil if it is unable to build the desired element.
+//
+
+// Gets the string value of a yaml.Node if possible.
+func (schema *Schema) stringValue(v *yaml.Node) *string {
+	switch v.Kind {
+	case yaml.ScalarNode:
+		return &v.Value
+	default:
+		fmt.Printf("stringValue: unexpected node %+v\n", v)
+	}
+	return nil
+}
+
+// Gets the numeric value of a yaml.Node if possible.
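+// The yaml tag distinguishes the two cases: "!!int" fills Integer and
+// "!!float" fills Float.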
+func (schema *Schema) numberValue(v *yaml.Node) *SchemaNumber {
+	number := &SchemaNumber{}
+	switch v.Kind {
+	case yaml.ScalarNode:
+		switch v.Tag {
+		case "!!float":
+			v2, _ := strconv.ParseFloat(v.Value, 64)
+			number.Float = &v2
+			return number
+		case "!!int":
+			v2, _ := strconv.ParseInt(v.Value, 10, 64)
+			number.Integer = &v2
+			return number
+		default:
+			fmt.Printf("numberValue: unexpected node %+v\n", v)
+		}
+	default:
+		fmt.Printf("numberValue: unexpected node %+v\n", v)
+	}
+	return nil
+}
+
+// Gets the integer value of a yaml.Node if possible.
+func (schema *Schema) intValue(v *yaml.Node) *int64 {
+	switch v.Kind {
+	case yaml.ScalarNode:
+		switch v.Tag {
+		case "!!float":
+			v2, _ := strconv.ParseFloat(v.Value, 64)
+			v3 := int64(v2)
+			return &v3
+		case "!!int":
+			v2, _ := strconv.ParseInt(v.Value, 10, 64)
+			return &v2
+		default:
+			fmt.Printf("intValue: unexpected node %+v\n", v)
+		}
+	default:
+		fmt.Printf("intValue: unexpected node %+v\n", v)
+	}
+	return nil
+}
+
+// Gets the bool value of a yaml.Node if possible.
+func (schema *Schema) boolValue(v *yaml.Node) *bool {
+	switch v.Kind {
+	case yaml.ScalarNode:
+		switch v.Tag {
+		case "!!bool":
+			v2, _ := strconv.ParseBool(v.Value)
+			return &v2
+		default:
+			fmt.Printf("boolValue: unexpected node %+v\n", v)
+		}
+	default:
+		fmt.Printf("boolValue: unexpected node %+v\n", v)
+	}
+	return nil
+}
+
+// Gets a map of Schemas from a yaml.Node if possible.
+func (schema *Schema) mapOfSchemasValue(v *yaml.Node) *[]*NamedSchema {
+	switch v.Kind {
+	case yaml.MappingNode:
+		m := make([]*NamedSchema, 0)
+		for i := 0; i < len(v.Content); i += 2 {
+			k2 := v.Content[i].Value
+			v2 := v.Content[i+1]
+			pair := &NamedSchema{Name: k2, Value: NewSchemaFromObject(v2)}
+			m = append(m, pair)
+		}
+		return &m
+	default:
+		fmt.Printf("mapOfSchemasValue: unexpected node %+v\n", v)
+	}
+	return nil
+}
+
+// Gets an array of Schemas from a yaml.Node if possible.
+func (schema *Schema) arrayOfSchemasValue(v *yaml.Node) *[]*Schema {
+	switch v.Kind {
+	case yaml.SequenceNode:
+		m := make([]*Schema, 0)
+		for _, v2 := range v.Content {
+			switch v2.Kind {
+			case yaml.MappingNode:
+				s := NewSchemaFromObject(v2)
+				m = append(m, s)
+			default:
+				fmt.Printf("arrayOfSchemasValue: unexpected node %+v\n", v2)
+			}
+		}
+		return &m
+	case yaml.MappingNode:
+		m := make([]*Schema, 0)
+		s := NewSchemaFromObject(v)
+		m = append(m, s)
+		return &m
+	default:
+		fmt.Printf("arrayOfSchemasValue: unexpected node %+v\n", v)
+	}
+	return nil
+}
+
+// Gets a Schema or an array of Schemas from a yaml.Node if possible.
+func (schema *Schema) schemaOrSchemaArrayValue(v *yaml.Node) *SchemaOrSchemaArray {
+	switch v.Kind {
+	case yaml.SequenceNode:
+		m := make([]*Schema, 0)
+		for _, v2 := range v.Content {
+			switch v2.Kind {
+			case yaml.MappingNode:
+				s := NewSchemaFromObject(v2)
+				m = append(m, s)
+			default:
+				fmt.Printf("schemaOrSchemaArrayValue: unexpected node %+v\n", v2)
+			}
+		}
+		return &SchemaOrSchemaArray{SchemaArray: &m}
+	case yaml.MappingNode:
+		s := NewSchemaFromObject(v)
+		return &SchemaOrSchemaArray{Schema: s}
+	default:
+		fmt.Printf("schemaOrSchemaArrayValue: unexpected node %+v\n", v)
+	}
+	return nil
+}
+
+// Gets an array of strings from a yaml.Node if possible.
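+// A lone scalar is accepted and promoted to a one-element array.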
+func (schema *Schema) arrayOfStringsValue(v *yaml.Node) *[]string {
+	switch v.Kind {
+	case yaml.ScalarNode:
+		a := []string{v.Value}
+		return &a
+	case yaml.SequenceNode:
+		a := make([]string, 0)
+		for _, v2 := range v.Content {
+			switch v2.Kind {
+			case yaml.ScalarNode:
+				a = append(a, v2.Value)
+			default:
+				fmt.Printf("arrayOfStringsValue: unexpected node %+v\n", v2)
+			}
+		}
+		return &a
+	default:
+		fmt.Printf("arrayOfStringsValue: unexpected node %+v\n", v)
+	}
+	return nil
+}
+
+// Gets a string or an array of strings from a yaml.Node if possible.
+func (schema *Schema) stringOrStringArrayValue(v *yaml.Node) *StringOrStringArray {
+	switch v.Kind {
+	case yaml.ScalarNode:
+		s := &StringOrStringArray{}
+		s.String = &v.Value
+		return s
+	case yaml.SequenceNode:
+		a := make([]string, 0)
+		for _, v2 := range v.Content {
+			switch v2.Kind {
+			case yaml.ScalarNode:
+				a = append(a, v2.Value)
+			default:
+				fmt.Printf("stringOrStringArrayValue: unexpected node %+v\n", v2)
+			}
+		}
+		s := &StringOrStringArray{}
+		s.StringArray = &a
+		return s
+	default:
+		fmt.Printf("stringOrStringArrayValue: unexpected node %+v\n", v)
+	}
+	return nil
+}
+
+// Gets an array of enum values from a yaml.Node if possible.
+func (schema *Schema) arrayOfEnumValuesValue(v *yaml.Node) *[]SchemaEnumValue {
+	a := make([]SchemaEnumValue, 0)
+	switch v.Kind {
+	case yaml.SequenceNode:
+		for _, v2 := range v.Content {
+			switch v2.Kind {
+			case yaml.ScalarNode:
+				switch v2.Tag {
+				case "!!str":
+					a = append(a, SchemaEnumValue{String: &v2.Value})
+				case "!!bool":
+					v3, _ := strconv.ParseBool(v2.Value)
+					a = append(a, SchemaEnumValue{Bool: &v3})
+				default:
+					fmt.Printf("arrayOfEnumValuesValue: unexpected type %s\n", v2.Tag)
+				}
+			default:
+				fmt.Printf("arrayOfEnumValuesValue: unexpected node %+v\n", v2)
+			}
+		}
+	default:
+		fmt.Printf("arrayOfEnumValuesValue: unexpected node %+v\n", v)
+	}
+	return &a
+}
+
+// Gets a map of schemas or string arrays from a yaml.Node if possible.
+func (schema *Schema) mapOfSchemasOrStringArraysValue(v *yaml.Node) *[]*NamedSchemaOrStringArray {
+	m := make([]*NamedSchemaOrStringArray, 0)
+	switch v.Kind {
+	case yaml.MappingNode:
+		for i := 0; i < len(v.Content); i += 2 {
+			k2 := v.Content[i].Value
+			v2 := v.Content[i+1]
+			switch v2.Kind {
+			case yaml.SequenceNode:
+				a := make([]string, 0)
+				for _, v3 := range v2.Content {
+					switch v3.Kind {
+					case yaml.ScalarNode:
+						a = append(a, v3.Value)
+					default:
+						fmt.Printf("mapOfSchemasOrStringArraysValue: unexpected node %+v\n", v3)
+					}
+				}
+				s := &SchemaOrStringArray{}
+				s.StringArray = &a
+				pair := &NamedSchemaOrStringArray{Name: k2, Value: s}
+				m = append(m, pair)
+			default:
+				fmt.Printf("mapOfSchemasOrStringArraysValue: unexpected node %+v\n", v2)
+			}
+		}
+	default:
+		fmt.Printf("mapOfSchemasOrStringArraysValue: unexpected node %+v\n", v)
+	}
+	return &m
+}
+
+// Gets a schema or a boolean value from a yaml.Node if possible.
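+// This form appears in "additionalProperties" and "additionalItems", which
+// accept either a boolean or a subschema.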
+func (schema *Schema) schemaOrBooleanValue(v *yaml.Node) *SchemaOrBoolean { + schemaOrBoolean := &SchemaOrBoolean{} + switch v.Kind { + case yaml.ScalarNode: + v2, _ := strconv.ParseBool(v.Value) + schemaOrBoolean.Boolean = &v2 + case yaml.MappingNode: + schemaOrBoolean.Schema = NewSchemaFromObject(v) + default: + fmt.Printf("schemaOrBooleanValue: unexpected node %+v\n", v) + } + return schemaOrBoolean +} diff --git a/vendor/github.com/googleapis/gnostic/jsonschema/schema.json b/vendor/github.com/googleapis/gnostic/jsonschema/schema.json new file mode 100644 index 0000000000000..85eb502a680ed --- /dev/null +++ b/vendor/github.com/googleapis/gnostic/jsonschema/schema.json @@ -0,0 +1,150 @@ +{ + "id": "http://json-schema.org/draft-04/schema#", + "$schema": "http://json-schema.org/draft-04/schema#", + "description": "Core schema meta-schema", + "definitions": { + "schemaArray": { + "type": "array", + "minItems": 1, + "items": { "$ref": "#" } + }, + "positiveInteger": { + "type": "integer", + "minimum": 0 + }, + "positiveIntegerDefault0": { + "allOf": [ { "$ref": "#/definitions/positiveInteger" }, { "default": 0 } ] + }, + "simpleTypes": { + "enum": [ "array", "boolean", "integer", "null", "number", "object", "string" ] + }, + "stringArray": { + "type": "array", + "items": { "type": "string" }, + "minItems": 1, + "uniqueItems": true + } + }, + "type": "object", + "properties": { + "id": { + "type": "string", + "format": "uri" + }, + "$schema": { + "type": "string", + "format": "uri" + }, + "title": { + "type": "string" + }, + "description": { + "type": "string" + }, + "default": {}, + "multipleOf": { + "type": "number", + "minimum": 0, + "exclusiveMinimum": true + }, + "maximum": { + "type": "number" + }, + "exclusiveMaximum": { + "type": "boolean", + "default": false + }, + "minimum": { + "type": "number" + }, + "exclusiveMinimum": { + "type": "boolean", + "default": false + }, + "maxLength": { "$ref": "#/definitions/positiveInteger" }, + "minLength": { "$ref": "#/definitions/positiveIntegerDefault0" }, + "pattern": { + "type": "string", + "format": "regex" + }, + "additionalItems": { + "anyOf": [ + { "type": "boolean" }, + { "$ref": "#" } + ], + "default": {} + }, + "items": { + "anyOf": [ + { "$ref": "#" }, + { "$ref": "#/definitions/schemaArray" } + ], + "default": {} + }, + "maxItems": { "$ref": "#/definitions/positiveInteger" }, + "minItems": { "$ref": "#/definitions/positiveIntegerDefault0" }, + "uniqueItems": { + "type": "boolean", + "default": false + }, + "maxProperties": { "$ref": "#/definitions/positiveInteger" }, + "minProperties": { "$ref": "#/definitions/positiveIntegerDefault0" }, + "required": { "$ref": "#/definitions/stringArray" }, + "additionalProperties": { + "anyOf": [ + { "type": "boolean" }, + { "$ref": "#" } + ], + "default": {} + }, + "definitions": { + "type": "object", + "additionalProperties": { "$ref": "#" }, + "default": {} + }, + "properties": { + "type": "object", + "additionalProperties": { "$ref": "#" }, + "default": {} + }, + "patternProperties": { + "type": "object", + "additionalProperties": { "$ref": "#" }, + "default": {} + }, + "dependencies": { + "type": "object", + "additionalProperties": { + "anyOf": [ + { "$ref": "#" }, + { "$ref": "#/definitions/stringArray" } + ] + } + }, + "enum": { + "type": "array", + "minItems": 1, + "uniqueItems": true + }, + "type": { + "anyOf": [ + { "$ref": "#/definitions/simpleTypes" }, + { + "type": "array", + "items": { "$ref": "#/definitions/simpleTypes" }, + "minItems": 1, + "uniqueItems": true + } + ] + }, + 
"allOf": { "$ref": "#/definitions/schemaArray" }, + "anyOf": { "$ref": "#/definitions/schemaArray" }, + "oneOf": { "$ref": "#/definitions/schemaArray" }, + "not": { "$ref": "#" } + }, + "dependencies": { + "exclusiveMaximum": [ "maximum" ], + "exclusiveMinimum": [ "minimum" ] + }, + "default": {} +} diff --git a/vendor/github.com/googleapis/gnostic/jsonschema/writer.go b/vendor/github.com/googleapis/gnostic/jsonschema/writer.go new file mode 100644 index 0000000000000..340dc5f933061 --- /dev/null +++ b/vendor/github.com/googleapis/gnostic/jsonschema/writer.go @@ -0,0 +1,369 @@ +// Copyright 2017 Google LLC. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package jsonschema + +import ( + "fmt" + + "gopkg.in/yaml.v3" +) + +const indentation = " " + +func renderMappingNode(node *yaml.Node, indent string) (result string) { + result = "{\n" + innerIndent := indent + indentation + for i := 0; i < len(node.Content); i += 2 { + // first print the key + key := node.Content[i].Value + result += fmt.Sprintf("%s\"%+v\": ", innerIndent, key) + // then the value + value := node.Content[i+1] + switch value.Kind { + case yaml.ScalarNode: + result += "\"" + value.Value + "\"" + case yaml.MappingNode: + result += renderMappingNode(value, innerIndent) + case yaml.SequenceNode: + result += renderSequenceNode(value, innerIndent) + default: + result += fmt.Sprintf("???MapItem(Key:%+v, Value:%T)", value, value) + } + if i < len(node.Content)-2 { + result += "," + } + result += "\n" + } + + result += indent + "}" + return result +} + +func renderSequenceNode(node *yaml.Node, indent string) (result string) { + result = "[\n" + innerIndent := indent + indentation + for i := 0; i < len(node.Content); i++ { + item := node.Content[i] + switch item.Kind { + case yaml.ScalarNode: + result += innerIndent + "\"" + item.Value + "\"" + case yaml.MappingNode: + result += innerIndent + renderMappingNode(item, innerIndent) + "" + default: + result += innerIndent + fmt.Sprintf("???ArrayItem(%+v)", item) + } + if i < len(node.Content)-1 { + result += "," + } + result += "\n" + } + result += indent + "]" + return result +} + +func renderStringArray(array []string, indent string) (result string) { + result = "[\n" + innerIndent := indent + indentation + for i, item := range array { + result += innerIndent + "\"" + item + "\"" + if i < len(array)-1 { + result += "," + } + result += "\n" + } + result += indent + "]" + return result +} + +// Render renders a yaml.Node as JSON +func Render(node *yaml.Node) string { + if node.Kind == yaml.DocumentNode { + if len(node.Content) == 1 { + return Render(node.Content[0]) + } + } else if node.Kind == yaml.MappingNode { + return renderMappingNode(node, "") + "\n" + } else if node.Kind == yaml.SequenceNode { + return renderSequenceNode(node, "") + "\n" + } + return "" +} + +func (object *SchemaNumber) nodeValue() *yaml.Node { + if object.Integer != nil { + return nodeForInt64(*object.Integer) + } else if object.Float != nil { + return 
nodeForFloat64(*object.Float) + } else { + return nil + } +} + +func (object *SchemaOrBoolean) nodeValue() *yaml.Node { + if object.Schema != nil { + return object.Schema.nodeValue() + } else if object.Boolean != nil { + return nodeForBoolean(*object.Boolean) + } else { + return nil + } +} + +func nodeForStringArray(array []string) *yaml.Node { + content := make([]*yaml.Node, 0) + for _, item := range array { + content = append(content, nodeForString(item)) + } + return nodeForSequence(content) +} + +func nodeForSchemaArray(array []*Schema) *yaml.Node { + content := make([]*yaml.Node, 0) + for _, item := range array { + content = append(content, item.nodeValue()) + } + return nodeForSequence(content) +} + +func (object *StringOrStringArray) nodeValue() *yaml.Node { + if object.String != nil { + return nodeForString(*object.String) + } else if object.StringArray != nil { + return nodeForStringArray(*(object.StringArray)) + } else { + return nil + } +} + +func (object *SchemaOrStringArray) nodeValue() *yaml.Node { + if object.Schema != nil { + return object.Schema.nodeValue() + } else if object.StringArray != nil { + return nodeForStringArray(*(object.StringArray)) + } else { + return nil + } +} + +func (object *SchemaOrSchemaArray) nodeValue() *yaml.Node { + if object.Schema != nil { + return object.Schema.nodeValue() + } else if object.SchemaArray != nil { + return nodeForSchemaArray(*(object.SchemaArray)) + } else { + return nil + } +} + +func (object *SchemaEnumValue) nodeValue() *yaml.Node { + if object.String != nil { + return nodeForString(*object.String) + } else if object.Bool != nil { + return nodeForBoolean(*object.Bool) + } else { + return nil + } +} + +func nodeForNamedSchemaArray(array *[]*NamedSchema) *yaml.Node { + content := make([]*yaml.Node, 0) + for _, pair := range *(array) { + content = appendPair(content, pair.Name, pair.Value.nodeValue()) + } + return nodeForMapping(content) +} + +func nodeForNamedSchemaOrStringArray(array *[]*NamedSchemaOrStringArray) *yaml.Node { + content := make([]*yaml.Node, 0) + for _, pair := range *(array) { + content = appendPair(content, pair.Name, pair.Value.nodeValue()) + } + return nodeForMapping(content) +} + +func nodeForSchemaEnumArray(array *[]SchemaEnumValue) *yaml.Node { + content := make([]*yaml.Node, 0) + for _, item := range *array { + content = append(content, item.nodeValue()) + } + return nodeForSequence(content) +} + +func nodeForMapping(content []*yaml.Node) *yaml.Node { + return &yaml.Node{ + Kind: yaml.MappingNode, + Content: content, + } +} + +func nodeForSequence(content []*yaml.Node) *yaml.Node { + return &yaml.Node{ + Kind: yaml.SequenceNode, + Content: content, + } +} + +func nodeForString(value string) *yaml.Node { + return &yaml.Node{ + Kind: yaml.ScalarNode, + Tag: "!!str", + Value: value, + } +} + +func nodeForBoolean(value bool) *yaml.Node { + return &yaml.Node{ + Kind: yaml.ScalarNode, + Tag: "!!bool", + Value: fmt.Sprintf("%t", value), + } +} + +func nodeForInt64(value int64) *yaml.Node { + return &yaml.Node{ + Kind: yaml.ScalarNode, + Tag: "!!int", + Value: fmt.Sprintf("%d", value), + } +} + +func nodeForFloat64(value float64) *yaml.Node { + return &yaml.Node{ + Kind: yaml.ScalarNode, + Tag: "!!float", + Value: fmt.Sprintf("%f", value), + } +} + +func appendPair(nodes []*yaml.Node, name string, value *yaml.Node) []*yaml.Node { + nodes = append(nodes, nodeForString(name)) + nodes = append(nodes, value) + return nodes +} + +func (schema *Schema) nodeValue() *yaml.Node { + n := &yaml.Node{Kind: yaml.MappingNode} 
+ content := make([]*yaml.Node, 0) + if schema.Title != nil { + content = appendPair(content, "title", nodeForString(*schema.Title)) + } + if schema.ID != nil { + content = appendPair(content, "id", nodeForString(*schema.ID)) + } + if schema.Schema != nil { + content = appendPair(content, "$schema", nodeForString(*schema.Schema)) + } + if schema.Type != nil { + content = appendPair(content, "type", schema.Type.nodeValue()) + } + if schema.Items != nil { + content = appendPair(content, "items", schema.Items.nodeValue()) + } + if schema.Description != nil { + content = appendPair(content, "description", nodeForString(*schema.Description)) + } + if schema.Required != nil { + content = appendPair(content, "required", nodeForStringArray(*schema.Required)) + } + if schema.AdditionalProperties != nil { + content = appendPair(content, "additionalProperties", schema.AdditionalProperties.nodeValue()) + } + if schema.PatternProperties != nil { + content = appendPair(content, "patternProperties", nodeForNamedSchemaArray(schema.PatternProperties)) + } + if schema.Properties != nil { + content = appendPair(content, "properties", nodeForNamedSchemaArray(schema.Properties)) + } + if schema.Dependencies != nil { + content = appendPair(content, "dependencies", nodeForNamedSchemaOrStringArray(schema.Dependencies)) + } + if schema.Ref != nil { + content = appendPair(content, "$ref", nodeForString(*schema.Ref)) + } + if schema.MultipleOf != nil { + content = appendPair(content, "multipleOf", schema.MultipleOf.nodeValue()) + } + if schema.Maximum != nil { + content = appendPair(content, "maximum", schema.Maximum.nodeValue()) + } + if schema.ExclusiveMaximum != nil { + content = appendPair(content, "exclusiveMaximum", nodeForBoolean(*schema.ExclusiveMaximum)) + } + if schema.Minimum != nil { + content = appendPair(content, "minimum", schema.Minimum.nodeValue()) + } + if schema.ExclusiveMinimum != nil { + content = appendPair(content, "exclusiveMinimum", nodeForBoolean(*schema.ExclusiveMinimum)) + } + if schema.MaxLength != nil { + content = appendPair(content, "maxLength", nodeForInt64(*schema.MaxLength)) + } + if schema.MinLength != nil { + content = appendPair(content, "minLength", nodeForInt64(*schema.MinLength)) + } + if schema.Pattern != nil { + content = appendPair(content, "pattern", nodeForString(*schema.Pattern)) + } + if schema.AdditionalItems != nil { + content = appendPair(content, "additionalItems", schema.AdditionalItems.nodeValue()) + } + if schema.MaxItems != nil { + content = appendPair(content, "maxItems", nodeForInt64(*schema.MaxItems)) + } + if schema.MinItems != nil { + content = appendPair(content, "minItems", nodeForInt64(*schema.MinItems)) + } + if schema.UniqueItems != nil { + content = appendPair(content, "uniqueItems", nodeForBoolean(*schema.UniqueItems)) + } + if schema.MaxProperties != nil { + content = appendPair(content, "maxProperties", nodeForInt64(*schema.MaxProperties)) + } + if schema.MinProperties != nil { + content = appendPair(content, "minProperties", nodeForInt64(*schema.MinProperties)) + } + if schema.Enumeration != nil { + content = appendPair(content, "enum", nodeForSchemaEnumArray(schema.Enumeration)) + } + if schema.AllOf != nil { + content = appendPair(content, "allOf", nodeForSchemaArray(*schema.AllOf)) + } + if schema.AnyOf != nil { + content = appendPair(content, "anyOf", nodeForSchemaArray(*schema.AnyOf)) + } + if schema.OneOf != nil { + content = appendPair(content, "oneOf", nodeForSchemaArray(*schema.OneOf)) + } + if schema.Not != nil { + content = 
appendPair(content, "not", schema.Not.nodeValue()) + } + if schema.Definitions != nil { + content = appendPair(content, "definitions", nodeForNamedSchemaArray(schema.Definitions)) + } + if schema.Default != nil { + // m = append(m, yaml.MapItem{Key: "default", Value: *schema.Default}) + } + if schema.Format != nil { + content = appendPair(content, "format", nodeForString(*schema.Format)) + } + n.Content = content + return n +} + +// JSONString returns a json representation of a schema. +func (schema *Schema) JSONString() string { + node := schema.nodeValue() + return Render(node) +} diff --git a/vendor/github.com/googleapis/gnostic/openapiv2/OpenAPIv2.go b/vendor/github.com/googleapis/gnostic/openapiv2/OpenAPIv2.go index 4fd44c45e228b..727d7f4ad5258 100644 --- a/vendor/github.com/googleapis/gnostic/openapiv2/OpenAPIv2.go +++ b/vendor/github.com/googleapis/gnostic/openapiv2/OpenAPIv2.go @@ -1,4 +1,4 @@ -// Copyright 2017 Google Inc. All Rights Reserved. +// Copyright 2020 Google LLC. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -19,7 +19,7 @@ package openapi_v2 import ( "fmt" "github.com/googleapis/gnostic/compiler" - "gopkg.in/yaml.v2" + "gopkg.in/yaml.v3" "regexp" "strings" ) @@ -30,7 +30,7 @@ func Version() string { } // NewAdditionalPropertiesItem creates an object of type AdditionalPropertiesItem if possible, returning an error if not. -func NewAdditionalPropertiesItem(in interface{}, context *compiler.Context) (*AdditionalPropertiesItem, error) { +func NewAdditionalPropertiesItem(in *yaml.Node, context *compiler.Context) (*AdditionalPropertiesItem, error) { errors := make([]error, 0) x := &AdditionalPropertiesItem{} matched := false @@ -39,7 +39,7 @@ func NewAdditionalPropertiesItem(in interface{}, context *compiler.Context) (*Ad m, ok := compiler.UnpackMap(in) if ok { // errors might be ok here, they mean we just don't have the right subtype - t, matchingError := NewSchema(m, compiler.NewContext("schema", context)) + t, matchingError := NewSchema(m, compiler.NewContext("schema", m, context)) if matchingError == nil { x.Oneof = &AdditionalPropertiesItem_Schema{Schema: t} matched = true @@ -49,28 +49,33 @@ func NewAdditionalPropertiesItem(in interface{}, context *compiler.Context) (*Ad } } // bool boolean = 2; - boolValue, ok := in.(bool) + boolValue, ok := compiler.BoolForScalarNode(in) if ok { x.Oneof = &AdditionalPropertiesItem_Boolean{Boolean: boolValue} + matched = true } if matched { // since the oneof matched one of its possibilities, discard any matching errors errors = make([]error, 0) + } else { + message := fmt.Sprintf("contains an invalid AdditionalPropertiesItem") + err := compiler.NewError(context, message) + errors = []error{err} } return x, compiler.NewErrorGroupOrNil(errors) } // NewAny creates an object of type Any if possible, returning an error if not. -func NewAny(in interface{}, context *compiler.Context) (*Any, error) { +func NewAny(in *yaml.Node, context *compiler.Context) (*Any, error) { errors := make([]error, 0) x := &Any{} - bytes, _ := yaml.Marshal(in) + bytes := compiler.Marshal(in) x.Yaml = string(bytes) return x, compiler.NewErrorGroupOrNil(errors) } // NewApiKeySecurity creates an object of type ApiKeySecurity if possible, returning an error if not. 
-func NewApiKeySecurity(in interface{}, context *compiler.Context) (*ApiKeySecurity, error) { +func NewApiKeySecurity(in *yaml.Node, context *compiler.Context) (*ApiKeySecurity, error) { errors := make([]error, 0) x := &ApiKeySecurity{} m, ok := compiler.UnpackMap(in) @@ -94,74 +99,74 @@ func NewApiKeySecurity(in interface{}, context *compiler.Context) (*ApiKeySecuri // string type = 1; v1 := compiler.MapValueForKey(m, "type") if v1 != nil { - x.Type, ok = v1.(string) + x.Type, ok = compiler.StringForScalarNode(v1) if !ok { - message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1) + message := fmt.Sprintf("has unexpected value for type: %s", compiler.Display(v1)) errors = append(errors, compiler.NewError(context, message)) } // check for valid enum values // [apiKey] if ok && !compiler.StringArrayContainsValue([]string{"apiKey"}, x.Type) { - message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1) + message := fmt.Sprintf("has unexpected value for type: %s", compiler.Display(v1)) errors = append(errors, compiler.NewError(context, message)) } } // string name = 2; v2 := compiler.MapValueForKey(m, "name") if v2 != nil { - x.Name, ok = v2.(string) + x.Name, ok = compiler.StringForScalarNode(v2) if !ok { - message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v2, v2) + message := fmt.Sprintf("has unexpected value for name: %s", compiler.Display(v2)) errors = append(errors, compiler.NewError(context, message)) } } // string in = 3; v3 := compiler.MapValueForKey(m, "in") if v3 != nil { - x.In, ok = v3.(string) + x.In, ok = compiler.StringForScalarNode(v3) if !ok { - message := fmt.Sprintf("has unexpected value for in: %+v (%T)", v3, v3) + message := fmt.Sprintf("has unexpected value for in: %s", compiler.Display(v3)) errors = append(errors, compiler.NewError(context, message)) } // check for valid enum values // [header query] if ok && !compiler.StringArrayContainsValue([]string{"header", "query"}, x.In) { - message := fmt.Sprintf("has unexpected value for in: %+v (%T)", v3, v3) + message := fmt.Sprintf("has unexpected value for in: %s", compiler.Display(v3)) errors = append(errors, compiler.NewError(context, message)) } } // string description = 4; v4 := compiler.MapValueForKey(m, "description") if v4 != nil { - x.Description, ok = v4.(string) + x.Description, ok = compiler.StringForScalarNode(v4) if !ok { - message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v4, v4) + message := fmt.Sprintf("has unexpected value for description: %s", compiler.Display(v4)) errors = append(errors, compiler.NewError(context, message)) } } // repeated NamedAny vendor_extension = 5; // MAP: Any ^x- x.VendorExtension = make([]*NamedAny, 0) - for _, item := range m { - k, ok := compiler.StringValue(item.Key) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) if ok { - v := item.Value + v := m.Content[i+1] if strings.HasPrefix(k, "x-") { pair := &NamedAny{} pair.Name = k result := &Any{} - handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + handled, resultFromExt, err := compiler.CallExtension(context, v, k) if handled { if err != nil { errors = append(errors, err) } else { - bytes, _ := yaml.Marshal(v) + bytes := compiler.Marshal(v) result.Yaml = string(bytes) result.Value = resultFromExt pair.Value = result } } else { - pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + pair.Value, err = NewAny(v, compiler.NewContext(k, v, context)) if err != nil { errors = append(errors, 
err) } @@ -175,7 +180,7 @@ func NewApiKeySecurity(in interface{}, context *compiler.Context) (*ApiKeySecuri } // NewBasicAuthenticationSecurity creates an object of type BasicAuthenticationSecurity if possible, returning an error if not. -func NewBasicAuthenticationSecurity(in interface{}, context *compiler.Context) (*BasicAuthenticationSecurity, error) { +func NewBasicAuthenticationSecurity(in *yaml.Node, context *compiler.Context) (*BasicAuthenticationSecurity, error) { errors := make([]error, 0) x := &BasicAuthenticationSecurity{} m, ok := compiler.UnpackMap(in) @@ -199,50 +204,50 @@ func NewBasicAuthenticationSecurity(in interface{}, context *compiler.Context) ( // string type = 1; v1 := compiler.MapValueForKey(m, "type") if v1 != nil { - x.Type, ok = v1.(string) + x.Type, ok = compiler.StringForScalarNode(v1) if !ok { - message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1) + message := fmt.Sprintf("has unexpected value for type: %s", compiler.Display(v1)) errors = append(errors, compiler.NewError(context, message)) } // check for valid enum values // [basic] if ok && !compiler.StringArrayContainsValue([]string{"basic"}, x.Type) { - message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1) + message := fmt.Sprintf("has unexpected value for type: %s", compiler.Display(v1)) errors = append(errors, compiler.NewError(context, message)) } } // string description = 2; v2 := compiler.MapValueForKey(m, "description") if v2 != nil { - x.Description, ok = v2.(string) + x.Description, ok = compiler.StringForScalarNode(v2) if !ok { - message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v2, v2) + message := fmt.Sprintf("has unexpected value for description: %s", compiler.Display(v2)) errors = append(errors, compiler.NewError(context, message)) } } // repeated NamedAny vendor_extension = 3; // MAP: Any ^x- x.VendorExtension = make([]*NamedAny, 0) - for _, item := range m { - k, ok := compiler.StringValue(item.Key) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) if ok { - v := item.Value + v := m.Content[i+1] if strings.HasPrefix(k, "x-") { pair := &NamedAny{} pair.Name = k result := &Any{} - handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + handled, resultFromExt, err := compiler.CallExtension(context, v, k) if handled { if err != nil { errors = append(errors, err) } else { - bytes, _ := yaml.Marshal(v) + bytes := compiler.Marshal(v) result.Yaml = string(bytes) result.Value = resultFromExt pair.Value = result } } else { - pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + pair.Value, err = NewAny(v, compiler.NewContext(k, v, context)) if err != nil { errors = append(errors, err) } @@ -256,7 +261,7 @@ func NewBasicAuthenticationSecurity(in interface{}, context *compiler.Context) ( } // NewBodyParameter creates an object of type BodyParameter if possible, returning an error if not. 
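// Sketch of the map-walking pattern used throughout this file: yaml.v2's
// `for _, item := range m` over a MapSlice becomes an index loop, because a
// yaml.v3 mapping node stores keys and values interleaved in its Content
// slice. visitPairs is a hypothetical helper for illustration only.
package sketch

import yaml "gopkg.in/yaml.v3"

func visitPairs(m *yaml.Node, visit func(key string, value *yaml.Node)) {
	if m == nil || m.Kind != yaml.MappingNode {
		return
	}
	for i := 0; i+1 < len(m.Content); i += 2 {
		// Content[i] is the key node, Content[i+1] its value node.
		if k := m.Content[i]; k.Kind == yaml.ScalarNode {
			visit(k.Value, m.Content[i+1])
		}
	}
}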
-func NewBodyParameter(in interface{}, context *compiler.Context) (*BodyParameter, error) { +func NewBodyParameter(in *yaml.Node, context *compiler.Context) (*BodyParameter, error) { errors := make([]error, 0) x := &BodyParameter{} m, ok := compiler.UnpackMap(in) @@ -280,42 +285,42 @@ func NewBodyParameter(in interface{}, context *compiler.Context) (*BodyParameter // string description = 1; v1 := compiler.MapValueForKey(m, "description") if v1 != nil { - x.Description, ok = v1.(string) + x.Description, ok = compiler.StringForScalarNode(v1) if !ok { - message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v1, v1) + message := fmt.Sprintf("has unexpected value for description: %s", compiler.Display(v1)) errors = append(errors, compiler.NewError(context, message)) } } // string name = 2; v2 := compiler.MapValueForKey(m, "name") if v2 != nil { - x.Name, ok = v2.(string) + x.Name, ok = compiler.StringForScalarNode(v2) if !ok { - message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v2, v2) + message := fmt.Sprintf("has unexpected value for name: %s", compiler.Display(v2)) errors = append(errors, compiler.NewError(context, message)) } } // string in = 3; v3 := compiler.MapValueForKey(m, "in") if v3 != nil { - x.In, ok = v3.(string) + x.In, ok = compiler.StringForScalarNode(v3) if !ok { - message := fmt.Sprintf("has unexpected value for in: %+v (%T)", v3, v3) + message := fmt.Sprintf("has unexpected value for in: %s", compiler.Display(v3)) errors = append(errors, compiler.NewError(context, message)) } // check for valid enum values // [body] if ok && !compiler.StringArrayContainsValue([]string{"body"}, x.In) { - message := fmt.Sprintf("has unexpected value for in: %+v (%T)", v3, v3) + message := fmt.Sprintf("has unexpected value for in: %s", compiler.Display(v3)) errors = append(errors, compiler.NewError(context, message)) } } // bool required = 4; v4 := compiler.MapValueForKey(m, "required") if v4 != nil { - x.Required, ok = v4.(bool) + x.Required, ok = compiler.BoolForScalarNode(v4) if !ok { - message := fmt.Sprintf("has unexpected value for required: %+v (%T)", v4, v4) + message := fmt.Sprintf("has unexpected value for required: %s", compiler.Display(v4)) errors = append(errors, compiler.NewError(context, message)) } } @@ -323,7 +328,7 @@ func NewBodyParameter(in interface{}, context *compiler.Context) (*BodyParameter v5 := compiler.MapValueForKey(m, "schema") if v5 != nil { var err error - x.Schema, err = NewSchema(v5, compiler.NewContext("schema", context)) + x.Schema, err = NewSchema(v5, compiler.NewContext("schema", v5, context)) if err != nil { errors = append(errors, err) } @@ -331,26 +336,26 @@ func NewBodyParameter(in interface{}, context *compiler.Context) (*BodyParameter // repeated NamedAny vendor_extension = 6; // MAP: Any ^x- x.VendorExtension = make([]*NamedAny, 0) - for _, item := range m { - k, ok := compiler.StringValue(item.Key) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) if ok { - v := item.Value + v := m.Content[i+1] if strings.HasPrefix(k, "x-") { pair := &NamedAny{} pair.Name = k result := &Any{} - handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + handled, resultFromExt, err := compiler.CallExtension(context, v, k) if handled { if err != nil { errors = append(errors, err) } else { - bytes, _ := yaml.Marshal(v) + bytes := compiler.Marshal(v) result.Yaml = string(bytes) result.Value = resultFromExt pair.Value = result } } else { - pair.Value, err = NewAny(v, 
compiler.NewContext(k, context)) + pair.Value, err = NewAny(v, compiler.NewContext(k, v, context)) if err != nil { errors = append(errors, err) } @@ -364,7 +369,7 @@ func NewBodyParameter(in interface{}, context *compiler.Context) (*BodyParameter } // NewContact creates an object of type Contact if possible, returning an error if not. -func NewContact(in interface{}, context *compiler.Context) (*Contact, error) { +func NewContact(in *yaml.Node, context *compiler.Context) (*Contact, error) { errors := make([]error, 0) x := &Contact{} m, ok := compiler.UnpackMap(in) @@ -382,53 +387,53 @@ func NewContact(in interface{}, context *compiler.Context) (*Contact, error) { // string name = 1; v1 := compiler.MapValueForKey(m, "name") if v1 != nil { - x.Name, ok = v1.(string) + x.Name, ok = compiler.StringForScalarNode(v1) if !ok { - message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1) + message := fmt.Sprintf("has unexpected value for name: %s", compiler.Display(v1)) errors = append(errors, compiler.NewError(context, message)) } } // string url = 2; v2 := compiler.MapValueForKey(m, "url") if v2 != nil { - x.Url, ok = v2.(string) + x.Url, ok = compiler.StringForScalarNode(v2) if !ok { - message := fmt.Sprintf("has unexpected value for url: %+v (%T)", v2, v2) + message := fmt.Sprintf("has unexpected value for url: %s", compiler.Display(v2)) errors = append(errors, compiler.NewError(context, message)) } } // string email = 3; v3 := compiler.MapValueForKey(m, "email") if v3 != nil { - x.Email, ok = v3.(string) + x.Email, ok = compiler.StringForScalarNode(v3) if !ok { - message := fmt.Sprintf("has unexpected value for email: %+v (%T)", v3, v3) + message := fmt.Sprintf("has unexpected value for email: %s", compiler.Display(v3)) errors = append(errors, compiler.NewError(context, message)) } } // repeated NamedAny vendor_extension = 4; // MAP: Any ^x- x.VendorExtension = make([]*NamedAny, 0) - for _, item := range m { - k, ok := compiler.StringValue(item.Key) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) if ok { - v := item.Value + v := m.Content[i+1] if strings.HasPrefix(k, "x-") { pair := &NamedAny{} pair.Name = k result := &Any{} - handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + handled, resultFromExt, err := compiler.CallExtension(context, v, k) if handled { if err != nil { errors = append(errors, err) } else { - bytes, _ := yaml.Marshal(v) + bytes := compiler.Marshal(v) result.Yaml = string(bytes) result.Value = resultFromExt pair.Value = result } } else { - pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + pair.Value, err = NewAny(v, compiler.NewContext(k, v, context)) if err != nil { errors = append(errors, err) } @@ -442,7 +447,7 @@ func NewContact(in interface{}, context *compiler.Context) (*Contact, error) { } // NewDefault creates an object of type Default if possible, returning an error if not. 
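// In the extension-handling hunks above, `bytes, _ := yaml.Marshal(v)`
// becomes `bytes := compiler.Marshal(v)`. yaml.v3's Marshal accepts a
// *yaml.Node directly, so a thin wrapper is plausible; this sketch
// (hypothetical name, assumed error handling) only illustrates the idea,
// not the vendored helper's actual behavior.
package sketch

import yaml "gopkg.in/yaml.v3"

func marshalNode(n *yaml.Node) []byte {
	b, err := yaml.Marshal(n)
	if err != nil {
		return nil // the real helper's error behavior is not shown in this diff
	}
	return b
}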
-func NewDefault(in interface{}, context *compiler.Context) (*Default, error) { +func NewDefault(in *yaml.Node, context *compiler.Context) (*Default, error) { errors := make([]error, 0) x := &Default{} m, ok := compiler.UnpackMap(in) @@ -453,25 +458,25 @@ func NewDefault(in interface{}, context *compiler.Context) (*Default, error) { // repeated NamedAny additional_properties = 1; // MAP: Any x.AdditionalProperties = make([]*NamedAny, 0) - for _, item := range m { - k, ok := compiler.StringValue(item.Key) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) if ok { - v := item.Value + v := m.Content[i+1] pair := &NamedAny{} pair.Name = k result := &Any{} - handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + handled, resultFromExt, err := compiler.CallExtension(context, v, k) if handled { if err != nil { errors = append(errors, err) } else { - bytes, _ := yaml.Marshal(v) + bytes := compiler.Marshal(v) result.Yaml = string(bytes) result.Value = resultFromExt pair.Value = result } } else { - pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + pair.Value, err = NewAny(v, compiler.NewContext(k, v, context)) if err != nil { errors = append(errors, err) } @@ -484,7 +489,7 @@ func NewDefault(in interface{}, context *compiler.Context) (*Default, error) { } // NewDefinitions creates an object of type Definitions if possible, returning an error if not. -func NewDefinitions(in interface{}, context *compiler.Context) (*Definitions, error) { +func NewDefinitions(in *yaml.Node, context *compiler.Context) (*Definitions, error) { errors := make([]error, 0) x := &Definitions{} m, ok := compiler.UnpackMap(in) @@ -495,14 +500,14 @@ func NewDefinitions(in interface{}, context *compiler.Context) (*Definitions, er // repeated NamedSchema additional_properties = 1; // MAP: Schema x.AdditionalProperties = make([]*NamedSchema, 0) - for _, item := range m { - k, ok := compiler.StringValue(item.Key) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) if ok { - v := item.Value + v := m.Content[i+1] pair := &NamedSchema{} pair.Name = k var err error - pair.Value, err = NewSchema(v, compiler.NewContext(k, context)) + pair.Value, err = NewSchema(v, compiler.NewContext(k, v, context)) if err != nil { errors = append(errors, err) } @@ -514,7 +519,7 @@ func NewDefinitions(in interface{}, context *compiler.Context) (*Definitions, er } // NewDocument creates an object of type Document if possible, returning an error if not. 
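// NewDocument below threads each sub-node into compiler.NewContext (e.g.
// NewContext("info", v2, context)), so errors can point at the exact YAML
// node they concern. A hypothetical context shaped like that extra argument
// suggests why; this is a sketch, not the vendored compiler.Context.
package sketch

import yaml "gopkg.in/yaml.v3"

type context struct {
	parent *context
	name   string
	node   *yaml.Node // the YAML node this context describes; new in the yaml.v3 port
}

func newContext(name string, node *yaml.Node, parent *context) *context {
	return &context{parent: parent, name: name, node: node}
}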
-func NewDocument(in interface{}, context *compiler.Context) (*Document, error) { +func NewDocument(in *yaml.Node, context *compiler.Context) (*Document, error) { errors := make([]error, 0) x := &Document{} m, ok := compiler.UnpackMap(in) @@ -538,15 +543,15 @@ func NewDocument(in interface{}, context *compiler.Context) (*Document, error) { // string swagger = 1; v1 := compiler.MapValueForKey(m, "swagger") if v1 != nil { - x.Swagger, ok = v1.(string) + x.Swagger, ok = compiler.StringForScalarNode(v1) if !ok { - message := fmt.Sprintf("has unexpected value for swagger: %+v (%T)", v1, v1) + message := fmt.Sprintf("has unexpected value for swagger: %s", compiler.Display(v1)) errors = append(errors, compiler.NewError(context, message)) } // check for valid enum values // [2.0] if ok && !compiler.StringArrayContainsValue([]string{"2.0"}, x.Swagger) { - message := fmt.Sprintf("has unexpected value for swagger: %+v (%T)", v1, v1) + message := fmt.Sprintf("has unexpected value for swagger: %s", compiler.Display(v1)) errors = append(errors, compiler.NewError(context, message)) } } @@ -554,7 +559,7 @@ func NewDocument(in interface{}, context *compiler.Context) (*Document, error) { v2 := compiler.MapValueForKey(m, "info") if v2 != nil { var err error - x.Info, err = NewInfo(v2, compiler.NewContext("info", context)) + x.Info, err = NewInfo(v2, compiler.NewContext("info", v2, context)) if err != nil { errors = append(errors, err) } @@ -562,57 +567,57 @@ func NewDocument(in interface{}, context *compiler.Context) (*Document, error) { // string host = 3; v3 := compiler.MapValueForKey(m, "host") if v3 != nil { - x.Host, ok = v3.(string) + x.Host, ok = compiler.StringForScalarNode(v3) if !ok { - message := fmt.Sprintf("has unexpected value for host: %+v (%T)", v3, v3) + message := fmt.Sprintf("has unexpected value for host: %s", compiler.Display(v3)) errors = append(errors, compiler.NewError(context, message)) } } // string base_path = 4; v4 := compiler.MapValueForKey(m, "basePath") if v4 != nil { - x.BasePath, ok = v4.(string) + x.BasePath, ok = compiler.StringForScalarNode(v4) if !ok { - message := fmt.Sprintf("has unexpected value for basePath: %+v (%T)", v4, v4) + message := fmt.Sprintf("has unexpected value for basePath: %s", compiler.Display(v4)) errors = append(errors, compiler.NewError(context, message)) } } // repeated string schemes = 5; v5 := compiler.MapValueForKey(m, "schemes") if v5 != nil { - v, ok := v5.([]interface{}) + v, ok := compiler.SequenceNodeForNode(v5) if ok { - x.Schemes = compiler.ConvertInterfaceArrayToStringArray(v) + x.Schemes = compiler.StringArrayForSequenceNode(v) } else { - message := fmt.Sprintf("has unexpected value for schemes: %+v (%T)", v5, v5) + message := fmt.Sprintf("has unexpected value for schemes: %s", compiler.Display(v5)) errors = append(errors, compiler.NewError(context, message)) } // check for valid enum values // [http https ws wss] if ok && !compiler.StringArrayContainsValues([]string{"http", "https", "ws", "wss"}, x.Schemes) { - message := fmt.Sprintf("has unexpected value for schemes: %+v", v5) + message := fmt.Sprintf("has unexpected value for schemes: %s", compiler.Display(v5)) errors = append(errors, compiler.NewError(context, message)) } } // repeated string consumes = 6; v6 := compiler.MapValueForKey(m, "consumes") if v6 != nil { - v, ok := v6.([]interface{}) + v, ok := compiler.SequenceNodeForNode(v6) if ok { - x.Consumes = compiler.ConvertInterfaceArrayToStringArray(v) + x.Consumes = compiler.StringArrayForSequenceNode(v) } else { - message := 
fmt.Sprintf("has unexpected value for consumes: %+v (%T)", v6, v6) + message := fmt.Sprintf("has unexpected value for consumes: %s", compiler.Display(v6)) errors = append(errors, compiler.NewError(context, message)) } } // repeated string produces = 7; v7 := compiler.MapValueForKey(m, "produces") if v7 != nil { - v, ok := v7.([]interface{}) + v, ok := compiler.SequenceNodeForNode(v7) if ok { - x.Produces = compiler.ConvertInterfaceArrayToStringArray(v) + x.Produces = compiler.StringArrayForSequenceNode(v) } else { - message := fmt.Sprintf("has unexpected value for produces: %+v (%T)", v7, v7) + message := fmt.Sprintf("has unexpected value for produces: %s", compiler.Display(v7)) errors = append(errors, compiler.NewError(context, message)) } } @@ -620,7 +625,7 @@ func NewDocument(in interface{}, context *compiler.Context) (*Document, error) { v8 := compiler.MapValueForKey(m, "paths") if v8 != nil { var err error - x.Paths, err = NewPaths(v8, compiler.NewContext("paths", context)) + x.Paths, err = NewPaths(v8, compiler.NewContext("paths", v8, context)) if err != nil { errors = append(errors, err) } @@ -629,7 +634,7 @@ func NewDocument(in interface{}, context *compiler.Context) (*Document, error) { v9 := compiler.MapValueForKey(m, "definitions") if v9 != nil { var err error - x.Definitions, err = NewDefinitions(v9, compiler.NewContext("definitions", context)) + x.Definitions, err = NewDefinitions(v9, compiler.NewContext("definitions", v9, context)) if err != nil { errors = append(errors, err) } @@ -638,7 +643,7 @@ func NewDocument(in interface{}, context *compiler.Context) (*Document, error) { v10 := compiler.MapValueForKey(m, "parameters") if v10 != nil { var err error - x.Parameters, err = NewParameterDefinitions(v10, compiler.NewContext("parameters", context)) + x.Parameters, err = NewParameterDefinitions(v10, compiler.NewContext("parameters", v10, context)) if err != nil { errors = append(errors, err) } @@ -647,7 +652,7 @@ func NewDocument(in interface{}, context *compiler.Context) (*Document, error) { v11 := compiler.MapValueForKey(m, "responses") if v11 != nil { var err error - x.Responses, err = NewResponseDefinitions(v11, compiler.NewContext("responses", context)) + x.Responses, err = NewResponseDefinitions(v11, compiler.NewContext("responses", v11, context)) if err != nil { errors = append(errors, err) } @@ -657,10 +662,10 @@ func NewDocument(in interface{}, context *compiler.Context) (*Document, error) { if v12 != nil { // repeated SecurityRequirement x.Security = make([]*SecurityRequirement, 0) - a, ok := v12.([]interface{}) + a, ok := compiler.SequenceNodeForNode(v12) if ok { - for _, item := range a { - y, err := NewSecurityRequirement(item, compiler.NewContext("security", context)) + for _, item := range a.Content { + y, err := NewSecurityRequirement(item, compiler.NewContext("security", item, context)) if err != nil { errors = append(errors, err) } @@ -672,7 +677,7 @@ func NewDocument(in interface{}, context *compiler.Context) (*Document, error) { v13 := compiler.MapValueForKey(m, "securityDefinitions") if v13 != nil { var err error - x.SecurityDefinitions, err = NewSecurityDefinitions(v13, compiler.NewContext("securityDefinitions", context)) + x.SecurityDefinitions, err = NewSecurityDefinitions(v13, compiler.NewContext("securityDefinitions", v13, context)) if err != nil { errors = append(errors, err) } @@ -682,10 +687,10 @@ func NewDocument(in interface{}, context *compiler.Context) (*Document, error) { if v14 != nil { // repeated Tag x.Tags = make([]*Tag, 0) - a, ok := 
v14.([]interface{}) + a, ok := compiler.SequenceNodeForNode(v14) if ok { - for _, item := range a { - y, err := NewTag(item, compiler.NewContext("tags", context)) + for _, item := range a.Content { + y, err := NewTag(item, compiler.NewContext("tags", item, context)) if err != nil { errors = append(errors, err) } @@ -697,7 +702,7 @@ func NewDocument(in interface{}, context *compiler.Context) (*Document, error) { v15 := compiler.MapValueForKey(m, "externalDocs") if v15 != nil { var err error - x.ExternalDocs, err = NewExternalDocs(v15, compiler.NewContext("externalDocs", context)) + x.ExternalDocs, err = NewExternalDocs(v15, compiler.NewContext("externalDocs", v15, context)) if err != nil { errors = append(errors, err) } @@ -705,26 +710,26 @@ func NewDocument(in interface{}, context *compiler.Context) (*Document, error) { // repeated NamedAny vendor_extension = 16; // MAP: Any ^x- x.VendorExtension = make([]*NamedAny, 0) - for _, item := range m { - k, ok := compiler.StringValue(item.Key) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) if ok { - v := item.Value + v := m.Content[i+1] if strings.HasPrefix(k, "x-") { pair := &NamedAny{} pair.Name = k result := &Any{} - handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + handled, resultFromExt, err := compiler.CallExtension(context, v, k) if handled { if err != nil { errors = append(errors, err) } else { - bytes, _ := yaml.Marshal(v) + bytes := compiler.Marshal(v) result.Yaml = string(bytes) result.Value = resultFromExt pair.Value = result } } else { - pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + pair.Value, err = NewAny(v, compiler.NewContext(k, v, context)) if err != nil { errors = append(errors, err) } @@ -738,7 +743,7 @@ func NewDocument(in interface{}, context *compiler.Context) (*Document, error) { } // NewExamples creates an object of type Examples if possible, returning an error if not. -func NewExamples(in interface{}, context *compiler.Context) (*Examples, error) { +func NewExamples(in *yaml.Node, context *compiler.Context) (*Examples, error) { errors := make([]error, 0) x := &Examples{} m, ok := compiler.UnpackMap(in) @@ -749,25 +754,25 @@ func NewExamples(in interface{}, context *compiler.Context) (*Examples, error) { // repeated NamedAny additional_properties = 1; // MAP: Any x.AdditionalProperties = make([]*NamedAny, 0) - for _, item := range m { - k, ok := compiler.StringValue(item.Key) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) if ok { - v := item.Value + v := m.Content[i+1] pair := &NamedAny{} pair.Name = k result := &Any{} - handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + handled, resultFromExt, err := compiler.CallExtension(context, v, k) if handled { if err != nil { errors = append(errors, err) } else { - bytes, _ := yaml.Marshal(v) + bytes := compiler.Marshal(v) result.Yaml = string(bytes) result.Value = resultFromExt pair.Value = result } } else { - pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + pair.Value, err = NewAny(v, compiler.NewContext(k, v, context)) if err != nil { errors = append(errors, err) } @@ -780,7 +785,7 @@ func NewExamples(in interface{}, context *compiler.Context) (*Examples, error) { } // NewExternalDocs creates an object of type ExternalDocs if possible, returning an error if not. 
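// The schemes/consumes/security/tags hunks above replace `v.([]interface{})`
// with compiler.SequenceNodeForNode plus iteration over a.Content. Assuming
// only the yaml.v3 node API, extracting a string slice from a sequence node
// can be sketched like this (stringsFromSequence is a hypothetical name).
package sketch

import yaml "gopkg.in/yaml.v3"

func stringsFromSequence(n *yaml.Node) ([]string, bool) {
	if n == nil || n.Kind != yaml.SequenceNode {
		return nil, false
	}
	out := make([]string, 0, len(n.Content))
	for _, item := range n.Content {
		if item.Kind != yaml.ScalarNode {
			return nil, false // non-scalar entries are rejected in this sketch
		}
		out = append(out, item.Value)
	}
	return out, true
}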
-func NewExternalDocs(in interface{}, context *compiler.Context) (*ExternalDocs, error) { +func NewExternalDocs(in *yaml.Node, context *compiler.Context) (*ExternalDocs, error) { errors := make([]error, 0) x := &ExternalDocs{} m, ok := compiler.UnpackMap(in) @@ -804,44 +809,44 @@ func NewExternalDocs(in interface{}, context *compiler.Context) (*ExternalDocs, // string description = 1; v1 := compiler.MapValueForKey(m, "description") if v1 != nil { - x.Description, ok = v1.(string) + x.Description, ok = compiler.StringForScalarNode(v1) if !ok { - message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v1, v1) + message := fmt.Sprintf("has unexpected value for description: %s", compiler.Display(v1)) errors = append(errors, compiler.NewError(context, message)) } } // string url = 2; v2 := compiler.MapValueForKey(m, "url") if v2 != nil { - x.Url, ok = v2.(string) + x.Url, ok = compiler.StringForScalarNode(v2) if !ok { - message := fmt.Sprintf("has unexpected value for url: %+v (%T)", v2, v2) + message := fmt.Sprintf("has unexpected value for url: %s", compiler.Display(v2)) errors = append(errors, compiler.NewError(context, message)) } } // repeated NamedAny vendor_extension = 3; // MAP: Any ^x- x.VendorExtension = make([]*NamedAny, 0) - for _, item := range m { - k, ok := compiler.StringValue(item.Key) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) if ok { - v := item.Value + v := m.Content[i+1] if strings.HasPrefix(k, "x-") { pair := &NamedAny{} pair.Name = k result := &Any{} - handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + handled, resultFromExt, err := compiler.CallExtension(context, v, k) if handled { if err != nil { errors = append(errors, err) } else { - bytes, _ := yaml.Marshal(v) + bytes := compiler.Marshal(v) result.Yaml = string(bytes) result.Value = resultFromExt pair.Value = result } } else { - pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + pair.Value, err = NewAny(v, compiler.NewContext(k, v, context)) if err != nil { errors = append(errors, err) } @@ -855,7 +860,7 @@ func NewExternalDocs(in interface{}, context *compiler.Context) (*ExternalDocs, } // NewFileSchema creates an object of type FileSchema if possible, returning an error if not. 
-func NewFileSchema(in interface{}, context *compiler.Context) (*FileSchema, error) { +func NewFileSchema(in *yaml.Node, context *compiler.Context) (*FileSchema, error) { errors := make([]error, 0) x := &FileSchema{} m, ok := compiler.UnpackMap(in) @@ -879,27 +884,27 @@ func NewFileSchema(in interface{}, context *compiler.Context) (*FileSchema, erro // string format = 1; v1 := compiler.MapValueForKey(m, "format") if v1 != nil { - x.Format, ok = v1.(string) + x.Format, ok = compiler.StringForScalarNode(v1) if !ok { - message := fmt.Sprintf("has unexpected value for format: %+v (%T)", v1, v1) + message := fmt.Sprintf("has unexpected value for format: %s", compiler.Display(v1)) errors = append(errors, compiler.NewError(context, message)) } } // string title = 2; v2 := compiler.MapValueForKey(m, "title") if v2 != nil { - x.Title, ok = v2.(string) + x.Title, ok = compiler.StringForScalarNode(v2) if !ok { - message := fmt.Sprintf("has unexpected value for title: %+v (%T)", v2, v2) + message := fmt.Sprintf("has unexpected value for title: %s", compiler.Display(v2)) errors = append(errors, compiler.NewError(context, message)) } } // string description = 3; v3 := compiler.MapValueForKey(m, "description") if v3 != nil { - x.Description, ok = v3.(string) + x.Description, ok = compiler.StringForScalarNode(v3) if !ok { - message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v3, v3) + message := fmt.Sprintf("has unexpected value for description: %s", compiler.Display(v3)) errors = append(errors, compiler.NewError(context, message)) } } @@ -907,7 +912,7 @@ func NewFileSchema(in interface{}, context *compiler.Context) (*FileSchema, erro v4 := compiler.MapValueForKey(m, "default") if v4 != nil { var err error - x.Default, err = NewAny(v4, compiler.NewContext("default", context)) + x.Default, err = NewAny(v4, compiler.NewContext("default", v4, context)) if err != nil { errors = append(errors, err) } @@ -915,35 +920,35 @@ func NewFileSchema(in interface{}, context *compiler.Context) (*FileSchema, erro // repeated string required = 5; v5 := compiler.MapValueForKey(m, "required") if v5 != nil { - v, ok := v5.([]interface{}) + v, ok := compiler.SequenceNodeForNode(v5) if ok { - x.Required = compiler.ConvertInterfaceArrayToStringArray(v) + x.Required = compiler.StringArrayForSequenceNode(v) } else { - message := fmt.Sprintf("has unexpected value for required: %+v (%T)", v5, v5) + message := fmt.Sprintf("has unexpected value for required: %s", compiler.Display(v5)) errors = append(errors, compiler.NewError(context, message)) } } // string type = 6; v6 := compiler.MapValueForKey(m, "type") if v6 != nil { - x.Type, ok = v6.(string) + x.Type, ok = compiler.StringForScalarNode(v6) if !ok { - message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v6, v6) + message := fmt.Sprintf("has unexpected value for type: %s", compiler.Display(v6)) errors = append(errors, compiler.NewError(context, message)) } // check for valid enum values // [file] if ok && !compiler.StringArrayContainsValue([]string{"file"}, x.Type) { - message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v6, v6) + message := fmt.Sprintf("has unexpected value for type: %s", compiler.Display(v6)) errors = append(errors, compiler.NewError(context, message)) } } // bool read_only = 7; v7 := compiler.MapValueForKey(m, "readOnly") if v7 != nil { - x.ReadOnly, ok = v7.(bool) + x.ReadOnly, ok = compiler.BoolForScalarNode(v7) if !ok { - message := fmt.Sprintf("has unexpected value for readOnly: %+v (%T)", v7, v7) + message := 
fmt.Sprintf("has unexpected value for readOnly: %s", compiler.Display(v7)) errors = append(errors, compiler.NewError(context, message)) } } @@ -951,7 +956,7 @@ func NewFileSchema(in interface{}, context *compiler.Context) (*FileSchema, erro v8 := compiler.MapValueForKey(m, "externalDocs") if v8 != nil { var err error - x.ExternalDocs, err = NewExternalDocs(v8, compiler.NewContext("externalDocs", context)) + x.ExternalDocs, err = NewExternalDocs(v8, compiler.NewContext("externalDocs", v8, context)) if err != nil { errors = append(errors, err) } @@ -960,7 +965,7 @@ func NewFileSchema(in interface{}, context *compiler.Context) (*FileSchema, erro v9 := compiler.MapValueForKey(m, "example") if v9 != nil { var err error - x.Example, err = NewAny(v9, compiler.NewContext("example", context)) + x.Example, err = NewAny(v9, compiler.NewContext("example", v9, context)) if err != nil { errors = append(errors, err) } @@ -968,26 +973,26 @@ func NewFileSchema(in interface{}, context *compiler.Context) (*FileSchema, erro // repeated NamedAny vendor_extension = 10; // MAP: Any ^x- x.VendorExtension = make([]*NamedAny, 0) - for _, item := range m { - k, ok := compiler.StringValue(item.Key) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) if ok { - v := item.Value + v := m.Content[i+1] if strings.HasPrefix(k, "x-") { pair := &NamedAny{} pair.Name = k result := &Any{} - handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + handled, resultFromExt, err := compiler.CallExtension(context, v, k) if handled { if err != nil { errors = append(errors, err) } else { - bytes, _ := yaml.Marshal(v) + bytes := compiler.Marshal(v) result.Yaml = string(bytes) result.Value = resultFromExt pair.Value = result } } else { - pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + pair.Value, err = NewAny(v, compiler.NewContext(k, v, context)) if err != nil { errors = append(errors, err) } @@ -1001,7 +1006,7 @@ func NewFileSchema(in interface{}, context *compiler.Context) (*FileSchema, erro } // NewFormDataParameterSubSchema creates an object of type FormDataParameterSubSchema if possible, returning an error if not. 
-func NewFormDataParameterSubSchema(in interface{}, context *compiler.Context) (*FormDataParameterSubSchema, error) { +func NewFormDataParameterSubSchema(in *yaml.Node, context *compiler.Context) (*FormDataParameterSubSchema, error) { errors := make([]error, 0) x := &FormDataParameterSubSchema{} m, ok := compiler.UnpackMap(in) @@ -1019,75 +1024,75 @@ func NewFormDataParameterSubSchema(in interface{}, context *compiler.Context) (* // bool required = 1; v1 := compiler.MapValueForKey(m, "required") if v1 != nil { - x.Required, ok = v1.(bool) + x.Required, ok = compiler.BoolForScalarNode(v1) if !ok { - message := fmt.Sprintf("has unexpected value for required: %+v (%T)", v1, v1) + message := fmt.Sprintf("has unexpected value for required: %s", compiler.Display(v1)) errors = append(errors, compiler.NewError(context, message)) } } // string in = 2; v2 := compiler.MapValueForKey(m, "in") if v2 != nil { - x.In, ok = v2.(string) + x.In, ok = compiler.StringForScalarNode(v2) if !ok { - message := fmt.Sprintf("has unexpected value for in: %+v (%T)", v2, v2) + message := fmt.Sprintf("has unexpected value for in: %s", compiler.Display(v2)) errors = append(errors, compiler.NewError(context, message)) } // check for valid enum values // [formData] if ok && !compiler.StringArrayContainsValue([]string{"formData"}, x.In) { - message := fmt.Sprintf("has unexpected value for in: %+v (%T)", v2, v2) + message := fmt.Sprintf("has unexpected value for in: %s", compiler.Display(v2)) errors = append(errors, compiler.NewError(context, message)) } } // string description = 3; v3 := compiler.MapValueForKey(m, "description") if v3 != nil { - x.Description, ok = v3.(string) + x.Description, ok = compiler.StringForScalarNode(v3) if !ok { - message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v3, v3) + message := fmt.Sprintf("has unexpected value for description: %s", compiler.Display(v3)) errors = append(errors, compiler.NewError(context, message)) } } // string name = 4; v4 := compiler.MapValueForKey(m, "name") if v4 != nil { - x.Name, ok = v4.(string) + x.Name, ok = compiler.StringForScalarNode(v4) if !ok { - message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v4, v4) + message := fmt.Sprintf("has unexpected value for name: %s", compiler.Display(v4)) errors = append(errors, compiler.NewError(context, message)) } } // bool allow_empty_value = 5; v5 := compiler.MapValueForKey(m, "allowEmptyValue") if v5 != nil { - x.AllowEmptyValue, ok = v5.(bool) + x.AllowEmptyValue, ok = compiler.BoolForScalarNode(v5) if !ok { - message := fmt.Sprintf("has unexpected value for allowEmptyValue: %+v (%T)", v5, v5) + message := fmt.Sprintf("has unexpected value for allowEmptyValue: %s", compiler.Display(v5)) errors = append(errors, compiler.NewError(context, message)) } } // string type = 6; v6 := compiler.MapValueForKey(m, "type") if v6 != nil { - x.Type, ok = v6.(string) + x.Type, ok = compiler.StringForScalarNode(v6) if !ok { - message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v6, v6) + message := fmt.Sprintf("has unexpected value for type: %s", compiler.Display(v6)) errors = append(errors, compiler.NewError(context, message)) } // check for valid enum values // [string number boolean integer array file] if ok && !compiler.StringArrayContainsValue([]string{"string", "number", "boolean", "integer", "array", "file"}, x.Type) { - message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v6, v6) + message := fmt.Sprintf("has unexpected value for type: %s", compiler.Display(v6)) 
errors = append(errors, compiler.NewError(context, message)) } } // string format = 7; v7 := compiler.MapValueForKey(m, "format") if v7 != nil { - x.Format, ok = v7.(string) + x.Format, ok = compiler.StringForScalarNode(v7) if !ok { - message := fmt.Sprintf("has unexpected value for format: %+v (%T)", v7, v7) + message := fmt.Sprintf("has unexpected value for format: %s", compiler.Display(v7)) errors = append(errors, compiler.NewError(context, message)) } } @@ -1095,7 +1100,7 @@ func NewFormDataParameterSubSchema(in interface{}, context *compiler.Context) (* v8 := compiler.MapValueForKey(m, "items") if v8 != nil { var err error - x.Items, err = NewPrimitivesItems(v8, compiler.NewContext("items", context)) + x.Items, err = NewPrimitivesItems(v8, compiler.NewContext("items", v8, context)) if err != nil { errors = append(errors, err) } @@ -1103,15 +1108,15 @@ func NewFormDataParameterSubSchema(in interface{}, context *compiler.Context) (* // string collection_format = 9; v9 := compiler.MapValueForKey(m, "collectionFormat") if v9 != nil { - x.CollectionFormat, ok = v9.(string) + x.CollectionFormat, ok = compiler.StringForScalarNode(v9) if !ok { - message := fmt.Sprintf("has unexpected value for collectionFormat: %+v (%T)", v9, v9) + message := fmt.Sprintf("has unexpected value for collectionFormat: %s", compiler.Display(v9)) errors = append(errors, compiler.NewError(context, message)) } // check for valid enum values // [csv ssv tsv pipes multi] if ok && !compiler.StringArrayContainsValue([]string{"csv", "ssv", "tsv", "pipes", "multi"}, x.CollectionFormat) { - message := fmt.Sprintf("has unexpected value for collectionFormat: %+v (%T)", v9, v9) + message := fmt.Sprintf("has unexpected value for collectionFormat: %s", compiler.Display(v9)) errors = append(errors, compiler.NewError(context, message)) } } @@ -1119,7 +1124,7 @@ func NewFormDataParameterSubSchema(in interface{}, context *compiler.Context) (* v10 := compiler.MapValueForKey(m, "default") if v10 != nil { var err error - x.Default, err = NewAny(v10, compiler.NewContext("default", context)) + x.Default, err = NewAny(v10, compiler.NewContext("default", v10, context)) if err != nil { errors = append(errors, err) } @@ -1127,126 +1132,102 @@ func NewFormDataParameterSubSchema(in interface{}, context *compiler.Context) (* // float maximum = 11; v11 := compiler.MapValueForKey(m, "maximum") if v11 != nil { - switch v11 := v11.(type) { - case float64: - x.Maximum = v11 - case float32: - x.Maximum = float64(v11) - case uint64: - x.Maximum = float64(v11) - case uint32: - x.Maximum = float64(v11) - case int64: - x.Maximum = float64(v11) - case int32: - x.Maximum = float64(v11) - case int: - x.Maximum = float64(v11) - default: - message := fmt.Sprintf("has unexpected value for maximum: %+v (%T)", v11, v11) + v, ok := compiler.FloatForScalarNode(v11) + if ok { + x.Maximum = v + } else { + message := fmt.Sprintf("has unexpected value for maximum: %s", compiler.Display(v11)) errors = append(errors, compiler.NewError(context, message)) } } // bool exclusive_maximum = 12; v12 := compiler.MapValueForKey(m, "exclusiveMaximum") if v12 != nil { - x.ExclusiveMaximum, ok = v12.(bool) + x.ExclusiveMaximum, ok = compiler.BoolForScalarNode(v12) if !ok { - message := fmt.Sprintf("has unexpected value for exclusiveMaximum: %+v (%T)", v12, v12) + message := fmt.Sprintf("has unexpected value for exclusiveMaximum: %s", compiler.Display(v12)) errors = append(errors, compiler.NewError(context, message)) } } // float minimum = 13; v13 := compiler.MapValueForKey(m, 
"minimum") if v13 != nil { - switch v13 := v13.(type) { - case float64: - x.Minimum = v13 - case float32: - x.Minimum = float64(v13) - case uint64: - x.Minimum = float64(v13) - case uint32: - x.Minimum = float64(v13) - case int64: - x.Minimum = float64(v13) - case int32: - x.Minimum = float64(v13) - case int: - x.Minimum = float64(v13) - default: - message := fmt.Sprintf("has unexpected value for minimum: %+v (%T)", v13, v13) + v, ok := compiler.FloatForScalarNode(v13) + if ok { + x.Minimum = v + } else { + message := fmt.Sprintf("has unexpected value for minimum: %s", compiler.Display(v13)) errors = append(errors, compiler.NewError(context, message)) } } // bool exclusive_minimum = 14; v14 := compiler.MapValueForKey(m, "exclusiveMinimum") if v14 != nil { - x.ExclusiveMinimum, ok = v14.(bool) + x.ExclusiveMinimum, ok = compiler.BoolForScalarNode(v14) if !ok { - message := fmt.Sprintf("has unexpected value for exclusiveMinimum: %+v (%T)", v14, v14) + message := fmt.Sprintf("has unexpected value for exclusiveMinimum: %s", compiler.Display(v14)) errors = append(errors, compiler.NewError(context, message)) } } // int64 max_length = 15; v15 := compiler.MapValueForKey(m, "maxLength") if v15 != nil { - t, ok := v15.(int) + t, ok := compiler.IntForScalarNode(v15) if ok { x.MaxLength = int64(t) } else { - message := fmt.Sprintf("has unexpected value for maxLength: %+v (%T)", v15, v15) + message := fmt.Sprintf("has unexpected value for maxLength: %s", compiler.Display(v15)) errors = append(errors, compiler.NewError(context, message)) } } // int64 min_length = 16; v16 := compiler.MapValueForKey(m, "minLength") if v16 != nil { - t, ok := v16.(int) + t, ok := compiler.IntForScalarNode(v16) if ok { x.MinLength = int64(t) } else { - message := fmt.Sprintf("has unexpected value for minLength: %+v (%T)", v16, v16) + message := fmt.Sprintf("has unexpected value for minLength: %s", compiler.Display(v16)) errors = append(errors, compiler.NewError(context, message)) } } // string pattern = 17; v17 := compiler.MapValueForKey(m, "pattern") if v17 != nil { - x.Pattern, ok = v17.(string) + x.Pattern, ok = compiler.StringForScalarNode(v17) if !ok { - message := fmt.Sprintf("has unexpected value for pattern: %+v (%T)", v17, v17) + message := fmt.Sprintf("has unexpected value for pattern: %s", compiler.Display(v17)) errors = append(errors, compiler.NewError(context, message)) } } // int64 max_items = 18; v18 := compiler.MapValueForKey(m, "maxItems") if v18 != nil { - t, ok := v18.(int) + t, ok := compiler.IntForScalarNode(v18) if ok { x.MaxItems = int64(t) } else { - message := fmt.Sprintf("has unexpected value for maxItems: %+v (%T)", v18, v18) + message := fmt.Sprintf("has unexpected value for maxItems: %s", compiler.Display(v18)) errors = append(errors, compiler.NewError(context, message)) } } // int64 min_items = 19; v19 := compiler.MapValueForKey(m, "minItems") if v19 != nil { - t, ok := v19.(int) + t, ok := compiler.IntForScalarNode(v19) if ok { x.MinItems = int64(t) } else { - message := fmt.Sprintf("has unexpected value for minItems: %+v (%T)", v19, v19) + message := fmt.Sprintf("has unexpected value for minItems: %s", compiler.Display(v19)) errors = append(errors, compiler.NewError(context, message)) } } // bool unique_items = 20; v20 := compiler.MapValueForKey(m, "uniqueItems") if v20 != nil { - x.UniqueItems, ok = v20.(bool) + x.UniqueItems, ok = compiler.BoolForScalarNode(v20) if !ok { - message := fmt.Sprintf("has unexpected value for uniqueItems: %+v (%T)", v20, v20) + message := fmt.Sprintf("has 
unexpected value for uniqueItems: %s", compiler.Display(v20)) errors = append(errors, compiler.NewError(context, message)) } } @@ -1255,10 +1236,10 @@ func NewFormDataParameterSubSchema(in interface{}, context *compiler.Context) (* if v21 != nil { // repeated Any x.Enum = make([]*Any, 0) - a, ok := v21.([]interface{}) + a, ok := compiler.SequenceNodeForNode(v21) if ok { - for _, item := range a { - y, err := NewAny(item, compiler.NewContext("enum", context)) + for _, item := range a.Content { + y, err := NewAny(item, compiler.NewContext("enum", item, context)) if err != nil { errors = append(errors, err) } @@ -1269,49 +1250,37 @@ func NewFormDataParameterSubSchema(in interface{}, context *compiler.Context) (* // float multiple_of = 22; v22 := compiler.MapValueForKey(m, "multipleOf") if v22 != nil { - switch v22 := v22.(type) { - case float64: - x.MultipleOf = v22 - case float32: - x.MultipleOf = float64(v22) - case uint64: - x.MultipleOf = float64(v22) - case uint32: - x.MultipleOf = float64(v22) - case int64: - x.MultipleOf = float64(v22) - case int32: - x.MultipleOf = float64(v22) - case int: - x.MultipleOf = float64(v22) - default: - message := fmt.Sprintf("has unexpected value for multipleOf: %+v (%T)", v22, v22) + v, ok := compiler.FloatForScalarNode(v22) + if ok { + x.MultipleOf = v + } else { + message := fmt.Sprintf("has unexpected value for multipleOf: %s", compiler.Display(v22)) errors = append(errors, compiler.NewError(context, message)) } } // repeated NamedAny vendor_extension = 23; // MAP: Any ^x- x.VendorExtension = make([]*NamedAny, 0) - for _, item := range m { - k, ok := compiler.StringValue(item.Key) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) if ok { - v := item.Value + v := m.Content[i+1] if strings.HasPrefix(k, "x-") { pair := &NamedAny{} pair.Name = k result := &Any{} - handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + handled, resultFromExt, err := compiler.CallExtension(context, v, k) if handled { if err != nil { errors = append(errors, err) } else { - bytes, _ := yaml.Marshal(v) + bytes := compiler.Marshal(v) result.Yaml = string(bytes) result.Value = resultFromExt pair.Value = result } } else { - pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + pair.Value, err = NewAny(v, compiler.NewContext(k, v, context)) if err != nil { errors = append(errors, err) } @@ -1325,7 +1294,7 @@ func NewFormDataParameterSubSchema(in interface{}, context *compiler.Context) (* } // NewHeader creates an object of type Header if possible, returning an error if not. 
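// The maxLength/minItems-style hunks swap `v.(int)` for
// compiler.IntForScalarNode; the call sites still convert with int64(t), so
// the helper evidently yields an int. A hedged sketch under that assumption,
// again using only the yaml.v3 scalar-node fields shown above:
package sketch

import (
	"strconv"

	yaml "gopkg.in/yaml.v3"
)

func intForScalarNode(n *yaml.Node) (int, bool) {
	if n == nil || n.Kind != yaml.ScalarNode || n.Tag != "!!int" {
		return 0, false
	}
	v, err := strconv.ParseInt(n.Value, 10, 64)
	return int(v), err == nil
}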
-func NewHeader(in interface{}, context *compiler.Context) (*Header, error) { +func NewHeader(in *yaml.Node, context *compiler.Context) (*Header, error) { errors := make([]error, 0) x := &Header{} m, ok := compiler.UnpackMap(in) @@ -1349,24 +1318,24 @@ func NewHeader(in interface{}, context *compiler.Context) (*Header, error) { // string type = 1; v1 := compiler.MapValueForKey(m, "type") if v1 != nil { - x.Type, ok = v1.(string) + x.Type, ok = compiler.StringForScalarNode(v1) if !ok { - message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1) + message := fmt.Sprintf("has unexpected value for type: %s", compiler.Display(v1)) errors = append(errors, compiler.NewError(context, message)) } // check for valid enum values // [string number integer boolean array] if ok && !compiler.StringArrayContainsValue([]string{"string", "number", "integer", "boolean", "array"}, x.Type) { - message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1) + message := fmt.Sprintf("has unexpected value for type: %s", compiler.Display(v1)) errors = append(errors, compiler.NewError(context, message)) } } // string format = 2; v2 := compiler.MapValueForKey(m, "format") if v2 != nil { - x.Format, ok = v2.(string) + x.Format, ok = compiler.StringForScalarNode(v2) if !ok { - message := fmt.Sprintf("has unexpected value for format: %+v (%T)", v2, v2) + message := fmt.Sprintf("has unexpected value for format: %s", compiler.Display(v2)) errors = append(errors, compiler.NewError(context, message)) } } @@ -1374,7 +1343,7 @@ func NewHeader(in interface{}, context *compiler.Context) (*Header, error) { v3 := compiler.MapValueForKey(m, "items") if v3 != nil { var err error - x.Items, err = NewPrimitivesItems(v3, compiler.NewContext("items", context)) + x.Items, err = NewPrimitivesItems(v3, compiler.NewContext("items", v3, context)) if err != nil { errors = append(errors, err) } @@ -1382,15 +1351,15 @@ func NewHeader(in interface{}, context *compiler.Context) (*Header, error) { // string collection_format = 4; v4 := compiler.MapValueForKey(m, "collectionFormat") if v4 != nil { - x.CollectionFormat, ok = v4.(string) + x.CollectionFormat, ok = compiler.StringForScalarNode(v4) if !ok { - message := fmt.Sprintf("has unexpected value for collectionFormat: %+v (%T)", v4, v4) + message := fmt.Sprintf("has unexpected value for collectionFormat: %s", compiler.Display(v4)) errors = append(errors, compiler.NewError(context, message)) } // check for valid enum values // [csv ssv tsv pipes] if ok && !compiler.StringArrayContainsValue([]string{"csv", "ssv", "tsv", "pipes"}, x.CollectionFormat) { - message := fmt.Sprintf("has unexpected value for collectionFormat: %+v (%T)", v4, v4) + message := fmt.Sprintf("has unexpected value for collectionFormat: %s", compiler.Display(v4)) errors = append(errors, compiler.NewError(context, message)) } } @@ -1398,7 +1367,7 @@ func NewHeader(in interface{}, context *compiler.Context) (*Header, error) { v5 := compiler.MapValueForKey(m, "default") if v5 != nil { var err error - x.Default, err = NewAny(v5, compiler.NewContext("default", context)) + x.Default, err = NewAny(v5, compiler.NewContext("default", v5, context)) if err != nil { errors = append(errors, err) } @@ -1406,126 +1375,102 @@ func NewHeader(in interface{}, context *compiler.Context) (*Header, error) { // float maximum = 6; v6 := compiler.MapValueForKey(m, "maximum") if v6 != nil { - switch v6 := v6.(type) { - case float64: - x.Maximum = v6 - case float32: - x.Maximum = float64(v6) - case uint64: - x.Maximum = 
float64(v6) - case uint32: - x.Maximum = float64(v6) - case int64: - x.Maximum = float64(v6) - case int32: - x.Maximum = float64(v6) - case int: - x.Maximum = float64(v6) - default: - message := fmt.Sprintf("has unexpected value for maximum: %+v (%T)", v6, v6) + v, ok := compiler.FloatForScalarNode(v6) + if ok { + x.Maximum = v + } else { + message := fmt.Sprintf("has unexpected value for maximum: %s", compiler.Display(v6)) errors = append(errors, compiler.NewError(context, message)) } } // bool exclusive_maximum = 7; v7 := compiler.MapValueForKey(m, "exclusiveMaximum") if v7 != nil { - x.ExclusiveMaximum, ok = v7.(bool) + x.ExclusiveMaximum, ok = compiler.BoolForScalarNode(v7) if !ok { - message := fmt.Sprintf("has unexpected value for exclusiveMaximum: %+v (%T)", v7, v7) + message := fmt.Sprintf("has unexpected value for exclusiveMaximum: %s", compiler.Display(v7)) errors = append(errors, compiler.NewError(context, message)) } } // float minimum = 8; v8 := compiler.MapValueForKey(m, "minimum") if v8 != nil { - switch v8 := v8.(type) { - case float64: - x.Minimum = v8 - case float32: - x.Minimum = float64(v8) - case uint64: - x.Minimum = float64(v8) - case uint32: - x.Minimum = float64(v8) - case int64: - x.Minimum = float64(v8) - case int32: - x.Minimum = float64(v8) - case int: - x.Minimum = float64(v8) - default: - message := fmt.Sprintf("has unexpected value for minimum: %+v (%T)", v8, v8) + v, ok := compiler.FloatForScalarNode(v8) + if ok { + x.Minimum = v + } else { + message := fmt.Sprintf("has unexpected value for minimum: %s", compiler.Display(v8)) errors = append(errors, compiler.NewError(context, message)) } } // bool exclusive_minimum = 9; v9 := compiler.MapValueForKey(m, "exclusiveMinimum") if v9 != nil { - x.ExclusiveMinimum, ok = v9.(bool) + x.ExclusiveMinimum, ok = compiler.BoolForScalarNode(v9) if !ok { - message := fmt.Sprintf("has unexpected value for exclusiveMinimum: %+v (%T)", v9, v9) + message := fmt.Sprintf("has unexpected value for exclusiveMinimum: %s", compiler.Display(v9)) errors = append(errors, compiler.NewError(context, message)) } } // int64 max_length = 10; v10 := compiler.MapValueForKey(m, "maxLength") if v10 != nil { - t, ok := v10.(int) + t, ok := compiler.IntForScalarNode(v10) if ok { x.MaxLength = int64(t) } else { - message := fmt.Sprintf("has unexpected value for maxLength: %+v (%T)", v10, v10) + message := fmt.Sprintf("has unexpected value for maxLength: %s", compiler.Display(v10)) errors = append(errors, compiler.NewError(context, message)) } } // int64 min_length = 11; v11 := compiler.MapValueForKey(m, "minLength") if v11 != nil { - t, ok := v11.(int) + t, ok := compiler.IntForScalarNode(v11) if ok { x.MinLength = int64(t) } else { - message := fmt.Sprintf("has unexpected value for minLength: %+v (%T)", v11, v11) + message := fmt.Sprintf("has unexpected value for minLength: %s", compiler.Display(v11)) errors = append(errors, compiler.NewError(context, message)) } } // string pattern = 12; v12 := compiler.MapValueForKey(m, "pattern") if v12 != nil { - x.Pattern, ok = v12.(string) + x.Pattern, ok = compiler.StringForScalarNode(v12) if !ok { - message := fmt.Sprintf("has unexpected value for pattern: %+v (%T)", v12, v12) + message := fmt.Sprintf("has unexpected value for pattern: %s", compiler.Display(v12)) errors = append(errors, compiler.NewError(context, message)) } } // int64 max_items = 13; v13 := compiler.MapValueForKey(m, "maxItems") if v13 != nil { - t, ok := v13.(int) + t, ok := compiler.IntForScalarNode(v13) if ok { x.MaxItems = int64(t) } 
else { - message := fmt.Sprintf("has unexpected value for maxItems: %+v (%T)", v13, v13) + message := fmt.Sprintf("has unexpected value for maxItems: %s", compiler.Display(v13)) errors = append(errors, compiler.NewError(context, message)) } } // int64 min_items = 14; v14 := compiler.MapValueForKey(m, "minItems") if v14 != nil { - t, ok := v14.(int) + t, ok := compiler.IntForScalarNode(v14) if ok { x.MinItems = int64(t) } else { - message := fmt.Sprintf("has unexpected value for minItems: %+v (%T)", v14, v14) + message := fmt.Sprintf("has unexpected value for minItems: %s", compiler.Display(v14)) errors = append(errors, compiler.NewError(context, message)) } } // bool unique_items = 15; v15 := compiler.MapValueForKey(m, "uniqueItems") if v15 != nil { - x.UniqueItems, ok = v15.(bool) + x.UniqueItems, ok = compiler.BoolForScalarNode(v15) if !ok { - message := fmt.Sprintf("has unexpected value for uniqueItems: %+v (%T)", v15, v15) + message := fmt.Sprintf("has unexpected value for uniqueItems: %s", compiler.Display(v15)) errors = append(errors, compiler.NewError(context, message)) } } @@ -1534,10 +1479,10 @@ func NewHeader(in interface{}, context *compiler.Context) (*Header, error) { if v16 != nil { // repeated Any x.Enum = make([]*Any, 0) - a, ok := v16.([]interface{}) + a, ok := compiler.SequenceNodeForNode(v16) if ok { - for _, item := range a { - y, err := NewAny(item, compiler.NewContext("enum", context)) + for _, item := range a.Content { + y, err := NewAny(item, compiler.NewContext("enum", item, context)) if err != nil { errors = append(errors, err) } @@ -1548,58 +1493,46 @@ func NewHeader(in interface{}, context *compiler.Context) (*Header, error) { // float multiple_of = 17; v17 := compiler.MapValueForKey(m, "multipleOf") if v17 != nil { - switch v17 := v17.(type) { - case float64: - x.MultipleOf = v17 - case float32: - x.MultipleOf = float64(v17) - case uint64: - x.MultipleOf = float64(v17) - case uint32: - x.MultipleOf = float64(v17) - case int64: - x.MultipleOf = float64(v17) - case int32: - x.MultipleOf = float64(v17) - case int: - x.MultipleOf = float64(v17) - default: - message := fmt.Sprintf("has unexpected value for multipleOf: %+v (%T)", v17, v17) + v, ok := compiler.FloatForScalarNode(v17) + if ok { + x.MultipleOf = v + } else { + message := fmt.Sprintf("has unexpected value for multipleOf: %s", compiler.Display(v17)) errors = append(errors, compiler.NewError(context, message)) } } // string description = 18; v18 := compiler.MapValueForKey(m, "description") if v18 != nil { - x.Description, ok = v18.(string) + x.Description, ok = compiler.StringForScalarNode(v18) if !ok { - message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v18, v18) + message := fmt.Sprintf("has unexpected value for description: %s", compiler.Display(v18)) errors = append(errors, compiler.NewError(context, message)) } } // repeated NamedAny vendor_extension = 19; // MAP: Any ^x- x.VendorExtension = make([]*NamedAny, 0) - for _, item := range m { - k, ok := compiler.StringValue(item.Key) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) if ok { - v := item.Value + v := m.Content[i+1] if strings.HasPrefix(k, "x-") { pair := &NamedAny{} pair.Name = k result := &Any{} - handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + handled, resultFromExt, err := compiler.CallExtension(context, v, k) if handled { if err != nil { errors = append(errors, err) } else { - bytes, _ := yaml.Marshal(v) + bytes := compiler.Marshal(v) 
 						result.Yaml = string(bytes)
 						result.Value = resultFromExt
 						pair.Value = result
 					}
 				} else {
-					pair.Value, err = NewAny(v, compiler.NewContext(k, context))
+					pair.Value, err = NewAny(v, compiler.NewContext(k, v, context))
 					if err != nil {
 						errors = append(errors, err)
 					}
@@ -1613,7 +1546,7 @@ func NewHeader(in interface{}, context *compiler.Context) (*Header, error) {
 }
 
 // NewHeaderParameterSubSchema creates an object of type HeaderParameterSubSchema if possible, returning an error if not.
-func NewHeaderParameterSubSchema(in interface{}, context *compiler.Context) (*HeaderParameterSubSchema, error) {
+func NewHeaderParameterSubSchema(in *yaml.Node, context *compiler.Context) (*HeaderParameterSubSchema, error) {
 	errors := make([]error, 0)
 	x := &HeaderParameterSubSchema{}
 	m, ok := compiler.UnpackMap(in)
@@ -1631,66 +1564,66 @@ func NewHeaderParameterSubSchema(in interface{}, context *compiler.Context) (*He
 	// bool required = 1;
 	v1 := compiler.MapValueForKey(m, "required")
 	if v1 != nil {
-		x.Required, ok = v1.(bool)
+		x.Required, ok = compiler.BoolForScalarNode(v1)
 		if !ok {
-			message := fmt.Sprintf("has unexpected value for required: %+v (%T)", v1, v1)
+			message := fmt.Sprintf("has unexpected value for required: %s", compiler.Display(v1))
 			errors = append(errors, compiler.NewError(context, message))
 		}
 	}
 	// string in = 2;
 	v2 := compiler.MapValueForKey(m, "in")
 	if v2 != nil {
-		x.In, ok = v2.(string)
+		x.In, ok = compiler.StringForScalarNode(v2)
 		if !ok {
-			message := fmt.Sprintf("has unexpected value for in: %+v (%T)", v2, v2)
+			message := fmt.Sprintf("has unexpected value for in: %s", compiler.Display(v2))
 			errors = append(errors, compiler.NewError(context, message))
 		}
 		// check for valid enum values
 		// [header]
 		if ok && !compiler.StringArrayContainsValue([]string{"header"}, x.In) {
-			message := fmt.Sprintf("has unexpected value for in: %+v (%T)", v2, v2)
+			message := fmt.Sprintf("has unexpected value for in: %s", compiler.Display(v2))
 			errors = append(errors, compiler.NewError(context, message))
 		}
 	}
 	// string description = 3;
 	v3 := compiler.MapValueForKey(m, "description")
 	if v3 != nil {
-		x.Description, ok = v3.(string)
+		x.Description, ok = compiler.StringForScalarNode(v3)
 		if !ok {
-			message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v3, v3)
+			message := fmt.Sprintf("has unexpected value for description: %s", compiler.Display(v3))
 			errors = append(errors, compiler.NewError(context, message))
 		}
 	}
 	// string name = 4;
 	v4 := compiler.MapValueForKey(m, "name")
 	if v4 != nil {
-		x.Name, ok = v4.(string)
+		x.Name, ok = compiler.StringForScalarNode(v4)
 		if !ok {
-			message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v4, v4)
+			message := fmt.Sprintf("has unexpected value for name: %s", compiler.Display(v4))
 			errors = append(errors, compiler.NewError(context, message))
 		}
 	}
 	// string type = 5;
 	v5 := compiler.MapValueForKey(m, "type")
 	if v5 != nil {
-		x.Type, ok = v5.(string)
+		x.Type, ok = compiler.StringForScalarNode(v5)
 		if !ok {
-			message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v5, v5)
+			message := fmt.Sprintf("has unexpected value for type: %s", compiler.Display(v5))
 			errors = append(errors, compiler.NewError(context, message))
 		}
 		// check for valid enum values
 		// [string number boolean integer array]
 		if ok && !compiler.StringArrayContainsValue([]string{"string", "number", "boolean", "integer", "array"}, x.Type) {
-			message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v5, v5)
+			message := fmt.Sprintf("has unexpected value for type: %s", compiler.Display(v5))
 			errors = append(errors, compiler.NewError(context, message))
 		}
 	}
 	// string format = 6;
 	v6 := compiler.MapValueForKey(m, "format")
 	if v6 != nil {
-		x.Format, ok = v6.(string)
+		x.Format, ok = compiler.StringForScalarNode(v6)
 		if !ok {
-			message := fmt.Sprintf("has unexpected value for format: %+v (%T)", v6, v6)
+			message := fmt.Sprintf("has unexpected value for format: %s", compiler.Display(v6))
 			errors = append(errors, compiler.NewError(context, message))
 		}
 	}
@@ -1698,7 +1631,7 @@ func NewHeaderParameterSubSchema(in interface{}, context *compiler.Context) (*He
 	v7 := compiler.MapValueForKey(m, "items")
 	if v7 != nil {
 		var err error
-		x.Items, err = NewPrimitivesItems(v7, compiler.NewContext("items", context))
+		x.Items, err = NewPrimitivesItems(v7, compiler.NewContext("items", v7, context))
 		if err != nil {
 			errors = append(errors, err)
 		}
@@ -1706,15 +1639,15 @@ func NewHeaderParameterSubSchema(in interface{}, context *compiler.Context) (*He
 	// string collection_format = 8;
 	v8 := compiler.MapValueForKey(m, "collectionFormat")
 	if v8 != nil {
-		x.CollectionFormat, ok = v8.(string)
+		x.CollectionFormat, ok = compiler.StringForScalarNode(v8)
 		if !ok {
-			message := fmt.Sprintf("has unexpected value for collectionFormat: %+v (%T)", v8, v8)
+			message := fmt.Sprintf("has unexpected value for collectionFormat: %s", compiler.Display(v8))
 			errors = append(errors, compiler.NewError(context, message))
 		}
 		// check for valid enum values
 		// [csv ssv tsv pipes]
 		if ok && !compiler.StringArrayContainsValue([]string{"csv", "ssv", "tsv", "pipes"}, x.CollectionFormat) {
-			message := fmt.Sprintf("has unexpected value for collectionFormat: %+v (%T)", v8, v8)
+			message := fmt.Sprintf("has unexpected value for collectionFormat: %s", compiler.Display(v8))
 			errors = append(errors, compiler.NewError(context, message))
 		}
 	}
@@ -1722,7 +1655,7 @@ func NewHeaderParameterSubSchema(in interface{}, context *compiler.Context) (*He
 	v9 := compiler.MapValueForKey(m, "default")
 	if v9 != nil {
 		var err error
-		x.Default, err = NewAny(v9, compiler.NewContext("default", context))
+		x.Default, err = NewAny(v9, compiler.NewContext("default", v9, context))
 		if err != nil {
 			errors = append(errors, err)
 		}
@@ -1730,126 +1663,102 @@ func NewHeaderParameterSubSchema(in interface{}, context *compiler.Context) (*He
 	// float maximum = 10;
 	v10 := compiler.MapValueForKey(m, "maximum")
 	if v10 != nil {
-		switch v10 := v10.(type) {
-		case float64:
-			x.Maximum = v10
-		case float32:
-			x.Maximum = float64(v10)
-		case uint64:
-			x.Maximum = float64(v10)
-		case uint32:
-			x.Maximum = float64(v10)
-		case int64:
-			x.Maximum = float64(v10)
-		case int32:
-			x.Maximum = float64(v10)
-		case int:
-			x.Maximum = float64(v10)
-		default:
-			message := fmt.Sprintf("has unexpected value for maximum: %+v (%T)", v10, v10)
+		v, ok := compiler.FloatForScalarNode(v10)
+		if ok {
+			x.Maximum = v
+		} else {
+			message := fmt.Sprintf("has unexpected value for maximum: %s", compiler.Display(v10))
 			errors = append(errors, compiler.NewError(context, message))
 		}
 	}
 	// bool exclusive_maximum = 11;
 	v11 := compiler.MapValueForKey(m, "exclusiveMaximum")
 	if v11 != nil {
-		x.ExclusiveMaximum, ok = v11.(bool)
+		x.ExclusiveMaximum, ok = compiler.BoolForScalarNode(v11)
 		if !ok {
-			message := fmt.Sprintf("has unexpected value for exclusiveMaximum: %+v (%T)", v11, v11)
+			message := fmt.Sprintf("has unexpected value for exclusiveMaximum: %s", compiler.Display(v11))
 			errors = append(errors, compiler.NewError(context, message))
 		}
 	}
 	// float minimum = 12;
 	v12 := compiler.MapValueForKey(m, "minimum")
 	if v12 != nil {
-		switch v12 := v12.(type) {
-		case float64:
-			x.Minimum = v12
-		case float32:
-			x.Minimum = float64(v12)
-		case uint64:
-			x.Minimum = float64(v12)
-		case uint32:
-			x.Minimum = float64(v12)
-		case int64:
-			x.Minimum = float64(v12)
-		case int32:
-			x.Minimum = float64(v12)
-		case int:
-			x.Minimum = float64(v12)
-		default:
-			message := fmt.Sprintf("has unexpected value for minimum: %+v (%T)", v12, v12)
+		v, ok := compiler.FloatForScalarNode(v12)
+		if ok {
+			x.Minimum = v
+		} else {
+			message := fmt.Sprintf("has unexpected value for minimum: %s", compiler.Display(v12))
 			errors = append(errors, compiler.NewError(context, message))
 		}
 	}
 	// bool exclusive_minimum = 13;
 	v13 := compiler.MapValueForKey(m, "exclusiveMinimum")
 	if v13 != nil {
-		x.ExclusiveMinimum, ok = v13.(bool)
+		x.ExclusiveMinimum, ok = compiler.BoolForScalarNode(v13)
 		if !ok {
-			message := fmt.Sprintf("has unexpected value for exclusiveMinimum: %+v (%T)", v13, v13)
+			message := fmt.Sprintf("has unexpected value for exclusiveMinimum: %s", compiler.Display(v13))
 			errors = append(errors, compiler.NewError(context, message))
 		}
 	}
 	// int64 max_length = 14;
 	v14 := compiler.MapValueForKey(m, "maxLength")
 	if v14 != nil {
-		t, ok := v14.(int)
+		t, ok := compiler.IntForScalarNode(v14)
 		if ok {
 			x.MaxLength = int64(t)
 		} else {
-			message := fmt.Sprintf("has unexpected value for maxLength: %+v (%T)", v14, v14)
+			message := fmt.Sprintf("has unexpected value for maxLength: %s", compiler.Display(v14))
 			errors = append(errors, compiler.NewError(context, message))
 		}
 	}
 	// int64 min_length = 15;
 	v15 := compiler.MapValueForKey(m, "minLength")
 	if v15 != nil {
-		t, ok := v15.(int)
+		t, ok := compiler.IntForScalarNode(v15)
 		if ok {
 			x.MinLength = int64(t)
 		} else {
-			message := fmt.Sprintf("has unexpected value for minLength: %+v (%T)", v15, v15)
+			message := fmt.Sprintf("has unexpected value for minLength: %s", compiler.Display(v15))
 			errors = append(errors, compiler.NewError(context, message))
 		}
 	}
 	// string pattern = 16;
 	v16 := compiler.MapValueForKey(m, "pattern")
 	if v16 != nil {
-		x.Pattern, ok = v16.(string)
+		x.Pattern, ok = compiler.StringForScalarNode(v16)
 		if !ok {
-			message := fmt.Sprintf("has unexpected value for pattern: %+v (%T)", v16, v16)
+			message := fmt.Sprintf("has unexpected value for pattern: %s", compiler.Display(v16))
 			errors = append(errors, compiler.NewError(context, message))
 		}
 	}
 	// int64 max_items = 17;
 	v17 := compiler.MapValueForKey(m, "maxItems")
 	if v17 != nil {
-		t, ok := v17.(int)
+		t, ok := compiler.IntForScalarNode(v17)
 		if ok {
 			x.MaxItems = int64(t)
 		} else {
-			message := fmt.Sprintf("has unexpected value for maxItems: %+v (%T)", v17, v17)
+			message := fmt.Sprintf("has unexpected value for maxItems: %s", compiler.Display(v17))
 			errors = append(errors, compiler.NewError(context, message))
 		}
 	}
 	// int64 min_items = 18;
 	v18 := compiler.MapValueForKey(m, "minItems")
 	if v18 != nil {
-		t, ok := v18.(int)
+		t, ok := compiler.IntForScalarNode(v18)
 		if ok {
 			x.MinItems = int64(t)
 		} else {
-			message := fmt.Sprintf("has unexpected value for minItems: %+v (%T)", v18, v18)
+			message := fmt.Sprintf("has unexpected value for minItems: %s", compiler.Display(v18))
 			errors = append(errors, compiler.NewError(context, message))
 		}
 	}
 	// bool unique_items = 19;
 	v19 := compiler.MapValueForKey(m, "uniqueItems")
 	if v19 != nil {
-		x.UniqueItems, ok = v19.(bool)
+		x.UniqueItems, ok = compiler.BoolForScalarNode(v19)
 		if !ok {
-			message := fmt.Sprintf("has unexpected value for uniqueItems: %+v (%T)", v19, v19)
+			message := fmt.Sprintf("has unexpected value for uniqueItems: %s", compiler.Display(v19))
 			errors = append(errors, compiler.NewError(context, message))
 		}
 	}
@@ -1858,10 +1767,10 @@ func NewHeaderParameterSubSchema(in interface{}, context *compiler.Context) (*He
 	if v20 != nil {
 		// repeated Any
 		x.Enum = make([]*Any, 0)
-		a, ok := v20.([]interface{})
+		a, ok := compiler.SequenceNodeForNode(v20)
 		if ok {
-			for _, item := range a {
-				y, err := NewAny(item, compiler.NewContext("enum", context))
+			for _, item := range a.Content {
+				y, err := NewAny(item, compiler.NewContext("enum", item, context))
 				if err != nil {
 					errors = append(errors, err)
 				}
@@ -1872,49 +1781,37 @@ func NewHeaderParameterSubSchema(in interface{}, context *compiler.Context) (*He
 	// float multiple_of = 21;
 	v21 := compiler.MapValueForKey(m, "multipleOf")
 	if v21 != nil {
-		switch v21 := v21.(type) {
-		case float64:
-			x.MultipleOf = v21
-		case float32:
-			x.MultipleOf = float64(v21)
-		case uint64:
-			x.MultipleOf = float64(v21)
-		case uint32:
-			x.MultipleOf = float64(v21)
-		case int64:
-			x.MultipleOf = float64(v21)
-		case int32:
-			x.MultipleOf = float64(v21)
-		case int:
-			x.MultipleOf = float64(v21)
-		default:
-			message := fmt.Sprintf("has unexpected value for multipleOf: %+v (%T)", v21, v21)
+		v, ok := compiler.FloatForScalarNode(v21)
+		if ok {
+			x.MultipleOf = v
+		} else {
+			message := fmt.Sprintf("has unexpected value for multipleOf: %s", compiler.Display(v21))
 			errors = append(errors, compiler.NewError(context, message))
 		}
 	}
 	// repeated NamedAny vendor_extension = 22;
 	// MAP: Any ^x-
 	x.VendorExtension = make([]*NamedAny, 0)
-	for _, item := range m {
-		k, ok := compiler.StringValue(item.Key)
+	for i := 0; i < len(m.Content); i += 2 {
+		k, ok := compiler.StringForScalarNode(m.Content[i])
 		if ok {
-			v := item.Value
+			v := m.Content[i+1]
 			if strings.HasPrefix(k, "x-") {
 				pair := &NamedAny{}
 				pair.Name = k
 				result := &Any{}
-				handled, resultFromExt, err := compiler.HandleExtension(context, v, k)
+				handled, resultFromExt, err := compiler.CallExtension(context, v, k)
 				if handled {
 					if err != nil {
 						errors = append(errors, err)
 					} else {
-						bytes, _ := yaml.Marshal(v)
+						bytes := compiler.Marshal(v)
 						result.Yaml = string(bytes)
 						result.Value = resultFromExt
 						pair.Value = result
 					}
 				} else {
-					pair.Value, err = NewAny(v, compiler.NewContext(k, context))
+					pair.Value, err = NewAny(v, compiler.NewContext(k, v, context))
 					if err != nil {
 						errors = append(errors, err)
 					}
@@ -1928,7 +1825,7 @@ func NewHeaderParameterSubSchema(in interface{}, context *compiler.Context) (*He
 }
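The recurring change in the hunks above swaps direct Go type assertions (and the long numeric type switches) for the compiler package's scalar accessors: StringForScalarNode, BoolForScalarNode, IntForScalarNode, and FloatForScalarNode, with compiler.Display(...) rendering the offending node in error messages. This follows from the move to gopkg.in/yaml.v3, where every parsed value is a *yaml.Node that carries its text in Value and its resolved type in Tag, so scalars must be parsed rather than asserted. A minimal sketch of such an accessor, assuming yaml.v3 and strconv; the name is illustrative and the vendored implementation may differ:

	// floatForScalarNode is a hypothetical stand-in for the vendored helper:
	// accept !!int and !!float scalars and parse the node's text.
	func floatForScalarNode(node *yaml.Node) (float64, bool) {
		if node == nil || node.Kind != yaml.ScalarNode {
			return 0.0, false
		}
		if node.Tag != "!!int" && node.Tag != "!!float" {
			return 0.0, false
		}
		v, err := strconv.ParseFloat(node.Value, 64)
		return v, err == nil
	}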
 
 // NewHeaders creates an object of type Headers if possible, returning an error if not.
-func NewHeaders(in interface{}, context *compiler.Context) (*Headers, error) {
+func NewHeaders(in *yaml.Node, context *compiler.Context) (*Headers, error) {
 	errors := make([]error, 0)
 	x := &Headers{}
 	m, ok := compiler.UnpackMap(in)
@@ -1939,14 +1836,14 @@ func NewHeaders(in interface{}, context *compiler.Context) (*Headers, error) {
 	// repeated NamedHeader additional_properties = 1;
 	// MAP: Header
 	x.AdditionalProperties = make([]*NamedHeader, 0)
-	for _, item := range m {
-		k, ok := compiler.StringValue(item.Key)
+	for i := 0; i < len(m.Content); i += 2 {
+		k, ok := compiler.StringForScalarNode(m.Content[i])
 		if ok {
-			v := item.Value
+			v := m.Content[i+1]
 			pair := &NamedHeader{}
 			pair.Name = k
 			var err error
-			pair.Value, err = NewHeader(v, compiler.NewContext(k, context))
+			pair.Value, err = NewHeader(v, compiler.NewContext(k, v, context))
 			if err != nil {
 				errors = append(errors, err)
 			}
@@ -1958,7 +1855,7 @@ func NewHeaders(in interface{}, context *compiler.Context) (*Headers, error) {
 }
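The map-handling hunks all share one shape: yaml.v2's ordered MapSlice iteration over item.Key/item.Value becomes an index walk over the mapping node's Content slice, because yaml.v3 stores key and value nodes interleaved (key at index i, value at i+1). A sketch of that traversal under the same assumption; the helper name is illustrative, not part of the patch:

	// visitMapping walks an ordered YAML mapping. In yaml.v3 a MappingNode
	// keeps keys at even indices of Content and values at the odd index after.
	func visitMapping(m *yaml.Node, visit func(key string, value *yaml.Node)) {
		if m == nil || m.Kind != yaml.MappingNode {
			return
		}
		for i := 0; i+1 < len(m.Content); i += 2 {
			if key := m.Content[i]; key.Kind == yaml.ScalarNode {
				visit(key.Value, m.Content[i+1])
			}
		}
	}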
 
 // NewInfo creates an object of type Info if possible, returning an error if not.
-func NewInfo(in interface{}, context *compiler.Context) (*Info, error) {
+func NewInfo(in *yaml.Node, context *compiler.Context) (*Info, error) {
 	errors := make([]error, 0)
 	x := &Info{}
 	m, ok := compiler.UnpackMap(in)
@@ -1982,36 +1879,36 @@ func NewInfo(in interface{}, context *compiler.Context) (*Info, error) {
 	// string title = 1;
 	v1 := compiler.MapValueForKey(m, "title")
 	if v1 != nil {
-		x.Title, ok = v1.(string)
+		x.Title, ok = compiler.StringForScalarNode(v1)
 		if !ok {
-			message := fmt.Sprintf("has unexpected value for title: %+v (%T)", v1, v1)
+			message := fmt.Sprintf("has unexpected value for title: %s", compiler.Display(v1))
 			errors = append(errors, compiler.NewError(context, message))
 		}
 	}
 	// string version = 2;
 	v2 := compiler.MapValueForKey(m, "version")
 	if v2 != nil {
-		x.Version, ok = v2.(string)
+		x.Version, ok = compiler.StringForScalarNode(v2)
 		if !ok {
-			message := fmt.Sprintf("has unexpected value for version: %+v (%T)", v2, v2)
+			message := fmt.Sprintf("has unexpected value for version: %s", compiler.Display(v2))
 			errors = append(errors, compiler.NewError(context, message))
 		}
 	}
 	// string description = 3;
 	v3 := compiler.MapValueForKey(m, "description")
 	if v3 != nil {
-		x.Description, ok = v3.(string)
+		x.Description, ok = compiler.StringForScalarNode(v3)
 		if !ok {
-			message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v3, v3)
+			message := fmt.Sprintf("has unexpected value for description: %s", compiler.Display(v3))
 			errors = append(errors, compiler.NewError(context, message))
 		}
 	}
 	// string terms_of_service = 4;
 	v4 := compiler.MapValueForKey(m, "termsOfService")
 	if v4 != nil {
-		x.TermsOfService, ok = v4.(string)
+		x.TermsOfService, ok = compiler.StringForScalarNode(v4)
 		if !ok {
-			message := fmt.Sprintf("has unexpected value for termsOfService: %+v (%T)", v4, v4)
+			message := fmt.Sprintf("has unexpected value for termsOfService: %s", compiler.Display(v4))
 			errors = append(errors, compiler.NewError(context, message))
 		}
 	}
@@ -2019,7 +1916,7 @@ func NewInfo(in interface{}, context *compiler.Context) (*Info, error) {
 	v5 := compiler.MapValueForKey(m, "contact")
 	if v5 != nil {
 		var err error
-		x.Contact, err = NewContact(v5, compiler.NewContext("contact", context))
+		x.Contact, err = NewContact(v5, compiler.NewContext("contact", v5, context))
 		if err != nil {
 			errors = append(errors, err)
 		}
@@ -2028,7 +1925,7 @@ func NewInfo(in interface{}, context *compiler.Context) (*Info, error) {
 	v6 := compiler.MapValueForKey(m, "license")
 	if v6 != nil {
 		var err error
-		x.License, err = NewLicense(v6, compiler.NewContext("license", context))
+		x.License, err = NewLicense(v6, compiler.NewContext("license", v6, context))
 		if err != nil {
 			errors = append(errors, err)
 		}
@@ -2036,26 +1933,26 @@ func NewInfo(in interface{}, context *compiler.Context) (*Info, error) {
 	// repeated NamedAny vendor_extension = 7;
 	// MAP: Any ^x-
 	x.VendorExtension = make([]*NamedAny, 0)
-	for _, item := range m {
-		k, ok := compiler.StringValue(item.Key)
+	for i := 0; i < len(m.Content); i += 2 {
+		k, ok := compiler.StringForScalarNode(m.Content[i])
 		if ok {
-			v := item.Value
+			v := m.Content[i+1]
 			if strings.HasPrefix(k, "x-") {
 				pair := &NamedAny{}
 				pair.Name = k
 				result := &Any{}
-				handled, resultFromExt, err := compiler.HandleExtension(context, v, k)
+				handled, resultFromExt, err := compiler.CallExtension(context, v, k)
 				if handled {
 					if err != nil {
 						errors = append(errors, err)
 					} else {
-						bytes, _ := yaml.Marshal(v)
+						bytes := compiler.Marshal(v)
 						result.Yaml = string(bytes)
 						result.Value = resultFromExt
 						pair.Value = result
 					}
 				} else {
-					pair.Value, err = NewAny(v, compiler.NewContext(k, context))
+					pair.Value, err = NewAny(v, compiler.NewContext(k, v, context))
 					if err != nil {
 						errors = append(errors, err)
 					}
@@ -2069,7 +1966,7 @@ func NewInfo(in interface{}, context *compiler.Context) (*Info, error) {
 }
 
 // NewItemsItem creates an object of type ItemsItem if possible, returning an error if not.
-func NewItemsItem(in interface{}, context *compiler.Context) (*ItemsItem, error) {
+func NewItemsItem(in *yaml.Node, context *compiler.Context) (*ItemsItem, error) {
 	errors := make([]error, 0)
 	x := &ItemsItem{}
 	m, ok := compiler.UnpackMap(in)
@@ -2078,7 +1975,7 @@ func NewItemsItem(in interface{}, context *compiler.Context) (*ItemsItem, error)
 		errors = append(errors, compiler.NewError(context, message))
 	} else {
 		x.Schema = make([]*Schema, 0)
-		y, err := NewSchema(m, compiler.NewContext("", context))
+		y, err := NewSchema(m, compiler.NewContext("", m, context))
 		if err != nil {
 			return nil, err
 		}
@@ -2088,7 +1985,7 @@ func NewItemsItem(in interface{}, context *compiler.Context) (*ItemsItem, error)
 }
 
 // NewJsonReference creates an object of type JsonReference if possible, returning an error if not.
-func NewJsonReference(in interface{}, context *compiler.Context) (*JsonReference, error) {
+func NewJsonReference(in *yaml.Node, context *compiler.Context) (*JsonReference, error) {
 	errors := make([]error, 0)
 	x := &JsonReference{}
 	m, ok := compiler.UnpackMap(in)
@@ -2102,28 +1999,21 @@ func NewJsonReference(in interface{}, context *compiler.Context) (*JsonReference
 		message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", "))
 		errors = append(errors, compiler.NewError(context, message))
 	}
-	allowedKeys := []string{"$ref", "description"}
-	var allowedPatterns []*regexp.Regexp
-	invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns)
-	if len(invalidKeys) > 0 {
-		message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", "))
-		errors = append(errors, compiler.NewError(context, message))
-	}
 	// string _ref = 1;
 	v1 := compiler.MapValueForKey(m, "$ref")
 	if v1 != nil {
-		x.XRef, ok = v1.(string)
+		x.XRef, ok = compiler.StringForScalarNode(v1)
 		if !ok {
-			message := fmt.Sprintf("has unexpected value for $ref: %+v (%T)", v1, v1)
+			message := fmt.Sprintf("has unexpected value for $ref: %s", compiler.Display(v1))
 			errors = append(errors, compiler.NewError(context, message))
 		}
 	}
 	// string description = 2;
 	v2 := compiler.MapValueForKey(m, "description")
 	if v2 != nil {
-		x.Description, ok = v2.(string)
+		x.Description, ok = compiler.StringForScalarNode(v2)
 		if !ok {
-			message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v2, v2)
+			message := fmt.Sprintf("has unexpected value for description: %s", compiler.Display(v2))
 			errors = append(errors, compiler.NewError(context, message))
 		}
 	}
@@ -2132,7 +2022,7 @@ func NewJsonReference(in interface{}, context *compiler.Context) (*JsonReference
 }
 
 // NewLicense creates an object of type License if possible, returning an error if not.
-func NewLicense(in interface{}, context *compiler.Context) (*License, error) {
+func NewLicense(in *yaml.Node, context *compiler.Context) (*License, error) {
 	errors := make([]error, 0)
 	x := &License{}
 	m, ok := compiler.UnpackMap(in)
@@ -2156,44 +2046,44 @@ func NewLicense(in interface{}, context *compiler.Context) (*License, error) {
 	// string name = 1;
 	v1 := compiler.MapValueForKey(m, "name")
 	if v1 != nil {
-		x.Name, ok = v1.(string)
+		x.Name, ok = compiler.StringForScalarNode(v1)
 		if !ok {
-			message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1)
+			message := fmt.Sprintf("has unexpected value for name: %s", compiler.Display(v1))
 			errors = append(errors, compiler.NewError(context, message))
 		}
 	}
 	// string url = 2;
 	v2 := compiler.MapValueForKey(m, "url")
 	if v2 != nil {
-		x.Url, ok = v2.(string)
+		x.Url, ok = compiler.StringForScalarNode(v2)
 		if !ok {
-			message := fmt.Sprintf("has unexpected value for url: %+v (%T)", v2, v2)
+			message := fmt.Sprintf("has unexpected value for url: %s", compiler.Display(v2))
 			errors = append(errors, compiler.NewError(context, message))
 		}
 	}
 	// repeated NamedAny vendor_extension = 3;
 	// MAP: Any ^x-
 	x.VendorExtension = make([]*NamedAny, 0)
-	for _, item := range m {
-		k, ok := compiler.StringValue(item.Key)
+	for i := 0; i < len(m.Content); i += 2 {
+		k, ok := compiler.StringForScalarNode(m.Content[i])
 		if ok {
-			v := item.Value
+			v := m.Content[i+1]
 			if strings.HasPrefix(k, "x-") {
 				pair := &NamedAny{}
 				pair.Name = k
 				result := &Any{}
-				handled, resultFromExt, err := compiler.HandleExtension(context, v, k)
+				handled, resultFromExt, err := compiler.CallExtension(context, v, k)
 				if handled {
 					if err != nil {
 						errors = append(errors, err)
 					} else {
-						bytes, _ := yaml.Marshal(v)
+						bytes := compiler.Marshal(v)
 						result.Yaml = string(bytes)
 						result.Value = resultFromExt
 						pair.Value = result
 					}
 				} else {
-					pair.Value, err = NewAny(v, compiler.NewContext(k, context))
+					pair.Value, err = NewAny(v, compiler.NewContext(k, v, context))
 					if err != nil {
 						errors = append(errors, err)
 					}
@@ -2207,7 +2097,7 @@ func NewLicense(in interface{}, context *compiler.Context) (*License, error) {
 }
 
 // NewNamedAny creates an object of type NamedAny if possible, returning an error if not.
-func NewNamedAny(in interface{}, context *compiler.Context) (*NamedAny, error) {
+func NewNamedAny(in *yaml.Node, context *compiler.Context) (*NamedAny, error) {
 	errors := make([]error, 0)
 	x := &NamedAny{}
 	m, ok := compiler.UnpackMap(in)
@@ -2225,9 +2115,9 @@ func NewNamedAny(in interface{}, context *compiler.Context) (*NamedAny, error) {
 	// string name = 1;
 	v1 := compiler.MapValueForKey(m, "name")
 	if v1 != nil {
-		x.Name, ok = v1.(string)
+		x.Name, ok = compiler.StringForScalarNode(v1)
 		if !ok {
-			message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1)
+			message := fmt.Sprintf("has unexpected value for name: %s", compiler.Display(v1))
 			errors = append(errors, compiler.NewError(context, message))
 		}
 	}
@@ -2235,7 +2125,7 @@ func NewNamedAny(in interface{}, context *compiler.Context) (*NamedAny, error) {
 	v2 := compiler.MapValueForKey(m, "value")
 	if v2 != nil {
 		var err error
-		x.Value, err = NewAny(v2, compiler.NewContext("value", context))
+		x.Value, err = NewAny(v2, compiler.NewContext("value", v2, context))
 		if err != nil {
 			errors = append(errors, err)
 		}
@@ -2245,7 +2135,7 @@ func NewNamedAny(in interface{}, context *compiler.Context) (*NamedAny, error) {
 }
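The Named* constructors that follow all build the same shape: because proto3 maps do not preserve key order, the generated model represents YAML maps as a repeated list of name/value pair messages, which the loops above assemble one pair at a time. A small illustrative lookup over that representation (a hypothetical helper, not part of the patch):

	// headerByName scans the ordered NamedHeader pairs that NewHeaders builds;
	// a linear scan keeps the YAML document order intact.
	func headerByName(hs *Headers, name string) *Header {
		for _, pair := range hs.AdditionalProperties {
			if pair.Name == name {
				return pair.Value
			}
		}
		return nil
	}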
 
 // NewNamedHeader creates an object of type NamedHeader if possible, returning an error if not.
-func NewNamedHeader(in interface{}, context *compiler.Context) (*NamedHeader, error) {
+func NewNamedHeader(in *yaml.Node, context *compiler.Context) (*NamedHeader, error) {
 	errors := make([]error, 0)
 	x := &NamedHeader{}
 	m, ok := compiler.UnpackMap(in)
@@ -2263,9 +2153,9 @@ func NewNamedHeader(in interface{}, context *compiler.Context) (*NamedHeader, er
 	// string name = 1;
 	v1 := compiler.MapValueForKey(m, "name")
 	if v1 != nil {
-		x.Name, ok = v1.(string)
+		x.Name, ok = compiler.StringForScalarNode(v1)
 		if !ok {
-			message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1)
+			message := fmt.Sprintf("has unexpected value for name: %s", compiler.Display(v1))
 			errors = append(errors, compiler.NewError(context, message))
 		}
 	}
@@ -2273,7 +2163,7 @@ func NewNamedHeader(in interface{}, context *compiler.Context) (*NamedHeader, er
 	v2 := compiler.MapValueForKey(m, "value")
 	if v2 != nil {
 		var err error
-		x.Value, err = NewHeader(v2, compiler.NewContext("value", context))
+		x.Value, err = NewHeader(v2, compiler.NewContext("value", v2, context))
 		if err != nil {
 			errors = append(errors, err)
 		}
@@ -2283,7 +2173,7 @@ func NewNamedHeader(in interface{}, context *compiler.Context) (*NamedHeader, er
 }
 
 // NewNamedParameter creates an object of type NamedParameter if possible, returning an error if not.
-func NewNamedParameter(in interface{}, context *compiler.Context) (*NamedParameter, error) {
+func NewNamedParameter(in *yaml.Node, context *compiler.Context) (*NamedParameter, error) {
 	errors := make([]error, 0)
 	x := &NamedParameter{}
 	m, ok := compiler.UnpackMap(in)
@@ -2301,9 +2191,9 @@ func NewNamedParameter(in interface{}, context *compiler.Context) (*NamedParamet
 	// string name = 1;
 	v1 := compiler.MapValueForKey(m, "name")
 	if v1 != nil {
-		x.Name, ok = v1.(string)
+		x.Name, ok = compiler.StringForScalarNode(v1)
 		if !ok {
-			message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1)
+			message := fmt.Sprintf("has unexpected value for name: %s", compiler.Display(v1))
 			errors = append(errors, compiler.NewError(context, message))
 		}
 	}
@@ -2311,7 +2201,7 @@ func NewNamedParameter(in interface{}, context *compiler.Context) (*NamedParamet
 	v2 := compiler.MapValueForKey(m, "value")
 	if v2 != nil {
 		var err error
-		x.Value, err = NewParameter(v2, compiler.NewContext("value", context))
+		x.Value, err = NewParameter(v2, compiler.NewContext("value", v2, context))
 		if err != nil {
 			errors = append(errors, err)
 		}
@@ -2321,7 +2211,7 @@ func NewNamedParameter(in interface{}, context *compiler.Context) (*NamedParamet
 }
 
 // NewNamedPathItem creates an object of type NamedPathItem if possible, returning an error if not.
-func NewNamedPathItem(in interface{}, context *compiler.Context) (*NamedPathItem, error) {
+func NewNamedPathItem(in *yaml.Node, context *compiler.Context) (*NamedPathItem, error) {
 	errors := make([]error, 0)
 	x := &NamedPathItem{}
 	m, ok := compiler.UnpackMap(in)
@@ -2339,9 +2229,9 @@ func NewNamedPathItem(in interface{}, context *compiler.Context) (*NamedPathItem
 	// string name = 1;
 	v1 := compiler.MapValueForKey(m, "name")
 	if v1 != nil {
-		x.Name, ok = v1.(string)
+		x.Name, ok = compiler.StringForScalarNode(v1)
 		if !ok {
-			message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1)
+			message := fmt.Sprintf("has unexpected value for name: %s", compiler.Display(v1))
 			errors = append(errors, compiler.NewError(context, message))
 		}
 	}
@@ -2349,7 +2239,7 @@ func NewNamedPathItem(in interface{}, context *compiler.Context) (*NamedPathItem
 	v2 := compiler.MapValueForKey(m, "value")
 	if v2 != nil {
 		var err error
-		x.Value, err = NewPathItem(v2, compiler.NewContext("value", context))
+		x.Value, err = NewPathItem(v2, compiler.NewContext("value", v2, context))
 		if err != nil {
 			errors = append(errors, err)
 		}
@@ -2359,7 +2249,7 @@ func NewNamedPathItem(in interface{}, context *compiler.Context) (*NamedPathItem
 }
 
 // NewNamedResponse creates an object of type NamedResponse if possible, returning an error if not.
-func NewNamedResponse(in interface{}, context *compiler.Context) (*NamedResponse, error) {
+func NewNamedResponse(in *yaml.Node, context *compiler.Context) (*NamedResponse, error) {
 	errors := make([]error, 0)
 	x := &NamedResponse{}
 	m, ok := compiler.UnpackMap(in)
@@ -2377,9 +2267,9 @@ func NewNamedResponse(in interface{}, context *compiler.Context) (*NamedResponse
 	// string name = 1;
 	v1 := compiler.MapValueForKey(m, "name")
 	if v1 != nil {
-		x.Name, ok = v1.(string)
+		x.Name, ok = compiler.StringForScalarNode(v1)
 		if !ok {
-			message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1)
+			message := fmt.Sprintf("has unexpected value for name: %s", compiler.Display(v1))
 			errors = append(errors, compiler.NewError(context, message))
 		}
 	}
@@ -2387,7 +2277,7 @@ func NewNamedResponse(in interface{}, context *compiler.Context) (*NamedResponse
 	v2 := compiler.MapValueForKey(m, "value")
 	if v2 != nil {
 		var err error
-		x.Value, err = NewResponse(v2, compiler.NewContext("value", context))
+		x.Value, err = NewResponse(v2, compiler.NewContext("value", v2, context))
 		if err != nil {
 			errors = append(errors, err)
 		}
@@ -2397,7 +2287,7 @@ func NewNamedResponse(in interface{}, context *compiler.Context) (*NamedResponse
 }
 
 // NewNamedResponseValue creates an object of type NamedResponseValue if possible, returning an error if not.
-func NewNamedResponseValue(in interface{}, context *compiler.Context) (*NamedResponseValue, error) {
+func NewNamedResponseValue(in *yaml.Node, context *compiler.Context) (*NamedResponseValue, error) {
 	errors := make([]error, 0)
 	x := &NamedResponseValue{}
 	m, ok := compiler.UnpackMap(in)
@@ -2415,9 +2305,9 @@ func NewNamedResponseValue(in interface{}, context *compiler.Context) (*NamedRes
 	// string name = 1;
 	v1 := compiler.MapValueForKey(m, "name")
 	if v1 != nil {
-		x.Name, ok = v1.(string)
+		x.Name, ok = compiler.StringForScalarNode(v1)
 		if !ok {
-			message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1)
+			message := fmt.Sprintf("has unexpected value for name: %s", compiler.Display(v1))
 			errors = append(errors, compiler.NewError(context, message))
 		}
 	}
@@ -2425,7 +2315,7 @@ func NewNamedResponseValue(in interface{}, context *compiler.Context) (*NamedRes
 	v2 := compiler.MapValueForKey(m, "value")
 	if v2 != nil {
 		var err error
-		x.Value, err = NewResponseValue(v2, compiler.NewContext("value", context))
+		x.Value, err = NewResponseValue(v2, compiler.NewContext("value", v2, context))
 		if err != nil {
 			errors = append(errors, err)
 		}
@@ -2435,7 +2325,7 @@ func NewNamedResponseValue(in interface{}, context *compiler.Context) (*NamedRes
 }
 
 // NewNamedSchema creates an object of type NamedSchema if possible, returning an error if not.
-func NewNamedSchema(in interface{}, context *compiler.Context) (*NamedSchema, error) {
+func NewNamedSchema(in *yaml.Node, context *compiler.Context) (*NamedSchema, error) {
 	errors := make([]error, 0)
 	x := &NamedSchema{}
 	m, ok := compiler.UnpackMap(in)
@@ -2453,9 +2343,9 @@ func NewNamedSchema(in interface{}, context *compiler.Context) (*NamedSchema, er
 	// string name = 1;
 	v1 := compiler.MapValueForKey(m, "name")
 	if v1 != nil {
-		x.Name, ok = v1.(string)
+		x.Name, ok = compiler.StringForScalarNode(v1)
 		if !ok {
-			message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1)
+			message := fmt.Sprintf("has unexpected value for name: %s", compiler.Display(v1))
 			errors = append(errors, compiler.NewError(context, message))
 		}
 	}
@@ -2463,7 +2353,7 @@ func NewNamedSchema(in interface{}, context *compiler.Context) (*NamedSchema, er
 	v2 := compiler.MapValueForKey(m, "value")
 	if v2 != nil {
 		var err error
-		x.Value, err = NewSchema(v2, compiler.NewContext("value", context))
+		x.Value, err = NewSchema(v2, compiler.NewContext("value", v2, context))
 		if err != nil {
 			errors = append(errors, err)
 		}
@@ -2473,7 +2363,7 @@ func NewNamedSchema(in interface{}, context *compiler.Context) (*NamedSchema, er
 }
 
 // NewNamedSecurityDefinitionsItem creates an object of type NamedSecurityDefinitionsItem if possible, returning an error if not.
-func NewNamedSecurityDefinitionsItem(in interface{}, context *compiler.Context) (*NamedSecurityDefinitionsItem, error) {
+func NewNamedSecurityDefinitionsItem(in *yaml.Node, context *compiler.Context) (*NamedSecurityDefinitionsItem, error) {
 	errors := make([]error, 0)
 	x := &NamedSecurityDefinitionsItem{}
 	m, ok := compiler.UnpackMap(in)
@@ -2491,9 +2381,9 @@ func NewNamedSecurityDefinitionsItem(in interface{}, context *compiler.Context)
 	// string name = 1;
 	v1 := compiler.MapValueForKey(m, "name")
 	if v1 != nil {
-		x.Name, ok = v1.(string)
+		x.Name, ok = compiler.StringForScalarNode(v1)
 		if !ok {
-			message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1)
+			message := fmt.Sprintf("has unexpected value for name: %s", compiler.Display(v1))
 			errors = append(errors, compiler.NewError(context, message))
 		}
 	}
@@ -2501,7 +2391,7 @@ func NewNamedSecurityDefinitionsItem(in interface{}, context *compiler.Context)
 	v2 := compiler.MapValueForKey(m, "value")
 	if v2 != nil {
 		var err error
-		x.Value, err = NewSecurityDefinitionsItem(v2, compiler.NewContext("value", context))
+		x.Value, err = NewSecurityDefinitionsItem(v2, compiler.NewContext("value", v2, context))
 		if err != nil {
 			errors = append(errors, err)
 		}
@@ -2511,7 +2401,7 @@ func NewNamedSecurityDefinitionsItem(in interface{}, context *compiler.Context)
 }
 
 // NewNamedString creates an object of type NamedString if possible, returning an error if not.
-func NewNamedString(in interface{}, context *compiler.Context) (*NamedString, error) {
+func NewNamedString(in *yaml.Node, context *compiler.Context) (*NamedString, error) {
 	errors := make([]error, 0)
 	x := &NamedString{}
 	m, ok := compiler.UnpackMap(in)
@@ -2529,18 +2419,18 @@ func NewNamedString(in interface{}, context *compiler.Context) (*NamedString, er
 	// string name = 1;
 	v1 := compiler.MapValueForKey(m, "name")
 	if v1 != nil {
-		x.Name, ok = v1.(string)
+		x.Name, ok = compiler.StringForScalarNode(v1)
 		if !ok {
-			message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1)
+			message := fmt.Sprintf("has unexpected value for name: %s", compiler.Display(v1))
 			errors = append(errors, compiler.NewError(context, message))
 		}
 	}
 	// string value = 2;
 	v2 := compiler.MapValueForKey(m, "value")
 	if v2 != nil {
-		x.Value, ok = v2.(string)
+		x.Value, ok = compiler.StringForScalarNode(v2)
 		if !ok {
-			message := fmt.Sprintf("has unexpected value for value: %+v (%T)", v2, v2)
+			message := fmt.Sprintf("has unexpected value for value: %s", compiler.Display(v2))
 			errors = append(errors, compiler.NewError(context, message))
 		}
 	}
@@ -2549,7 +2439,7 @@ func NewNamedString(in interface{}, context *compiler.Context) (*NamedString, er
 }
 
 // NewNamedStringArray creates an object of type NamedStringArray if possible, returning an error if not.
-func NewNamedStringArray(in interface{}, context *compiler.Context) (*NamedStringArray, error) {
+func NewNamedStringArray(in *yaml.Node, context *compiler.Context) (*NamedStringArray, error) {
 	errors := make([]error, 0)
 	x := &NamedStringArray{}
 	m, ok := compiler.UnpackMap(in)
@@ -2567,9 +2457,9 @@ func NewNamedStringArray(in interface{}, context *compiler.Context) (*NamedStrin
 	// string name = 1;
 	v1 := compiler.MapValueForKey(m, "name")
 	if v1 != nil {
-		x.Name, ok = v1.(string)
+		x.Name, ok = compiler.StringForScalarNode(v1)
 		if !ok {
-			message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1)
+			message := fmt.Sprintf("has unexpected value for name: %s", compiler.Display(v1))
 			errors = append(errors, compiler.NewError(context, message))
 		}
 	}
@@ -2577,7 +2467,7 @@ func NewNamedStringArray(in interface{}, context *compiler.Context) (*NamedStrin
 	v2 := compiler.MapValueForKey(m, "value")
 	if v2 != nil {
 		var err error
-		x.Value, err = NewStringArray(v2, compiler.NewContext("value", context))
+		x.Value, err = NewStringArray(v2, compiler.NewContext("value", v2, context))
 		if err != nil {
 			errors = append(errors, err)
 		}
@@ -2587,7 +2477,7 @@ func NewNamedStringArray(in interface{}, context *compiler.Context) (*NamedStrin
 }
 
 // NewNonBodyParameter creates an object of type NonBodyParameter if possible, returning an error if not.
-func NewNonBodyParameter(in interface{}, context *compiler.Context) (*NonBodyParameter, error) {
+func NewNonBodyParameter(in *yaml.Node, context *compiler.Context) (*NonBodyParameter, error) {
 	errors := make([]error, 0)
 	x := &NonBodyParameter{}
 	matched := false
@@ -2605,7 +2495,7 @@ func NewNonBodyParameter(in interface{}, context *compiler.Context) (*NonBodyPar
 	// HeaderParameterSubSchema header_parameter_sub_schema = 1;
 	{
 		// errors might be ok here, they mean we just don't have the right subtype
-		t, matchingError := NewHeaderParameterSubSchema(m, compiler.NewContext("headerParameterSubSchema", context))
+		t, matchingError := NewHeaderParameterSubSchema(m, compiler.NewContext("headerParameterSubSchema", m, context))
 		if matchingError == nil {
 			x.Oneof = &NonBodyParameter_HeaderParameterSubSchema{HeaderParameterSubSchema: t}
 			matched = true
@@ -2616,7 +2506,7 @@ func NewNonBodyParameter(in interface{}, context *compiler.Context) (*NonBodyPar
 	// FormDataParameterSubSchema form_data_parameter_sub_schema = 2;
 	{
 		// errors might be ok here, they mean we just don't have the right subtype
-		t, matchingError := NewFormDataParameterSubSchema(m, compiler.NewContext("formDataParameterSubSchema", context))
+		t, matchingError := NewFormDataParameterSubSchema(m, compiler.NewContext("formDataParameterSubSchema", m, context))
 		if matchingError == nil {
 			x.Oneof = &NonBodyParameter_FormDataParameterSubSchema{FormDataParameterSubSchema: t}
 			matched = true
@@ -2627,7 +2517,7 @@ func NewNonBodyParameter(in interface{}, context *compiler.Context) (*NonBodyPar
 	// QueryParameterSubSchema query_parameter_sub_schema = 3;
 	{
 		// errors might be ok here, they mean we just don't have the right subtype
-		t, matchingError := NewQueryParameterSubSchema(m, compiler.NewContext("queryParameterSubSchema", context))
+		t, matchingError := NewQueryParameterSubSchema(m, compiler.NewContext("queryParameterSubSchema", m, context))
 		if matchingError == nil {
 			x.Oneof = &NonBodyParameter_QueryParameterSubSchema{QueryParameterSubSchema: t}
 			matched = true
@@ -2638,7 +2528,7 @@ func NewNonBodyParameter(in interface{}, context *compiler.Context) (*NonBodyPar
 	// PathParameterSubSchema path_parameter_sub_schema = 4;
 	{
 		// errors might be ok here, they mean we just don't have the right subtype
-		t, matchingError := NewPathParameterSubSchema(m, compiler.NewContext("pathParameterSubSchema", context))
+		t, matchingError := NewPathParameterSubSchema(m, compiler.NewContext("pathParameterSubSchema", m, context))
 		if matchingError == nil {
 			x.Oneof = &NonBodyParameter_PathParameterSubSchema{PathParameterSubSchema: t}
 			matched = true
@@ -2650,12 +2540,16 @@ func NewNonBodyParameter(in interface{}, context *compiler.Context) (*NonBodyPar
 	if matched {
 		// since the oneof matched one of its possibilities, discard any matching errors
 		errors = make([]error, 0)
+	} else {
+		message := fmt.Sprintf("contains an invalid NonBodyParameter")
+		err := compiler.NewError(context, message)
+		errors = []error{err}
 	}
 	return x, compiler.NewErrorGroupOrNil(errors)
 }
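NewNonBodyParameter resolves its oneof by attempting each parameter subtype in order and keeping the first that parses; the new else branch above makes a node that matches none of the four alternatives surface "contains an invalid NonBodyParameter" instead of silently returning an empty oneof. (The fmt.Sprintf around that constant string is redundant and a plain literal would satisfy vet's printf check, but it is reproduced here as generated.) The matching pattern, condensed into a sketch that reuses the same constructors; the helper name is hypothetical:

	// matchNonBodyParameter mirrors the generated oneof resolution: try each
	// subtype parser in order; a parse error just means "not this subtype".
	func matchNonBodyParameter(m *yaml.Node, ctx *compiler.Context) (*NonBodyParameter, error) {
		x := &NonBodyParameter{}
		if t, err := NewHeaderParameterSubSchema(m, compiler.NewContext("headerParameterSubSchema", m, ctx)); err == nil {
			x.Oneof = &NonBodyParameter_HeaderParameterSubSchema{HeaderParameterSubSchema: t}
		} else if t, err := NewFormDataParameterSubSchema(m, compiler.NewContext("formDataParameterSubSchema", m, ctx)); err == nil {
			x.Oneof = &NonBodyParameter_FormDataParameterSubSchema{FormDataParameterSubSchema: t}
		} else if t, err := NewQueryParameterSubSchema(m, compiler.NewContext("queryParameterSubSchema", m, ctx)); err == nil {
			x.Oneof = &NonBodyParameter_QueryParameterSubSchema{QueryParameterSubSchema: t}
		} else if t, err := NewPathParameterSubSchema(m, compiler.NewContext("pathParameterSubSchema", m, ctx)); err == nil {
			x.Oneof = &NonBodyParameter_PathParameterSubSchema{PathParameterSubSchema: t}
		} else {
			return nil, compiler.NewError(ctx, "contains an invalid NonBodyParameter")
		}
		return x, nil
	}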
 
 // NewOauth2AccessCodeSecurity creates an object of type Oauth2AccessCodeSecurity if possible, returning an error if not.
-func NewOauth2AccessCodeSecurity(in interface{}, context *compiler.Context) (*Oauth2AccessCodeSecurity, error) {
+func NewOauth2AccessCodeSecurity(in *yaml.Node, context *compiler.Context) (*Oauth2AccessCodeSecurity, error) {
 	errors := make([]error, 0)
 	x := &Oauth2AccessCodeSecurity{}
 	m, ok := compiler.UnpackMap(in)
@@ -2679,30 +2573,30 @@
 	// string type = 1;
 	v1 := compiler.MapValueForKey(m, "type")
 	if v1 != nil {
-		x.Type, ok = v1.(string)
+		x.Type, ok = compiler.StringForScalarNode(v1)
 		if !ok {
-			message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1)
+			message := fmt.Sprintf("has unexpected value for type: %s", compiler.Display(v1))
 			errors = append(errors, compiler.NewError(context, message))
 		}
 		// check for valid enum values
 		// [oauth2]
 		if ok && !compiler.StringArrayContainsValue([]string{"oauth2"}, x.Type) {
-			message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1)
+			message := fmt.Sprintf("has unexpected value for type: %s", compiler.Display(v1))
 			errors = append(errors, compiler.NewError(context, message))
 		}
 	}
 	// string flow = 2;
 	v2 := compiler.MapValueForKey(m, "flow")
 	if v2 != nil {
-		x.Flow, ok = v2.(string)
+		x.Flow, ok = compiler.StringForScalarNode(v2)
 		if !ok {
-			message := fmt.Sprintf("has unexpected value for flow: %+v (%T)", v2, v2)
+			message := fmt.Sprintf("has unexpected value for flow: %s", compiler.Display(v2))
 			errors = append(errors, compiler.NewError(context, message))
 		}
 		// check for valid enum values
 		// [accessCode]
 		if ok && !compiler.StringArrayContainsValue([]string{"accessCode"}, x.Flow) {
-			message := fmt.Sprintf("has unexpected value for flow: %+v (%T)", v2, v2)
+			message := fmt.Sprintf("has unexpected value for flow: %s", compiler.Display(v2))
 			errors = append(errors, compiler.NewError(context, message))
 		}
 	}
@@ -2710,7 +2604,7 @@
 	v3 := compiler.MapValueForKey(m, "scopes")
 	if v3 != nil {
 		var err error
-		x.Scopes, err = NewOauth2Scopes(v3, compiler.NewContext("scopes", context))
+		x.Scopes, err = NewOauth2Scopes(v3, compiler.NewContext("scopes", v3, context))
 		if err != nil {
 			errors = append(errors, err)
 		}
@@ -2718,53 +2612,53 @@
 	// string authorization_url = 4;
 	v4 := compiler.MapValueForKey(m, "authorizationUrl")
 	if v4 != nil {
-		x.AuthorizationUrl, ok = v4.(string)
+		x.AuthorizationUrl, ok = compiler.StringForScalarNode(v4)
 		if !ok {
-			message := fmt.Sprintf("has unexpected value for authorizationUrl: %+v (%T)", v4, v4)
+			message := fmt.Sprintf("has unexpected value for authorizationUrl: %s", compiler.Display(v4))
 			errors = append(errors, compiler.NewError(context, message))
 		}
 	}
 	// string token_url = 5;
 	v5 := compiler.MapValueForKey(m, "tokenUrl")
 	if v5 != nil {
-		x.TokenUrl, ok = v5.(string)
+		x.TokenUrl, ok = compiler.StringForScalarNode(v5)
 		if !ok {
-			message := fmt.Sprintf("has unexpected value for tokenUrl: %+v (%T)", v5, v5)
+			message := fmt.Sprintf("has unexpected value for tokenUrl: %s", compiler.Display(v5))
 			errors = append(errors, compiler.NewError(context, message))
 		}
 	}
 	// string description = 6;
 	v6 := compiler.MapValueForKey(m, "description")
 	if v6 != nil {
-		x.Description, ok = v6.(string)
+		x.Description, ok = compiler.StringForScalarNode(v6)
 		if !ok {
-			message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v6, v6)
+			message := fmt.Sprintf("has unexpected value for description: %s", compiler.Display(v6))
 			errors = append(errors, compiler.NewError(context, message))
 		}
 	}
 	// repeated NamedAny vendor_extension = 7;
 	// MAP: Any ^x-
 	x.VendorExtension = make([]*NamedAny, 0)
-	for _, item := range m {
-		k, ok := compiler.StringValue(item.Key)
+	for i := 0; i < len(m.Content); i += 2 {
+		k, ok := compiler.StringForScalarNode(m.Content[i])
 		if ok {
-			v := item.Value
+			v := m.Content[i+1]
 			if strings.HasPrefix(k, "x-") {
 				pair := &NamedAny{}
 				pair.Name = k
 				result := &Any{}
-				handled, resultFromExt, err := compiler.HandleExtension(context, v, k)
+				handled, resultFromExt, err := compiler.CallExtension(context, v, k)
 				if handled {
 					if err != nil {
 						errors = append(errors, err)
 					} else {
-						bytes, _ := yaml.Marshal(v)
+						bytes := compiler.Marshal(v)
 						result.Yaml = string(bytes)
 						result.Value = resultFromExt
 						pair.Value = result
 					}
 				} else {
-					pair.Value, err = NewAny(v, compiler.NewContext(k, context))
+					pair.Value, err = NewAny(v, compiler.NewContext(k, v, context))
 					if err != nil {
 						errors = append(errors, err)
 					}
@@ -2778,7 +2672,7 @@ func NewOauth2AccessCodeSecurity(in interface{}, context *compiler.Context) (*Oa
 }
 
 // NewOauth2ApplicationSecurity creates an object of type Oauth2ApplicationSecurity if possible, returning an error if not.
-func NewOauth2ApplicationSecurity(in interface{}, context *compiler.Context) (*Oauth2ApplicationSecurity, error) {
+func NewOauth2ApplicationSecurity(in *yaml.Node, context *compiler.Context) (*Oauth2ApplicationSecurity, error) {
 	errors := make([]error, 0)
 	x := &Oauth2ApplicationSecurity{}
 	m, ok := compiler.UnpackMap(in)
@@ -2802,30 +2696,30 @@
 	// string type = 1;
 	v1 := compiler.MapValueForKey(m, "type")
 	if v1 != nil {
-		x.Type, ok = v1.(string)
+		x.Type, ok = compiler.StringForScalarNode(v1)
 		if !ok {
-			message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1)
+			message := fmt.Sprintf("has unexpected value for type: %s", compiler.Display(v1))
 			errors = append(errors, compiler.NewError(context, message))
 		}
 		// check for valid enum values
 		// [oauth2]
 		if ok && !compiler.StringArrayContainsValue([]string{"oauth2"}, x.Type) {
-			message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1)
+			message := fmt.Sprintf("has unexpected value for type: %s", compiler.Display(v1))
 			errors = append(errors, compiler.NewError(context, message))
 		}
 	}
 	// string flow = 2;
 	v2 := compiler.MapValueForKey(m, "flow")
 	if v2 != nil {
-		x.Flow, ok = v2.(string)
+		x.Flow, ok = compiler.StringForScalarNode(v2)
 		if !ok {
-			message := fmt.Sprintf("has unexpected value for flow: %+v (%T)", v2, v2)
+			message := fmt.Sprintf("has unexpected value for flow: %s", compiler.Display(v2))
 			errors = append(errors, compiler.NewError(context, message))
 		}
 		// check for valid enum values
 		// [application]
 		if ok && !compiler.StringArrayContainsValue([]string{"application"}, x.Flow) {
-			message := fmt.Sprintf("has unexpected value for flow: %+v (%T)", v2, v2)
+			message := fmt.Sprintf("has unexpected value for flow: %s", compiler.Display(v2))
 			errors = append(errors, compiler.NewError(context, message))
 		}
 	}
@@ -2833,7 +2727,7 @@ func NewOauth2ApplicationSecurity(in interface{}, context *compiler.Context) (*O
 	v3 := compiler.MapValueForKey(m, "scopes")
 	if v3 != nil {
 		var err error
-		x.Scopes, err = NewOauth2Scopes(v3, compiler.NewContext("scopes", context))
+		x.Scopes, err = NewOauth2Scopes(v3, compiler.NewContext("scopes", v3, context))
 		if err != nil {
 			errors = append(errors, err)
 		}
@@ -2841,44 +2735,44 @@ func NewOauth2ApplicationSecurity(in interface{}, context *compiler.Context) (*O
 	// string token_url = 4;
 	v4 := compiler.MapValueForKey(m, "tokenUrl")
 	if v4 != nil {
-		x.TokenUrl, ok = v4.(string)
+		x.TokenUrl, ok = compiler.StringForScalarNode(v4)
 		if !ok {
-			message := fmt.Sprintf("has unexpected value for tokenUrl: %+v (%T)", v4, v4)
+			message := fmt.Sprintf("has unexpected value for tokenUrl: %s", compiler.Display(v4))
 			errors = append(errors, compiler.NewError(context, message))
 		}
 	}
 	// string description = 5;
 	v5 := compiler.MapValueForKey(m, "description")
 	if v5 != nil {
-		x.Description, ok = v5.(string)
+		x.Description, ok = compiler.StringForScalarNode(v5)
 		if !ok {
-			message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v5, v5)
+			message := fmt.Sprintf("has unexpected value for description: %s", compiler.Display(v5))
 			errors = append(errors, compiler.NewError(context, message))
 		}
 	}
 	// repeated NamedAny vendor_extension = 6;
 	// MAP: Any ^x-
 	x.VendorExtension = make([]*NamedAny, 0)
-	for _, item := range m {
-		k, ok := compiler.StringValue(item.Key)
+	for i := 0; i < len(m.Content); i += 2 {
+		k, ok := compiler.StringForScalarNode(m.Content[i])
 		if ok {
-			v := item.Value
+			v := m.Content[i+1]
 			if strings.HasPrefix(k, "x-") {
 				pair := &NamedAny{}
 				pair.Name = k
 				result := &Any{}
-				handled, resultFromExt, err := compiler.HandleExtension(context, v, k)
+				handled, resultFromExt, err := compiler.CallExtension(context, v, k)
 				if handled {
 					if err != nil {
 						errors = append(errors, err)
 					} else {
-						bytes, _ := yaml.Marshal(v)
+						bytes := compiler.Marshal(v)
 						result.Yaml = string(bytes)
 						result.Value = resultFromExt
 						pair.Value = result
 					}
 				} else {
-					pair.Value, err = NewAny(v, compiler.NewContext(k, context))
+					pair.Value, err = NewAny(v, compiler.NewContext(k, v, context))
 					if err != nil {
 						errors = append(errors, err)
 					}
@@ -2892,7 +2786,7 @@ func NewOauth2ApplicationSecurity(in interface{}, context *compiler.Context) (*O
 }
 
 // NewOauth2ImplicitSecurity creates an object of type Oauth2ImplicitSecurity if possible, returning an error if not.
-func NewOauth2ImplicitSecurity(in interface{}, context *compiler.Context) (*Oauth2ImplicitSecurity, error) {
+func NewOauth2ImplicitSecurity(in *yaml.Node, context *compiler.Context) (*Oauth2ImplicitSecurity, error) {
 	errors := make([]error, 0)
 	x := &Oauth2ImplicitSecurity{}
 	m, ok := compiler.UnpackMap(in)
@@ -2916,30 +2810,30 @@
 	// string type = 1;
 	v1 := compiler.MapValueForKey(m, "type")
 	if v1 != nil {
-		x.Type, ok = v1.(string)
+		x.Type, ok = compiler.StringForScalarNode(v1)
 		if !ok {
-			message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1)
+			message := fmt.Sprintf("has unexpected value for type: %s", compiler.Display(v1))
 			errors = append(errors, compiler.NewError(context, message))
 		}
 		// check for valid enum values
 		// [oauth2]
 		if ok && !compiler.StringArrayContainsValue([]string{"oauth2"}, x.Type) {
-			message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1)
+			message := fmt.Sprintf("has unexpected value for type: %s", compiler.Display(v1))
 			errors = append(errors, compiler.NewError(context, message))
 		}
 	}
 	// string flow = 2;
 	v2 := compiler.MapValueForKey(m, "flow")
 	if v2 != nil {
-		x.Flow, ok = v2.(string)
+		x.Flow, ok = compiler.StringForScalarNode(v2)
 		if !ok {
-			message := fmt.Sprintf("has unexpected value for flow: %+v (%T)", v2, v2)
+			message := fmt.Sprintf("has unexpected value for flow: %s", compiler.Display(v2))
 			errors = append(errors, compiler.NewError(context, message))
 		}
 		// check for valid enum values
 		// [implicit]
 		if ok && !compiler.StringArrayContainsValue([]string{"implicit"}, x.Flow) {
-			message := fmt.Sprintf("has unexpected value for flow: %+v (%T)", v2, v2)
+			message := fmt.Sprintf("has unexpected value for flow: %s", compiler.Display(v2))
 			errors = append(errors, compiler.NewError(context, message))
 		}
 	}
@@ -2947,7 +2841,7 @@ func NewOauth2ImplicitSecurity(in interface{}, context *compiler.Context) (*Oaut
 	v3 := compiler.MapValueForKey(m, "scopes")
 	if v3 != nil {
 		var err error
-		x.Scopes, err = NewOauth2Scopes(v3, compiler.NewContext("scopes", context))
+		x.Scopes, err = NewOauth2Scopes(v3, compiler.NewContext("scopes", v3, context))
 		if err != nil {
 			errors = append(errors, err)
 		}
@@ -2955,44 +2849,44 @@ func NewOauth2ImplicitSecurity(in interface{}, context *compiler.Context) (*Oaut
 	// string authorization_url = 4;
 	v4 := compiler.MapValueForKey(m, "authorizationUrl")
 	if v4 != nil {
-		x.AuthorizationUrl, ok = v4.(string)
+		x.AuthorizationUrl, ok = compiler.StringForScalarNode(v4)
 		if !ok {
-			message := fmt.Sprintf("has unexpected value for authorizationUrl: %+v (%T)", v4, v4)
+			message := fmt.Sprintf("has unexpected value for authorizationUrl: %s", compiler.Display(v4))
 			errors = append(errors, compiler.NewError(context, message))
 		}
 	}
 	// string description = 5;
 	v5 := compiler.MapValueForKey(m, "description")
 	if v5 != nil {
-		x.Description, ok = v5.(string)
+		x.Description, ok = compiler.StringForScalarNode(v5)
 		if !ok {
-			message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v5, v5)
+			message := fmt.Sprintf("has unexpected value for description: %s", compiler.Display(v5))
 			errors = append(errors, compiler.NewError(context, message))
 		}
 	}
 	// repeated NamedAny vendor_extension = 6;
 	// MAP: Any ^x-
 	x.VendorExtension = make([]*NamedAny, 0)
-	for _, item := range m {
-		k, ok := compiler.StringValue(item.Key)
+	for i := 0; i < len(m.Content); i += 2 {
+		k, ok := compiler.StringForScalarNode(m.Content[i])
 		if ok {
-			v := item.Value
+			v := m.Content[i+1]
 			if strings.HasPrefix(k, "x-") {
 				pair := &NamedAny{}
 				pair.Name = k
 				result := &Any{}
-				handled, resultFromExt, err := compiler.HandleExtension(context, v, k)
+				handled, resultFromExt, err := compiler.CallExtension(context, v, k)
 				if handled {
 					if err != nil {
 						errors = append(errors, err)
 					} else {
-						bytes, _ := yaml.Marshal(v)
+						bytes := compiler.Marshal(v)
 						result.Yaml = string(bytes)
 						result.Value = resultFromExt
 						pair.Value = result
 					}
 				} else {
-					pair.Value, err = NewAny(v, compiler.NewContext(k, context))
+					pair.Value, err = NewAny(v, compiler.NewContext(k, v, context))
 					if err != nil {
 						errors = append(errors, err)
 					}
@@ -3006,7 +2900,7 @@ func NewOauth2ImplicitSecurity(in interface{}, context *compiler.Context) (*Oaut
 }
 
 // NewOauth2PasswordSecurity creates an object of type Oauth2PasswordSecurity if possible, returning an error if not.
-func NewOauth2PasswordSecurity(in interface{}, context *compiler.Context) (*Oauth2PasswordSecurity, error) {
+func NewOauth2PasswordSecurity(in *yaml.Node, context *compiler.Context) (*Oauth2PasswordSecurity, error) {
 	errors := make([]error, 0)
 	x := &Oauth2PasswordSecurity{}
 	m, ok := compiler.UnpackMap(in)
@@ -3030,30 +2924,30 @@
 	// string type = 1;
 	v1 := compiler.MapValueForKey(m, "type")
 	if v1 != nil {
-		x.Type, ok = v1.(string)
+		x.Type, ok = compiler.StringForScalarNode(v1)
 		if !ok {
-			message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1)
+			message := fmt.Sprintf("has unexpected value for type: %s", compiler.Display(v1))
 			errors = append(errors, compiler.NewError(context, message))
 		}
 		// check for valid enum values
 		// [oauth2]
 		if ok && !compiler.StringArrayContainsValue([]string{"oauth2"}, x.Type) {
-			message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1)
+			message := fmt.Sprintf("has unexpected value for type: %s", compiler.Display(v1))
 			errors = append(errors, compiler.NewError(context, message))
 		}
 	}
 	// string flow = 2;
 	v2 := compiler.MapValueForKey(m, "flow")
 	if v2 != nil {
-		x.Flow, ok = v2.(string)
+		x.Flow, ok = compiler.StringForScalarNode(v2)
 		if !ok {
-			message := fmt.Sprintf("has unexpected value for flow: %+v (%T)", v2, v2)
+			message := fmt.Sprintf("has unexpected value for flow: %s", compiler.Display(v2))
 			errors = append(errors, compiler.NewError(context, message))
 		}
 		// check for valid enum values
 		// [password]
 		if ok && !compiler.StringArrayContainsValue([]string{"password"}, x.Flow) {
-			message := fmt.Sprintf("has unexpected value for flow: %+v (%T)", v2, v2)
+			message := fmt.Sprintf("has unexpected value for flow: %s", compiler.Display(v2))
 			errors = append(errors, compiler.NewError(context, message))
 		}
 	}
@@ -3061,7 +2955,7 @@ func NewOauth2PasswordSecurity(in interface{}, context *compiler.Context) (*Oaut
 	v3 := compiler.MapValueForKey(m, "scopes")
 	if v3 != nil {
 		var err error
-		x.Scopes, err = NewOauth2Scopes(v3, compiler.NewContext("scopes", context))
+		x.Scopes, err = NewOauth2Scopes(v3, compiler.NewContext("scopes", v3, context))
 		if err != nil {
 			errors = append(errors, err)
 		}
@@ -3069,44 +2963,44 @@ func NewOauth2PasswordSecurity(in interface{}, context *compiler.Context) (*Oaut
 	// string token_url = 4;
 	v4 := compiler.MapValueForKey(m, "tokenUrl")
 	if v4 != nil {
-		x.TokenUrl, ok = v4.(string)
+		x.TokenUrl, ok = compiler.StringForScalarNode(v4)
 		if !ok {
-			message := fmt.Sprintf("has unexpected value for tokenUrl: %+v (%T)", v4, v4)
+			message := fmt.Sprintf("has unexpected value for tokenUrl: %s", compiler.Display(v4))
 			errors = append(errors, compiler.NewError(context, message))
 		}
 	}
 	// string description = 5;
 	v5 := compiler.MapValueForKey(m, "description")
 	if v5 != nil {
-		x.Description, ok = v5.(string)
+		x.Description, ok = compiler.StringForScalarNode(v5)
 		if !ok {
-			message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v5, v5)
+			message := fmt.Sprintf("has unexpected value for description: %s", compiler.Display(v5))
 			errors = append(errors, compiler.NewError(context, message))
 		}
 	}
 	// repeated NamedAny vendor_extension = 6;
 	// MAP: Any ^x-
 	x.VendorExtension = make([]*NamedAny, 0)
-	for _, item := range m {
-		k, ok := compiler.StringValue(item.Key)
+	for i := 0; i < len(m.Content); i += 2 {
+		k, ok := compiler.StringForScalarNode(m.Content[i])
 		if ok {
-			v := item.Value
+			v := m.Content[i+1]
 			if strings.HasPrefix(k, "x-") {
 				pair := &NamedAny{}
 				pair.Name = k
 				result := &Any{}
-				handled, resultFromExt, err := compiler.HandleExtension(context, v, k)
+				handled, resultFromExt, err := compiler.CallExtension(context, v, k)
 				if handled {
 					if err != nil {
 						errors = append(errors, err)
 					} else {
-						bytes, _ := yaml.Marshal(v)
+						bytes := compiler.Marshal(v)
 						result.Yaml = string(bytes)
 						result.Value = resultFromExt
 						pair.Value = result
 					}
 				} else {
-					pair.Value, err = NewAny(v, compiler.NewContext(k, context))
+					pair.Value, err = NewAny(v, compiler.NewContext(k, v, context))
 					if err != nil {
 						errors = append(errors, err)
 					}
@@ -3120,7 +3014,7 @@ func NewOauth2PasswordSecurity(in interface{}, context *compiler.Context) (*Oaut
 }
 
 // NewOauth2Scopes creates an object of type Oauth2Scopes if possible, returning an error if not.
-func NewOauth2Scopes(in interface{}, context *compiler.Context) (*Oauth2Scopes, error) {
+func NewOauth2Scopes(in *yaml.Node, context *compiler.Context) (*Oauth2Scopes, error) {
 	errors := make([]error, 0)
 	x := &Oauth2Scopes{}
 	m, ok := compiler.UnpackMap(in)
@@ -3131,13 +3025,13 @@ func NewOauth2Scopes(in interface{}, context *compiler.Context) (*Oauth2Scopes,
 	// repeated NamedString additional_properties = 1;
 	// MAP: string
 	x.AdditionalProperties = make([]*NamedString, 0)
-	for _, item := range m {
-		k, ok := compiler.StringValue(item.Key)
+	for i := 0; i < len(m.Content); i += 2 {
+		k, ok := compiler.StringForScalarNode(m.Content[i])
 		if ok {
-			v := item.Value
+			v := m.Content[i+1]
 			pair := &NamedString{}
 			pair.Name = k
-			pair.Value = v.(string)
+			pair.Value, _ = compiler.StringForScalarNode(v)
 			x.AdditionalProperties = append(x.AdditionalProperties, pair)
 		}
 	}
@@ -3146,7 +3040,7 @@ func NewOauth2Scopes(in interface{}, context *compiler.Context) (*Oauth2Scopes,
 }
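In NewOperation below, repeated string fields (tags, produces, consumes, schemes) switch from asserting []interface{} to compiler.SequenceNodeForNode plus compiler.StringArrayForSequenceNode: a yaml.v3 sequence is itself a *yaml.Node whose Content slice holds the element nodes. A sketch of the conversion under that assumption (illustrative, not the vendored helper):

	// stringArrayForSequenceNode collects the scalar elements of a yaml.v3
	// sequence node into a []string, skipping any non-scalar entries.
	func stringArrayForSequenceNode(seq *yaml.Node) []string {
		out := make([]string, 0, len(seq.Content))
		for _, item := range seq.Content {
			if item.Kind == yaml.ScalarNode {
				out = append(out, item.Value)
			}
		}
		return out
	}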
 
 // NewOperation creates an object of type Operation if possible, returning an error if not.
-func NewOperation(in interface{}, context *compiler.Context) (*Operation, error) {
+func NewOperation(in *yaml.Node, context *compiler.Context) (*Operation, error) {
 	errors := make([]error, 0)
 	x := &Operation{}
 	m, ok := compiler.UnpackMap(in)
@@ -3170,29 +3064,29 @@ func NewOperation(in interface{}, context *compiler.Context) (*Operation, error)
 	// repeated string tags = 1;
 	v1 := compiler.MapValueForKey(m, "tags")
 	if v1 != nil {
-		v, ok := v1.([]interface{})
+		v, ok := compiler.SequenceNodeForNode(v1)
 		if ok {
-			x.Tags = compiler.ConvertInterfaceArrayToStringArray(v)
+			x.Tags = compiler.StringArrayForSequenceNode(v)
 		} else {
-			message := fmt.Sprintf("has unexpected value for tags: %+v (%T)", v1, v1)
+			message := fmt.Sprintf("has unexpected value for tags: %s", compiler.Display(v1))
 			errors = append(errors, compiler.NewError(context, message))
 		}
 	}
 	// string summary = 2;
 	v2 := compiler.MapValueForKey(m, "summary")
 	if v2 != nil {
-		x.Summary, ok = v2.(string)
+		x.Summary, ok = compiler.StringForScalarNode(v2)
 		if !ok {
-			message := fmt.Sprintf("has unexpected value for summary: %+v (%T)", v2, v2)
+			message := fmt.Sprintf("has unexpected value for summary: %s", compiler.Display(v2))
 			errors = append(errors, compiler.NewError(context, message))
 		}
 	}
 	// string description = 3;
 	v3 := compiler.MapValueForKey(m, "description")
 	if v3 != nil {
-		x.Description, ok = v3.(string)
+		x.Description, ok = compiler.StringForScalarNode(v3)
 		if !ok {
-			message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v3, v3)
+			message := fmt.Sprintf("has unexpected value for description: %s", compiler.Display(v3))
 			errors = append(errors, compiler.NewError(context, message))
 		}
 	}
@@ -3200,7 +3094,7 @@ func NewOperation(in interface{}, context *compiler.Context) (*Operation, error)
 	v4 := compiler.MapValueForKey(m, "externalDocs")
 	if v4 != nil {
 		var err error
-		x.ExternalDocs, err = NewExternalDocs(v4, compiler.NewContext("externalDocs", context))
+		x.ExternalDocs, err = NewExternalDocs(v4, compiler.NewContext("externalDocs", v4, context))
 		if err != nil {
 			errors = append(errors, err)
 		}
@@ -3208,31 +3102,31 @@ func NewOperation(in interface{}, context *compiler.Context) (*Operation, error)
 	// string operation_id = 5;
 	v5 := compiler.MapValueForKey(m, "operationId")
 	if v5 != nil {
-		x.OperationId, ok = v5.(string)
+		x.OperationId, ok = compiler.StringForScalarNode(v5)
 		if !ok {
-			message := fmt.Sprintf("has unexpected value for operationId: %+v (%T)", v5, v5)
+			message := fmt.Sprintf("has unexpected value for operationId: %s", compiler.Display(v5))
 			errors = append(errors, compiler.NewError(context, message))
 		}
 	}
 	// repeated string produces = 6;
 	v6 := compiler.MapValueForKey(m, "produces")
 	if v6 != nil {
-		v, ok := v6.([]interface{})
+		v, ok := compiler.SequenceNodeForNode(v6)
 		if ok {
-			x.Produces = compiler.ConvertInterfaceArrayToStringArray(v)
+			x.Produces = compiler.StringArrayForSequenceNode(v)
 		} else {
-			message := fmt.Sprintf("has unexpected value for produces: %+v (%T)", v6, v6)
+			message := fmt.Sprintf("has unexpected value for produces: %s", compiler.Display(v6))
 			errors = append(errors, compiler.NewError(context, message))
 		}
 	}
 	// repeated string consumes = 7;
 	v7 := compiler.MapValueForKey(m, "consumes")
 	if v7 != nil {
-		v, ok := v7.([]interface{})
+		v, ok := compiler.SequenceNodeForNode(v7)
 		if ok {
-			x.Consumes = compiler.ConvertInterfaceArrayToStringArray(v)
+			x.Consumes = compiler.StringArrayForSequenceNode(v)
 		} else {
-			message := fmt.Sprintf("has unexpected value for consumes: %+v (%T)", v7, v7)
+			message := fmt.Sprintf("has unexpected value for consumes: %s", compiler.Display(v7))
 			errors = append(errors, compiler.NewError(context, message))
 		}
 	}
@@ -3241,10 +3135,10 @@ func NewOperation(in interface{}, context *compiler.Context) (*Operation, error)
 	if v8 != nil {
 		// repeated ParametersItem
 		x.Parameters = make([]*ParametersItem, 0)
-		a, ok := v8.([]interface{})
+		a, ok := compiler.SequenceNodeForNode(v8)
 		if ok {
-			for _, item := range a {
-				y, err := NewParametersItem(item, compiler.NewContext("parameters", context))
+			for _, item := range a.Content {
+				y, err := NewParametersItem(item, compiler.NewContext("parameters", item, context))
 				if err != nil {
 					errors = append(errors, err)
 				}
@@ -3256,7 +3150,7 @@ func NewOperation(in interface{}, context *compiler.Context) (*Operation, error)
 	v9 := compiler.MapValueForKey(m, "responses")
 	if v9 != nil {
 		var err error
-		x.Responses, err = NewResponses(v9, compiler.NewContext("responses", context))
+		x.Responses, err = NewResponses(v9, compiler.NewContext("responses", v9, context))
 		if err != nil {
 			errors = append(errors, err)
 		}
@@ -3264,26 +3158,26 @@ func NewOperation(in interface{}, context *compiler.Context) (*Operation, error)
 	// repeated string schemes = 10;
 	v10 := compiler.MapValueForKey(m, "schemes")
 	if v10 != nil {
-		v, ok := v10.([]interface{})
+		v, ok := compiler.SequenceNodeForNode(v10)
 		if ok {
-			x.Schemes = compiler.ConvertInterfaceArrayToStringArray(v)
+			x.Schemes = compiler.StringArrayForSequenceNode(v)
 		} else {
-			message := fmt.Sprintf("has unexpected value for schemes: %+v (%T)", v10, v10)
+			message := fmt.Sprintf("has unexpected value for schemes: %s", compiler.Display(v10))
 			errors = append(errors, compiler.NewError(context, message))
 		}
 		// check for valid enum values
 		// [http https ws wss]
 		if ok && !compiler.StringArrayContainsValues([]string{"http", "https", "ws", "wss"}, x.Schemes) {
-			message := fmt.Sprintf("has unexpected value for schemes: %+v", v10)
+			message := fmt.Sprintf("has unexpected value for schemes: %s", compiler.Display(v10))
 			errors = append(errors, compiler.NewError(context, message))
 		}
 	}
 	// bool deprecated = 11;
 	v11 := compiler.MapValueForKey(m, "deprecated")
 	if v11 != nil {
-		x.Deprecated, ok = v11.(bool)
+		x.Deprecated, ok = compiler.BoolForScalarNode(v11)
 		if !ok {
-			message := fmt.Sprintf("has unexpected value for deprecated: %+v (%T)", v11, v11)
+			message := fmt.Sprintf("has unexpected value for deprecated: %s", compiler.Display(v11))
 			errors = append(errors, compiler.NewError(context, message))
 		}
 	}
@@ -3292,10 +3186,10 @@ func NewOperation(in interface{}, context *compiler.Context) (*Operation, error)
 	if v12 != nil {
 		// repeated SecurityRequirement
 		x.Security = make([]*SecurityRequirement, 0)
-		a, ok := v12.([]interface{})
+		a, ok := compiler.SequenceNodeForNode(v12)
 		if ok {
-			for _, item := range a {
-				y, err := NewSecurityRequirement(item, compiler.NewContext("security", context))
+			for _, item := range a.Content {
+				y, err := NewSecurityRequirement(item, compiler.NewContext("security", item, context))
 				if err != nil {
 					errors = append(errors, err)
 				}
@@ -3306,26 +3200,26 @@ func NewOperation(in interface{}, context *compiler.Context) (*Operation, error)
 	// repeated NamedAny vendor_extension = 13;
 	// MAP: Any ^x-
 	x.VendorExtension = make([]*NamedAny, 0)
-	for _, item := range m {
-		k, ok := compiler.StringValue(item.Key)
+	for i := 0; i < len(m.Content); i += 2 {
+		k, ok := compiler.StringForScalarNode(m.Content[i])
 		if ok {
-			v := item.Value
+			v := m.Content[i+1]
 			if strings.HasPrefix(k, "x-") {
 				pair := &NamedAny{}
 				pair.Name = k
 				result := &Any{}
-				handled, resultFromExt, err := compiler.HandleExtension(context, v, k)
+				handled, resultFromExt, err := compiler.CallExtension(context, v, k)
 				if handled {
 					if err != nil {
 						errors = append(errors, err)
 					} else {
-						bytes, _ := yaml.Marshal(v)
+						bytes := compiler.Marshal(v)
 						result.Yaml = string(bytes)
 						result.Value = resultFromExt
 						pair.Value = result
 					}
 				} else {
-					pair.Value, err = NewAny(v, compiler.NewContext(k, context))
+					pair.Value, err = NewAny(v, compiler.NewContext(k, v, context))
 					if err != nil {
 						errors = append(errors, err)
 					}
@@ -3339,7 +3233,7 @@ func NewOperation(in interface{}, context *compiler.Context) (*Operation, error)
 }
 
 // NewParameter creates an object of type Parameter if possible, returning an error if not.
-func NewParameter(in interface{}, context *compiler.Context) (*Parameter, error) {
+func NewParameter(in *yaml.Node, context *compiler.Context) (*Parameter, error) {
 	errors := make([]error, 0)
 	x := &Parameter{}
 	matched := false
@@ -3348,7 +3242,7 @@ func NewParameter(in interface{}, context *compiler.Context) (*Parameter, error)
 	m, ok := compiler.UnpackMap(in)
 	if ok {
 		// errors might be ok here, they mean we just don't have the right subtype
-		t, matchingError := NewBodyParameter(m, compiler.NewContext("bodyParameter", context))
+		t, matchingError := NewBodyParameter(m, compiler.NewContext("bodyParameter", m, context))
 		if matchingError == nil {
 			x.Oneof = &Parameter_BodyParameter{BodyParameter: t}
 			matched = true
@@ -3362,7 +3256,7 @@ func NewParameter(in interface{}, context *compiler.Context) (*Parameter, error)
 	m, ok := compiler.UnpackMap(in)
 	if ok {
 		// errors might be ok here, they mean we just don't have the right subtype
-		t, matchingError := NewNonBodyParameter(m, compiler.NewContext("nonBodyParameter", context))
+		t, matchingError := NewNonBodyParameter(m, compiler.NewContext("nonBodyParameter", m, context))
 		if matchingError == nil {
 			x.Oneof = &Parameter_NonBodyParameter{NonBodyParameter: t}
 			matched = true
@@ -3374,12 +3268,16 @@ func NewParameter(in interface{}, context *compiler.Context) (*Parameter, error)
 	if matched {
 		// since the oneof matched one of its possibilities, discard any matching errors
 		errors = make([]error, 0)
+	} else {
+		message := fmt.Sprintf("contains an invalid Parameter")
+		err := compiler.NewError(context, message)
+		errors = []error{err}
 	}
 	return x, compiler.NewErrorGroupOrNil(errors)
 }
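NewParameter's new else branch is behavioral, not cosmetic: previously a parameter that matched neither oneof variant came back as an empty message with no error at all. A simplified sketch of the fallback pattern, where tryBodyParameter and tryNonBodyParameter are hypothetical stand-ins for the generated matchers:

package main

import (
	"fmt"

	"gopkg.in/yaml.v3"
)

// Stand-ins: each attempts one oneof variant and fails if it doesn't fit.
func tryBodyParameter(n *yaml.Node) (string, error)    { return "", fmt.Errorf("no match") }
func tryNonBodyParameter(n *yaml.Node) (string, error) { return "", fmt.Errorf("no match") }

// newParameter mirrors the patched control flow: when no variant matches,
// return exactly one contextual error instead of silently succeeding.
func newParameter(n *yaml.Node) (string, error) {
	if t, err := tryBodyParameter(n); err == nil {
		return t, nil
	}
	if t, err := tryNonBodyParameter(n); err == nil {
		return t, nil
	}
	return "", fmt.Errorf("line %d: contains an invalid Parameter", n.Line)
}

func main() {
	var doc yaml.Node
	_ = yaml.Unmarshal([]byte("in: cookie\n"), &doc)
	_, err := newParameter(doc.Content[0])
	fmt.Println(err)
}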
 
 // NewParameterDefinitions creates an object of type ParameterDefinitions if possible, returning an error if not.
-func NewParameterDefinitions(in interface{}, context *compiler.Context) (*ParameterDefinitions, error) {
+func NewParameterDefinitions(in *yaml.Node, context *compiler.Context) (*ParameterDefinitions, error) {
 	errors := make([]error, 0)
 	x := &ParameterDefinitions{}
 	m, ok := compiler.UnpackMap(in)
@@ -3390,14 +3288,14 @@ func NewParameterDefinitions(in interface{}, context *compiler.Context) (*Parame
 	// repeated NamedParameter additional_properties = 1;
 	// MAP: Parameter
 	x.AdditionalProperties = make([]*NamedParameter, 0)
-	for _, item := range m {
-		k, ok := compiler.StringValue(item.Key)
+	for i := 0; i < len(m.Content); i += 2 {
+		k, ok := compiler.StringForScalarNode(m.Content[i])
 		if ok {
-			v := item.Value
+			v := m.Content[i+1]
 			pair := &NamedParameter{}
 			pair.Name = k
 			var err error
-			pair.Value, err = NewParameter(v, compiler.NewContext(k, context))
+			pair.Value, err = NewParameter(v, compiler.NewContext(k, v, context))
 			if err != nil {
 				errors = append(errors, err)
 			}
@@ -3409,7 +3307,7 @@ func NewParameterDefinitions(in interface{}, context *compiler.Context) (*Parame
 }
 
 // NewParametersItem creates an object of type ParametersItem if possible, returning an error if not.
-func NewParametersItem(in interface{}, context *compiler.Context) (*ParametersItem, error) {
+func NewParametersItem(in *yaml.Node, context *compiler.Context) (*ParametersItem, error) {
 	errors := make([]error, 0)
 	x := &ParametersItem{}
 	matched := false
@@ -3418,7 +3316,7 @@ func NewParametersItem(in interface{}, context *compiler.Context) (*ParametersIt
 	m, ok := compiler.UnpackMap(in)
 	if ok {
 		// errors might be ok here, they mean we just don't have the right subtype
-		t, matchingError := NewParameter(m, compiler.NewContext("parameter", context))
+		t, matchingError := NewParameter(m, compiler.NewContext("parameter", m, context))
 		if matchingError == nil {
 			x.Oneof = &ParametersItem_Parameter{Parameter: t}
 			matched = true
@@ -3432,7 +3330,7 @@ func NewParametersItem(in interface{}, context *compiler.Context) (*ParametersIt
 	m, ok := compiler.UnpackMap(in)
 	if ok {
 		// errors might be ok here, they mean we just don't have the right subtype
-		t, matchingError := NewJsonReference(m, compiler.NewContext("jsonReference", context))
+		t, matchingError := NewJsonReference(m, compiler.NewContext("jsonReference", m, context))
 		if matchingError == nil {
 			x.Oneof = &ParametersItem_JsonReference{JsonReference: t}
 			matched = true
@@ -3444,12 +3342,16 @@ func NewParametersItem(in interface{}, context *compiler.Context) (*ParametersIt
 	if matched {
 		// since the oneof matched one of its possibilities, discard any matching errors
 		errors = make([]error, 0)
+	} else {
+		message := fmt.Sprintf("contains an invalid ParametersItem")
+		err := compiler.NewError(context, message)
+		errors = []error{err}
 	}
 	return x, compiler.NewErrorGroupOrNil(errors)
 }
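Every NewContext call in these hunks now threads the node being parsed alongside its name. The payoff is positional error reporting: a yaml.v3 node knows its own Line and Column. A trimmed-down sketch of a context chain that carries nodes (this context type is an assumption for illustration, not gnostic's actual compiler.Context):

package main

import (
	"fmt"

	"gopkg.in/yaml.v3"
)

type context struct {
	name   string
	node   *yaml.Node
	parent *context
}

func newContext(name string, node *yaml.Node, parent *context) *context {
	return &context{name: name, node: node, parent: parent}
}

// describe joins the logical path and adds the node's source position.
func (c *context) describe() string {
	path := c.name
	for p := c.parent; p != nil; p = p.parent {
		path = p.name + "." + path
	}
	return fmt.Sprintf("%s (line %d, column %d)", path, c.node.Line, c.node.Column)
}

func main() {
	var doc yaml.Node
	_ = yaml.Unmarshal([]byte("paths:\n  /pets:\n    get: {}\n"), &doc)
	root := doc.Content[0]
	paths := root.Content[1] // value node of the "paths" key
	ctx := newContext("paths", paths, newContext("$root", root, nil))
	fmt.Println(ctx.describe())
}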
 
 // NewPathItem creates an object of type PathItem if possible, returning an error if not.
-func NewPathItem(in interface{}, context *compiler.Context) (*PathItem, error) {
+func NewPathItem(in *yaml.Node, context *compiler.Context) (*PathItem, error) {
 	errors := make([]error, 0)
 	x := &PathItem{}
 	m, ok := compiler.UnpackMap(in)
@@ -3467,9 +3369,9 @@ func NewPathItem(in interface{}, context *compiler.Context) (*PathItem, error) {
 	// string _ref = 1;
 	v1 := compiler.MapValueForKey(m, "$ref")
 	if v1 != nil {
-		x.XRef, ok = v1.(string)
+		x.XRef, ok = compiler.StringForScalarNode(v1)
 		if !ok {
-			message := fmt.Sprintf("has unexpected value for $ref: %+v (%T)", v1, v1)
+			message := fmt.Sprintf("has unexpected value for $ref: %s", compiler.Display(v1))
 			errors = append(errors, compiler.NewError(context, message))
 		}
 	}
@@ -3477,7 +3379,7 @@ func NewPathItem(in interface{}, context *compiler.Context) (*PathItem, error) {
 	v2 := compiler.MapValueForKey(m, "get")
 	if v2 != nil {
 		var err error
-		x.Get, err = NewOperation(v2, compiler.NewContext("get", context))
+		x.Get, err = NewOperation(v2, compiler.NewContext("get", v2, context))
 		if err != nil {
 			errors = append(errors, err)
 		}
@@ -3486,7 +3388,7 @@ func NewPathItem(in interface{}, context *compiler.Context) (*PathItem, error) {
 	v3 := compiler.MapValueForKey(m, "put")
 	if v3 != nil {
 		var err error
-		x.Put, err = NewOperation(v3, compiler.NewContext("put", context))
+		x.Put, err = NewOperation(v3, compiler.NewContext("put", v3, context))
 		if err != nil {
 			errors = append(errors, err)
 		}
@@ -3495,7 +3397,7 @@ func NewPathItem(in interface{}, context *compiler.Context) (*PathItem, error) {
 	v4 := compiler.MapValueForKey(m, "post")
 	if v4 != nil {
 		var err error
-		x.Post, err = NewOperation(v4, compiler.NewContext("post", context))
+		x.Post, err = NewOperation(v4, compiler.NewContext("post", v4, context))
 		if err != nil {
 			errors = append(errors, err)
 		}
@@ -3504,7 +3406,7 @@ func NewPathItem(in interface{}, context *compiler.Context) (*PathItem, error) {
 	v5 := compiler.MapValueForKey(m, "delete")
 	if v5 != nil {
 		var err error
-		x.Delete, err = NewOperation(v5, compiler.NewContext("delete", context))
+		x.Delete, err = NewOperation(v5, compiler.NewContext("delete", v5, context))
 		if err != nil {
 			errors = append(errors, err)
 		}
@@ -3513,7 +3415,7 @@ func NewPathItem(in interface{}, context *compiler.Context) (*PathItem, error) {
 	v6 := compiler.MapValueForKey(m, "options")
 	if v6 != nil {
 		var err error
-		x.Options, err = NewOperation(v6, compiler.NewContext("options", context))
+		x.Options, err = NewOperation(v6, compiler.NewContext("options", v6, context))
 		if err != nil {
 			errors = append(errors, err)
 		}
@@ -3522,7 +3424,7 @@ func NewPathItem(in interface{}, context *compiler.Context) (*PathItem, error) {
 	v7 := compiler.MapValueForKey(m, "head")
 	if v7 != nil {
 		var err error
-		x.Head, err = NewOperation(v7, compiler.NewContext("head", context))
+		x.Head, err = NewOperation(v7, compiler.NewContext("head", v7, context))
 		if err != nil {
 			errors = append(errors, err)
 		}
@@ -3531,7 +3433,7 @@ func NewPathItem(in interface{}, context *compiler.Context) (*PathItem, error) {
 	v8 := compiler.MapValueForKey(m, "patch")
 	if v8 != nil {
 		var err error
-		x.Patch, err = NewOperation(v8, compiler.NewContext("patch", context))
+		x.Patch, err = NewOperation(v8, compiler.NewContext("patch", v8, context))
 		if err != nil {
 			errors = append(errors, err)
 		}
@@ -3541,10 +3443,10 @@ func NewPathItem(in interface{}, context *compiler.Context) (*PathItem, error) {
 	if v9 != nil {
 		// repeated ParametersItem
 		x.Parameters = make([]*ParametersItem, 0)
-		a, ok := v9.([]interface{})
+		a, ok := compiler.SequenceNodeForNode(v9)
 		if ok {
-			for _, item := range a {
-				y, err := NewParametersItem(item, compiler.NewContext("parameters", context))
+			for _, item := range a.Content {
+				y, err := NewParametersItem(item, compiler.NewContext("parameters", item, context))
 				if err != nil {
 					errors = append(errors, err)
 				}
@@ -3555,26 +3457,26 @@ func NewPathItem(in interface{}, context *compiler.Context) (*PathItem, error) {
 	// repeated NamedAny vendor_extension = 10;
 	// MAP: Any ^x-
 	x.VendorExtension = make([]*NamedAny, 0)
-	for _, item := range m {
-		k, ok := compiler.StringValue(item.Key)
+	for i := 0; i < len(m.Content); i += 2 {
+		k, ok := compiler.StringForScalarNode(m.Content[i])
 		if ok {
-			v := item.Value
+			v := m.Content[i+1]
 			if strings.HasPrefix(k, "x-") {
 				pair := &NamedAny{}
 				pair.Name = k
 				result := &Any{}
-				handled, resultFromExt, err := compiler.HandleExtension(context, v, k)
+				handled, resultFromExt, err := compiler.CallExtension(context, v, k)
 				if handled {
 					if err != nil {
 						errors = append(errors, err)
 					} else {
-						bytes, _ := yaml.Marshal(v)
+						bytes := compiler.Marshal(v)
 						result.Yaml = string(bytes)
 						result.Value = resultFromExt
 						pair.Value = result
 					}
 				} else {
-					pair.Value, err = NewAny(v, compiler.NewContext(k, context))
+					pair.Value, err = NewAny(v, compiler.NewContext(k, v, context))
 					if err != nil {
 						errors = append(errors, err)
 					}
@@ -3588,7 +3490,7 @@ func NewPathItem(in interface{}, context *compiler.Context) (*PathItem, error) {
 }
 
 // NewPathParameterSubSchema creates an object of type PathParameterSubSchema if possible, returning an error if not.
-func NewPathParameterSubSchema(in interface{}, context *compiler.Context) (*PathParameterSubSchema, error) {
+func NewPathParameterSubSchema(in *yaml.Node, context *compiler.Context) (*PathParameterSubSchema, error) {
 	errors := make([]error, 0)
 	x := &PathParameterSubSchema{}
 	m, ok := compiler.UnpackMap(in)
@@ -3612,66 +3514,66 @@ func NewPathParameterSubSchema(in interface{}, context *compiler.Context) (*Path
 	// bool required = 1;
 	v1 := compiler.MapValueForKey(m, "required")
 	if v1 != nil {
-		x.Required, ok = v1.(bool)
+		x.Required, ok = compiler.BoolForScalarNode(v1)
 		if !ok {
-			message := fmt.Sprintf("has unexpected value for required: %+v (%T)", v1, v1)
+			message := fmt.Sprintf("has unexpected value for required: %s", compiler.Display(v1))
 			errors = append(errors, compiler.NewError(context, message))
 		}
 	}
 	// string in = 2;
 	v2 := compiler.MapValueForKey(m, "in")
 	if v2 != nil {
-		x.In, ok = v2.(string)
+		x.In, ok = compiler.StringForScalarNode(v2)
 		if !ok {
-			message := fmt.Sprintf("has unexpected value for in: %+v (%T)", v2, v2)
+			message := fmt.Sprintf("has unexpected value for in: %s", compiler.Display(v2))
 			errors = append(errors, compiler.NewError(context, message))
 		}
 		// check for valid enum values
 		// [path]
 		if ok && !compiler.StringArrayContainsValue([]string{"path"}, x.In) {
-			message := fmt.Sprintf("has unexpected value for in: %+v (%T)", v2, v2)
+			message := fmt.Sprintf("has unexpected value for in: %s", compiler.Display(v2))
 			errors = append(errors, compiler.NewError(context, message))
 		}
 	}
 	// string description = 3;
 	v3 := compiler.MapValueForKey(m, "description")
 	if v3 != nil {
-		x.Description, ok = v3.(string)
+		x.Description, ok = compiler.StringForScalarNode(v3)
 		if !ok {
-			message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v3, v3)
+			message := fmt.Sprintf("has unexpected value for description: %s", compiler.Display(v3))
 			errors = append(errors, compiler.NewError(context, message))
 		}
 	}
 	// string name = 4;
 	v4 := compiler.MapValueForKey(m, "name")
 	if v4 != nil {
-		x.Name, ok = v4.(string)
+		x.Name, ok = compiler.StringForScalarNode(v4)
 		if !ok {
-			message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v4, v4)
+			message := fmt.Sprintf("has unexpected value for name: %s", compiler.Display(v4))
 			errors = append(errors, compiler.NewError(context, message))
 		}
 	}
 	// string type = 5;
 	v5 := compiler.MapValueForKey(m, "type")
 	if v5 != nil {
-		x.Type, ok = v5.(string)
+		x.Type, ok = compiler.StringForScalarNode(v5)
 		if !ok {
-			message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v5, v5)
+			message := fmt.Sprintf("has unexpected value for type: %s", compiler.Display(v5))
 			errors = append(errors, compiler.NewError(context, message))
 		}
 		// check for valid enum values
 		// [string number boolean integer array]
 		if ok && !compiler.StringArrayContainsValue([]string{"string", "number", "boolean", "integer", "array"}, x.Type) {
-			message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v5, v5)
+			message := fmt.Sprintf("has unexpected value for type: %s", compiler.Display(v5))
 			errors = append(errors, compiler.NewError(context, message))
 		}
 	}
 	// string format = 6;
 	v6 := compiler.MapValueForKey(m, "format")
 	if v6 != nil {
-		x.Format, ok = v6.(string)
+		x.Format, ok = compiler.StringForScalarNode(v6)
 		if !ok {
-			message := fmt.Sprintf("has unexpected value for format: %+v (%T)", v6, v6)
+			message := fmt.Sprintf("has unexpected value for format: %s", compiler.Display(v6))
 			errors = append(errors, compiler.NewError(context, message))
 		}
 	}
@@ -3679,7 +3581,7 @@ func NewPathParameterSubSchema(in interface{}, context *compiler.Context) (*Path
 	v7 := compiler.MapValueForKey(m, "items")
 	if v7 != nil {
 		var err error
-		x.Items, err = NewPrimitivesItems(v7, compiler.NewContext("items", context))
+		x.Items, err = NewPrimitivesItems(v7, compiler.NewContext("items", v7, context))
 		if err != nil {
 			errors = append(errors, err)
 		}
@@ -3687,15 +3589,15 @@ func NewPathParameterSubSchema(in interface{}, context *compiler.Context) (*Path
 	// string collection_format = 8;
 	v8 := compiler.MapValueForKey(m, "collectionFormat")
 	if v8 != nil {
-		x.CollectionFormat, ok = v8.(string)
+		x.CollectionFormat, ok = compiler.StringForScalarNode(v8)
 		if !ok {
-			message := fmt.Sprintf("has unexpected value for collectionFormat: %+v (%T)", v8, v8)
+			message := fmt.Sprintf("has unexpected value for collectionFormat: %s", compiler.Display(v8))
 			errors = append(errors, compiler.NewError(context, message))
 		}
 		// check for valid enum values
 		// [csv ssv tsv pipes]
 		if ok && !compiler.StringArrayContainsValue([]string{"csv", "ssv", "tsv", "pipes"}, x.CollectionFormat) {
-			message := fmt.Sprintf("has unexpected value for collectionFormat: %+v (%T)", v8, v8)
+			message := fmt.Sprintf("has unexpected value for collectionFormat: %s", compiler.Display(v8))
 			errors = append(errors, compiler.NewError(context, message))
 		}
 	}
@@ -3703,7 +3605,7 @@ func NewPathParameterSubSchema(in interface{}, context *compiler.Context) (*Path
 	v9 := compiler.MapValueForKey(m, "default")
 	if v9 != nil {
 		var err error
-		x.Default, err = NewAny(v9, compiler.NewContext("default", context))
+		x.Default, err = NewAny(v9, compiler.NewContext("default", v9, context))
 		if err != nil {
 			errors = append(errors, err)
 		}
@@ -3711,126 +3613,102 @@ func NewPathParameterSubSchema(in interface{}, context *compiler.Context) (*Path
 	// float maximum = 10;
 	v10 := compiler.MapValueForKey(m, "maximum")
 	if v10 != nil {
-		switch v10 := v10.(type) {
-		case float64:
-			x.Maximum = v10
-		case float32:
-			x.Maximum = float64(v10)
-		case uint64:
-			x.Maximum = float64(v10)
-		case uint32:
-			x.Maximum = float64(v10)
-		case int64:
-			x.Maximum = float64(v10)
-		case int32:
-			x.Maximum = float64(v10)
-		case int:
-			x.Maximum = float64(v10)
-		default:
-			message := fmt.Sprintf("has unexpected value for maximum: %+v (%T)", v10, v10)
+		v, ok := compiler.FloatForScalarNode(v10)
+		if ok {
+			x.Maximum = v
+		} else {
+			message := fmt.Sprintf("has unexpected value for maximum: %s", compiler.Display(v10))
 			errors = append(errors, compiler.NewError(context, message))
 		}
 	}
 	// bool exclusive_maximum = 11;
 	v11 := compiler.MapValueForKey(m, "exclusiveMaximum")
 	if v11 != nil {
-		x.ExclusiveMaximum, ok = v11.(bool)
+		x.ExclusiveMaximum, ok = compiler.BoolForScalarNode(v11)
 		if !ok {
-			message := fmt.Sprintf("has unexpected value for exclusiveMaximum: %+v (%T)", v11, v11)
+			message := fmt.Sprintf("has unexpected value for exclusiveMaximum: %s", compiler.Display(v11))
 			errors = append(errors, compiler.NewError(context, message))
 		}
 	}
 	// float minimum = 12;
 	v12 := compiler.MapValueForKey(m, "minimum")
 	if v12 != nil {
-		switch v12 := v12.(type) {
-		case float64:
-			x.Minimum = v12
-		case float32:
-			x.Minimum = float64(v12)
-		case uint64:
-			x.Minimum = float64(v12)
-		case uint32:
-			x.Minimum = float64(v12)
-		case int64:
-			x.Minimum = float64(v12)
-		case int32:
-			x.Minimum = float64(v12)
-		case int:
-			x.Minimum = float64(v12)
-		default:
-			message := fmt.Sprintf("has unexpected value for minimum: %+v (%T)", v12, v12)
+		v, ok := compiler.FloatForScalarNode(v12)
+		if ok {
+			x.Minimum = v
+		} else {
+			message := fmt.Sprintf("has unexpected value for minimum: %s", compiler.Display(v12))
 			errors = append(errors, compiler.NewError(context, message))
 		}
 	}
 	// bool exclusive_minimum = 13;
 	v13 := compiler.MapValueForKey(m, "exclusiveMinimum")
 	if v13 != nil {
-		x.ExclusiveMinimum, ok = v13.(bool)
+		x.ExclusiveMinimum, ok = compiler.BoolForScalarNode(v13)
 		if !ok {
-			message := fmt.Sprintf("has unexpected value for exclusiveMinimum: %+v (%T)", v13, v13)
+			message := fmt.Sprintf("has unexpected value for exclusiveMinimum: %s", compiler.Display(v13))
 			errors = append(errors, compiler.NewError(context, message))
 		}
 	}
 	// int64 max_length = 14;
 	v14 := compiler.MapValueForKey(m, "maxLength")
 	if v14 != nil {
-		t, ok := v14.(int)
+		t, ok := compiler.IntForScalarNode(v14)
 		if ok {
 			x.MaxLength = int64(t)
 		} else {
-			message := fmt.Sprintf("has unexpected value for maxLength: %+v (%T)", v14, v14)
+			message := fmt.Sprintf("has unexpected value for maxLength: %s", compiler.Display(v14))
 			errors = append(errors, compiler.NewError(context, message))
 		}
 	}
 	// int64 min_length = 15;
 	v15 := compiler.MapValueForKey(m, "minLength")
 	if v15 != nil {
-		t, ok := v15.(int)
+		t, ok := compiler.IntForScalarNode(v15)
 		if ok {
 			x.MinLength = int64(t)
 		} else {
-			message := fmt.Sprintf("has unexpected value for minLength: %+v (%T)", v15, v15)
+			message := fmt.Sprintf("has unexpected value for minLength: %s", compiler.Display(v15))
 			errors = append(errors, compiler.NewError(context, message))
 		}
 	}
 	// string pattern = 16;
 	v16 := compiler.MapValueForKey(m, "pattern")
 	if v16 != nil {
-		x.Pattern, ok = v16.(string)
+		x.Pattern, ok = compiler.StringForScalarNode(v16)
 		if !ok {
-			message := fmt.Sprintf("has unexpected value for pattern: %+v (%T)", v16, v16)
+			message := fmt.Sprintf("has unexpected value for pattern: %s", compiler.Display(v16))
 			errors = append(errors, compiler.NewError(context, message))
 		}
 	}
 	// int64 max_items = 17;
 	v17 := compiler.MapValueForKey(m, "maxItems")
 	if v17 != nil {
-		t, ok := v17.(int)
+		t, ok := compiler.IntForScalarNode(v17)
 		if ok {
 			x.MaxItems = int64(t)
 		} else {
-			message := fmt.Sprintf("has unexpected value for maxItems: %+v (%T)", v17, v17)
+			message := fmt.Sprintf("has unexpected value for maxItems: %s", compiler.Display(v17))
 			errors = append(errors, compiler.NewError(context, message))
 		}
 	}
 	// int64 min_items = 18;
 	v18 := compiler.MapValueForKey(m, "minItems")
 	if v18 != nil {
-		t, ok := v18.(int)
+		t, ok := compiler.IntForScalarNode(v18)
 		if ok {
 			x.MinItems = int64(t)
 		} else {
-			message := fmt.Sprintf("has unexpected value for minItems: %+v (%T)", v18, v18)
+			message := fmt.Sprintf("has unexpected value for minItems: %s", compiler.Display(v18))
 			errors = append(errors, compiler.NewError(context, message))
 		}
 	}
 	// bool unique_items = 19;
 	v19 := compiler.MapValueForKey(m, "uniqueItems")
 	if v19 != nil {
-		x.UniqueItems, ok = v19.(bool)
+		x.UniqueItems, ok = compiler.BoolForScalarNode(v19)
 		if !ok {
-			message := fmt.Sprintf("has unexpected value for uniqueItems: %+v (%T)", v19, v19)
+			message := fmt.Sprintf("has unexpected value for uniqueItems: %s", compiler.Display(v19))
 			errors = append(errors, compiler.NewError(context, message))
 		}
 	}
@@ -3839,10 +3717,10 @@ func NewPathParameterSubSchema(in interface{}, context *compiler.Context) (*Path
 	if v20 != nil {
 		// repeated Any
 		x.Enum = make([]*Any, 0)
-		a, ok := v20.([]interface{})
+		a, ok := compiler.SequenceNodeForNode(v20)
 		if ok {
-			for _, item := range a {
-				y, err := NewAny(item, compiler.NewContext("enum", context))
+			for _, item := range a.Content {
+				y, err := NewAny(item, compiler.NewContext("enum", item, context))
 				if err != nil {
 					errors = append(errors, err)
 				}
@@ -3853,49 +3731,37 @@ func NewPathParameterSubSchema(in interface{}, context *compiler.Context) (*Path
 	// float multiple_of = 21;
 	v21 := compiler.MapValueForKey(m, "multipleOf")
 	if v21 != nil {
-		switch v21 := v21.(type) {
-		case float64:
-			x.MultipleOf = v21
-		case float32:
-			x.MultipleOf = float64(v21)
-		case uint64:
-			x.MultipleOf = float64(v21)
-		case uint32:
-			x.MultipleOf = float64(v21)
-		case int64:
-			x.MultipleOf = float64(v21)
-		case int32:
-			x.MultipleOf = float64(v21)
-		case int:
-			x.MultipleOf = float64(v21)
-		default:
-			message := fmt.Sprintf("has unexpected value for multipleOf: %+v (%T)", v21, v21)
+		v, ok := compiler.FloatForScalarNode(v21)
+		if ok {
+			x.MultipleOf = v
+		} else {
+			message := fmt.Sprintf("has unexpected value for multipleOf: %s", compiler.Display(v21))
 			errors = append(errors, compiler.NewError(context, message))
 		}
 	}
 	// repeated NamedAny vendor_extension = 22;
 	// MAP: Any ^x-
 	x.VendorExtension = make([]*NamedAny, 0)
-	for _, item := range m {
-		k, ok := compiler.StringValue(item.Key)
+	for i := 0; i < len(m.Content); i += 2 {
+		k, ok := compiler.StringForScalarNode(m.Content[i])
 		if ok {
-			v := item.Value
+			v := m.Content[i+1]
 			if strings.HasPrefix(k, "x-") {
 				pair := &NamedAny{}
 				pair.Name = k
 				result := &Any{}
-				handled, resultFromExt, err := compiler.HandleExtension(context, v, k)
+				handled, resultFromExt, err := compiler.CallExtension(context, v, k)
 				if handled {
 					if err != nil {
 						errors = append(errors, err)
 					} else {
-						bytes, _ := yaml.Marshal(v)
+						bytes := compiler.Marshal(v)
 						result.Yaml = string(bytes)
 						result.Value = resultFromExt
 						pair.Value = result
 					}
 				} else {
-					pair.Value, err = NewAny(v, compiler.NewContext(k, context))
+					pair.Value, err = NewAny(v, compiler.NewContext(k, v, context))
 					if err != nil {
 						errors = append(errors, err)
 					}
@@ -3909,7 +3775,7 @@ func NewPathParameterSubSchema(in interface{}, context *compiler.Context) (*Path
 }
 
 // NewPaths creates an object of type Paths if possible, returning an error if not.
-func NewPaths(in interface{}, context *compiler.Context) (*Paths, error) {
+func NewPaths(in *yaml.Node, context *compiler.Context) (*Paths, error) {
 	errors := make([]error, 0)
 	x := &Paths{}
 	m, ok := compiler.UnpackMap(in)
@@ -3927,26 +3793,26 @@ func NewPaths(in interface{}, context *compiler.Context) (*Paths, error) {
 	// repeated NamedAny vendor_extension = 1;
 	// MAP: Any ^x-
 	x.VendorExtension = make([]*NamedAny, 0)
-	for _, item := range m {
-		k, ok := compiler.StringValue(item.Key)
+	for i := 0; i < len(m.Content); i += 2 {
+		k, ok := compiler.StringForScalarNode(m.Content[i])
 		if ok {
-			v := item.Value
+			v := m.Content[i+1]
 			if strings.HasPrefix(k, "x-") {
 				pair := &NamedAny{}
 				pair.Name = k
 				result := &Any{}
-				handled, resultFromExt, err := compiler.HandleExtension(context, v, k)
+				handled, resultFromExt, err := compiler.CallExtension(context, v, k)
 				if handled {
 					if err != nil {
 						errors = append(errors, err)
 					} else {
-						bytes, _ := yaml.Marshal(v)
+						bytes := compiler.Marshal(v)
 						result.Yaml = string(bytes)
 						result.Value = resultFromExt
 						pair.Value = result
 					}
 				} else {
-					pair.Value, err = NewAny(v, compiler.NewContext(k, context))
+					pair.Value, err = NewAny(v, compiler.NewContext(k, v, context))
 					if err != nil {
 						errors = append(errors, err)
 					}
@@ -3958,15 +3824,15 @@ func NewPaths(in interface{}, context *compiler.Context) (*Paths, error) {
 	// repeated NamedPathItem path = 2;
 	// MAP: PathItem ^/
 	x.Path = make([]*NamedPathItem, 0)
-	for _, item := range m {
-		k, ok := compiler.StringValue(item.Key)
+	for i := 0; i < len(m.Content); i += 2 {
+		k, ok := compiler.StringForScalarNode(m.Content[i])
 		if ok {
-			v := item.Value
+			v := m.Content[i+1]
 			if strings.HasPrefix(k, "/") {
 				pair := &NamedPathItem{}
 				pair.Name = k
 				var err error
-				pair.Value, err = NewPathItem(v, compiler.NewContext(k, context))
+				pair.Value, err = NewPathItem(v, compiler.NewContext(k, v, context))
 				if err != nil {
 					errors = append(errors, err)
 				}
@@ -3979,7 +3845,7 @@ func NewPaths(in interface{}, context *compiler.Context) (*Paths, error) {
 }
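NewPaths makes two passes over the same mapping node, routing keys by prefix: "x-" keys become vendor extensions and "/" keys become path items. A compact sketch of that routing over a raw yaml.Node (the input document is illustrative):

package main

import (
	"fmt"
	"strings"

	"gopkg.in/yaml.v3"
)

func main() {
	src := "x-generator: demo\n/pets: {}\n/pets/{id}: {}\n"
	var doc yaml.Node
	if err := yaml.Unmarshal([]byte(src), &doc); err != nil {
		panic(err)
	}
	m := doc.Content[0]
	var extensions, paths []string
	for i := 0; i < len(m.Content); i += 2 {
		// Route each key by its prefix, mirroring the two MAP passes above.
		switch k := m.Content[i].Value; {
		case strings.HasPrefix(k, "x-"):
			extensions = append(extensions, k)
		case strings.HasPrefix(k, "/"):
			paths = append(paths, k)
		}
	}
	fmt.Println("extensions:", extensions)
	fmt.Println("paths:", paths)
}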
 
 // NewPrimitivesItems creates an object of type PrimitivesItems if possible, returning an error if not.
-func NewPrimitivesItems(in interface{}, context *compiler.Context) (*PrimitivesItems, error) {
+func NewPrimitivesItems(in *yaml.Node, context *compiler.Context) (*PrimitivesItems, error) {
 	errors := make([]error, 0)
 	x := &PrimitivesItems{}
 	m, ok := compiler.UnpackMap(in)
@@ -3997,24 +3863,24 @@ func NewPrimitivesItems(in interface{}, context *compiler.Context) (*PrimitivesI
 	// string type = 1;
 	v1 := compiler.MapValueForKey(m, "type")
 	if v1 != nil {
-		x.Type, ok = v1.(string)
+		x.Type, ok = compiler.StringForScalarNode(v1)
 		if !ok {
-			message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1)
+			message := fmt.Sprintf("has unexpected value for type: %s", compiler.Display(v1))
 			errors = append(errors, compiler.NewError(context, message))
 		}
 		// check for valid enum values
 		// [string number integer boolean array]
 		if ok && !compiler.StringArrayContainsValue([]string{"string", "number", "integer", "boolean", "array"}, x.Type) {
-			message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1)
+			message := fmt.Sprintf("has unexpected value for type: %s", compiler.Display(v1))
 			errors = append(errors, compiler.NewError(context, message))
 		}
 	}
 	// string format = 2;
 	v2 := compiler.MapValueForKey(m, "format")
 	if v2 != nil {
-		x.Format, ok = v2.(string)
+		x.Format, ok = compiler.StringForScalarNode(v2)
 		if !ok {
-			message := fmt.Sprintf("has unexpected value for format: %+v (%T)", v2, v2)
+			message := fmt.Sprintf("has unexpected value for format: %s", compiler.Display(v2))
 			errors = append(errors, compiler.NewError(context, message))
 		}
 	}
@@ -4022,7 +3888,7 @@ func NewPrimitivesItems(in interface{}, context *compiler.Context) (*PrimitivesI
 	v3 := compiler.MapValueForKey(m, "items")
 	if v3 != nil {
 		var err error
-		x.Items, err = NewPrimitivesItems(v3, compiler.NewContext("items", context))
+		x.Items, err = NewPrimitivesItems(v3, compiler.NewContext("items", v3, context))
 		if err != nil {
 			errors = append(errors, err)
 		}
@@ -4030,15 +3896,15 @@ func NewPrimitivesItems(in interface{}, context *compiler.Context) (*PrimitivesI
 	// string collection_format = 4;
 	v4 := compiler.MapValueForKey(m, "collectionFormat")
 	if v4 != nil {
-		x.CollectionFormat, ok = v4.(string)
+		x.CollectionFormat, ok = compiler.StringForScalarNode(v4)
 		if !ok {
-			message := fmt.Sprintf("has unexpected value for collectionFormat: %+v (%T)", v4, v4)
+			message := fmt.Sprintf("has unexpected value for collectionFormat: %s", compiler.Display(v4))
 			errors = append(errors, compiler.NewError(context, message))
 		}
 		// check for valid enum values
 		// [csv ssv tsv pipes]
 		if ok && !compiler.StringArrayContainsValue([]string{"csv", "ssv", "tsv", "pipes"}, x.CollectionFormat) {
-			message := fmt.Sprintf("has unexpected value for collectionFormat: %+v (%T)", v4, v4)
+			message := fmt.Sprintf("has unexpected value for collectionFormat: %s", compiler.Display(v4))
 			errors = append(errors, compiler.NewError(context, message))
 		}
 	}
@@ -4046,7 +3912,7 @@ func NewPrimitivesItems(in interface{}, context *compiler.Context) (*PrimitivesI
 	v5 := compiler.MapValueForKey(m, "default")
 	if v5 != nil {
 		var err error
-		x.Default, err = NewAny(v5, compiler.NewContext("default", context))
+		x.Default, err = NewAny(v5, compiler.NewContext("default", v5, context))
 		if err != nil {
 			errors = append(errors, err)
 		}
@@ -4054,126 +3920,102 @@ func NewPrimitivesItems(in interface{}, context *compiler.Context) (*PrimitivesI
 	// float maximum = 6;
 	v6 := compiler.MapValueForKey(m, "maximum")
 	if v6 != nil {
-		switch v6 := v6.(type) {
-		case float64:
-			x.Maximum = v6
-		case float32:
-			x.Maximum = float64(v6)
-		case uint64:
-			x.Maximum = float64(v6)
-		case uint32:
-			x.Maximum = float64(v6)
-		case int64:
-			x.Maximum = float64(v6)
-		case int32:
-			x.Maximum = float64(v6)
-		case int:
-			x.Maximum = float64(v6)
-		default:
-			message := fmt.Sprintf("has unexpected value for maximum: %+v (%T)", v6, v6)
+		v, ok := compiler.FloatForScalarNode(v6)
+		if ok {
+			x.Maximum = v
+		} else {
+			message := fmt.Sprintf("has unexpected value for maximum: %s", compiler.Display(v6))
 			errors = append(errors, compiler.NewError(context, message))
 		}
 	}
 	// bool exclusive_maximum = 7;
 	v7 := compiler.MapValueForKey(m, "exclusiveMaximum")
 	if v7 != nil {
-		x.ExclusiveMaximum, ok = v7.(bool)
+		x.ExclusiveMaximum, ok = compiler.BoolForScalarNode(v7)
 		if !ok {
-			message := fmt.Sprintf("has unexpected value for exclusiveMaximum: %+v (%T)", v7, v7)
+			message := fmt.Sprintf("has unexpected value for exclusiveMaximum: %s", compiler.Display(v7))
 			errors = append(errors, compiler.NewError(context, message))
 		}
 	}
 	// float minimum = 8;
 	v8 := compiler.MapValueForKey(m, "minimum")
 	if v8 != nil {
-		switch v8 := v8.(type) {
-		case float64:
-			x.Minimum = v8
-		case float32:
-			x.Minimum = float64(v8)
-		case uint64:
-			x.Minimum = float64(v8)
-		case uint32:
-			x.Minimum = float64(v8)
-		case int64:
-			x.Minimum = float64(v8)
-		case int32:
-			x.Minimum = float64(v8)
-		case int:
-			x.Minimum = float64(v8)
-		default:
-			message := fmt.Sprintf("has unexpected value for minimum: %+v (%T)", v8, v8)
+		v, ok := compiler.FloatForScalarNode(v8)
+		if ok {
+			x.Minimum = v
+		} else {
+			message := fmt.Sprintf("has unexpected value for minimum: %s", compiler.Display(v8))
 			errors = append(errors, compiler.NewError(context, message))
 		}
 	}
 	// bool exclusive_minimum = 9;
 	v9 := compiler.MapValueForKey(m, "exclusiveMinimum")
 	if v9 != nil {
-		x.ExclusiveMinimum, ok = v9.(bool)
+		x.ExclusiveMinimum, ok = compiler.BoolForScalarNode(v9)
 		if !ok {
-			message := fmt.Sprintf("has unexpected value for exclusiveMinimum: %+v (%T)", v9, v9)
+			message := fmt.Sprintf("has unexpected value for exclusiveMinimum: %s", compiler.Display(v9))
 			errors = append(errors, compiler.NewError(context, message))
 		}
 	}
 	// int64 max_length = 10;
 	v10 := compiler.MapValueForKey(m, "maxLength")
 	if v10 != nil {
-		t, ok := v10.(int)
+		t, ok := compiler.IntForScalarNode(v10)
 		if ok {
 			x.MaxLength = int64(t)
 		} else {
-			message := fmt.Sprintf("has unexpected value for maxLength: %+v (%T)", v10, v10)
+			message := fmt.Sprintf("has unexpected value for maxLength: %s", compiler.Display(v10))
 			errors = append(errors, compiler.NewError(context, message))
 		}
 	}
 	// int64 min_length = 11;
 	v11 := compiler.MapValueForKey(m, "minLength")
 	if v11 != nil {
-		t, ok := v11.(int)
+		t, ok := compiler.IntForScalarNode(v11)
 		if ok {
 			x.MinLength = int64(t)
 		} else {
-			message := fmt.Sprintf("has unexpected value for minLength: %+v (%T)", v11, v11)
+			message := fmt.Sprintf("has unexpected value for minLength: %s", compiler.Display(v11))
 			errors = append(errors, compiler.NewError(context, message))
 		}
 	}
 	// string pattern = 12;
 	v12 := compiler.MapValueForKey(m, "pattern")
 	if v12 != nil {
-		x.Pattern, ok = v12.(string)
+		x.Pattern, ok = compiler.StringForScalarNode(v12)
 		if !ok {
-			message := fmt.Sprintf("has unexpected value for pattern: %+v (%T)", v12, v12)
+			message := fmt.Sprintf("has unexpected value for pattern: %s", compiler.Display(v12))
 			errors = append(errors, compiler.NewError(context, message))
 		}
 	}
 	// int64 max_items = 13;
 	v13 := compiler.MapValueForKey(m, "maxItems")
 	if v13 != nil {
-		t, ok := v13.(int)
+		t, ok := compiler.IntForScalarNode(v13)
 		if ok {
 			x.MaxItems = int64(t)
 		} else {
-			message := fmt.Sprintf("has unexpected value for maxItems: %+v (%T)", v13, v13)
+			message := fmt.Sprintf("has unexpected value for maxItems: %s", compiler.Display(v13))
 			errors = append(errors, compiler.NewError(context, message))
 		}
 	}
 	// int64 min_items = 14;
 	v14 := compiler.MapValueForKey(m, "minItems")
 	if v14 != nil {
-		t, ok := v14.(int)
+		t, ok := compiler.IntForScalarNode(v14)
 		if ok {
 			x.MinItems = int64(t)
 		} else {
-			message := fmt.Sprintf("has unexpected value for minItems: %+v (%T)", v14, v14)
+			message := fmt.Sprintf("has unexpected value for minItems: %s", compiler.Display(v14))
 			errors = append(errors, compiler.NewError(context, message))
 		}
 	}
 	// bool unique_items = 15;
 	v15 := compiler.MapValueForKey(m, "uniqueItems")
 	if v15 != nil {
-		x.UniqueItems, ok = v15.(bool)
+		x.UniqueItems, ok = compiler.BoolForScalarNode(v15)
 		if !ok {
-			message := fmt.Sprintf("has unexpected value for uniqueItems: %+v (%T)", v15, v15)
+			message := fmt.Sprintf("has unexpected value for uniqueItems: %s", compiler.Display(v15))
 			errors = append(errors, compiler.NewError(context, message))
 		}
 	}
@@ -4182,10 +4024,10 @@ func NewPrimitivesItems(in interface{}, context *compiler.Context) (*PrimitivesI
 	if v16 != nil {
 		// repeated Any
 		x.Enum = make([]*Any, 0)
-		a, ok := v16.([]interface{})
+		a, ok := compiler.SequenceNodeForNode(v16)
 		if ok {
-			for _, item := range a {
-				y, err := NewAny(item, compiler.NewContext("enum", context))
+			for _, item := range a.Content {
+				y, err := NewAny(item, compiler.NewContext("enum", item, context))
 				if err != nil {
 					errors = append(errors, err)
 				}
@@ -4196,49 +4038,37 @@ func NewPrimitivesItems(in interface{}, context *compiler.Context) (*PrimitivesI
 	// float multiple_of = 17;
 	v17 := compiler.MapValueForKey(m, "multipleOf")
 	if v17 != nil {
-		switch v17 := v17.(type) {
-		case float64:
-			x.MultipleOf = v17
-		case float32:
-			x.MultipleOf = float64(v17)
-		case uint64:
-			x.MultipleOf = float64(v17)
-		case uint32:
-			x.MultipleOf = float64(v17)
-		case int64:
-			x.MultipleOf = float64(v17)
-		case int32:
-			x.MultipleOf = float64(v17)
-		case int:
-			x.MultipleOf = float64(v17)
-		default:
-			message := fmt.Sprintf("has unexpected value for multipleOf: %+v (%T)", v17, v17)
+		v, ok := compiler.FloatForScalarNode(v17)
+		if ok {
+			x.MultipleOf = v
+		} else {
+			message := fmt.Sprintf("has unexpected value for multipleOf: %s", compiler.Display(v17))
 			errors = append(errors, compiler.NewError(context, message))
 		}
 	}
 	// repeated NamedAny vendor_extension = 18;
 	// MAP: Any ^x-
 	x.VendorExtension = make([]*NamedAny, 0)
-	for _, item := range m {
-		k, ok := compiler.StringValue(item.Key)
+	for i := 0; i < len(m.Content); i += 2 {
+		k, ok := compiler.StringForScalarNode(m.Content[i])
 		if ok {
-			v := item.Value
+			v := m.Content[i+1]
 			if strings.HasPrefix(k, "x-") {
 				pair := &NamedAny{}
 				pair.Name = k
 				result := &Any{}
-				handled, resultFromExt, err := compiler.HandleExtension(context, v, k)
+				handled, resultFromExt, err := compiler.CallExtension(context, v, k)
 				if handled {
 					if err != nil {
 						errors = append(errors, err)
 					} else {
-						bytes, _ := yaml.Marshal(v)
+						bytes := compiler.Marshal(v)
 						result.Yaml = string(bytes)
 						result.Value = resultFromExt
 						pair.Value = result
 					}
 				} else {
-					pair.Value, err = NewAny(v, compiler.NewContext(k, context))
+					pair.Value, err = NewAny(v, compiler.NewContext(k, v, context))
 					if err != nil {
 						errors = append(errors, err)
 					}
@@ -4252,7 +4082,7 @@ func NewPrimitivesItems(in interface{}, context *compiler.Context) (*PrimitivesI
 }
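The seven-case type switches deleted throughout NewPrimitivesItems and both parameter sub-schemas collapse into single helper calls because yaml.v3 keeps scalars as tagged text until they are decoded. A hypothetical helper in the spirit of compiler.FloatForScalarNode (an assumption about its shape; the real helper may be stricter about node tags):

package main

import (
	"fmt"
	"strconv"

	"gopkg.in/yaml.v3"
)

// floatForScalarNode parses a scalar node's text as a float64; one string
// parse replaces the old switch over float64/float32/uint64/uint32/int64/...
func floatForScalarNode(n *yaml.Node) (float64, bool) {
	if n == nil || n.Kind != yaml.ScalarNode {
		return 0, false
	}
	f, err := strconv.ParseFloat(n.Value, 64)
	return f, err == nil
}

func main() {
	var doc yaml.Node
	_ = yaml.Unmarshal([]byte("maximum: 100\nminimum: 0.5\npattern: abc\n"), &doc)
	m := doc.Content[0]
	for i := 0; i < len(m.Content); i += 2 {
		if v, ok := floatForScalarNode(m.Content[i+1]); ok {
			fmt.Printf("%s = %v\n", m.Content[i].Value, v)
		}
	}
}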
// NewProperties creates an object of type Properties if possible, returning an error if not. -func NewProperties(in interface{}, context *compiler.Context) (*Properties, error) { +func NewProperties(in *yaml.Node, context *compiler.Context) (*Properties, error) { errors := make([]error, 0) x := &Properties{} m, ok := compiler.UnpackMap(in) @@ -4263,14 +4093,14 @@ func NewProperties(in interface{}, context *compiler.Context) (*Properties, erro // repeated NamedSchema additional_properties = 1; // MAP: Schema x.AdditionalProperties = make([]*NamedSchema, 0) - for _, item := range m { - k, ok := compiler.StringValue(item.Key) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) if ok { - v := item.Value + v := m.Content[i+1] pair := &NamedSchema{} pair.Name = k var err error - pair.Value, err = NewSchema(v, compiler.NewContext(k, context)) + pair.Value, err = NewSchema(v, compiler.NewContext(k, v, context)) if err != nil { errors = append(errors, err) } @@ -4282,7 +4112,7 @@ func NewProperties(in interface{}, context *compiler.Context) (*Properties, erro } // NewQueryParameterSubSchema creates an object of type QueryParameterSubSchema if possible, returning an error if not. -func NewQueryParameterSubSchema(in interface{}, context *compiler.Context) (*QueryParameterSubSchema, error) { +func NewQueryParameterSubSchema(in *yaml.Node, context *compiler.Context) (*QueryParameterSubSchema, error) { errors := make([]error, 0) x := &QueryParameterSubSchema{} m, ok := compiler.UnpackMap(in) @@ -4300,75 +4130,75 @@ func NewQueryParameterSubSchema(in interface{}, context *compiler.Context) (*Que // bool required = 1; v1 := compiler.MapValueForKey(m, "required") if v1 != nil { - x.Required, ok = v1.(bool) + x.Required, ok = compiler.BoolForScalarNode(v1) if !ok { - message := fmt.Sprintf("has unexpected value for required: %+v (%T)", v1, v1) + message := fmt.Sprintf("has unexpected value for required: %s", compiler.Display(v1)) errors = append(errors, compiler.NewError(context, message)) } } // string in = 2; v2 := compiler.MapValueForKey(m, "in") if v2 != nil { - x.In, ok = v2.(string) + x.In, ok = compiler.StringForScalarNode(v2) if !ok { - message := fmt.Sprintf("has unexpected value for in: %+v (%T)", v2, v2) + message := fmt.Sprintf("has unexpected value for in: %s", compiler.Display(v2)) errors = append(errors, compiler.NewError(context, message)) } // check for valid enum values // [query] if ok && !compiler.StringArrayContainsValue([]string{"query"}, x.In) { - message := fmt.Sprintf("has unexpected value for in: %+v (%T)", v2, v2) + message := fmt.Sprintf("has unexpected value for in: %s", compiler.Display(v2)) errors = append(errors, compiler.NewError(context, message)) } } // string description = 3; v3 := compiler.MapValueForKey(m, "description") if v3 != nil { - x.Description, ok = v3.(string) + x.Description, ok = compiler.StringForScalarNode(v3) if !ok { - message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v3, v3) + message := fmt.Sprintf("has unexpected value for description: %s", compiler.Display(v3)) errors = append(errors, compiler.NewError(context, message)) } } // string name = 4; v4 := compiler.MapValueForKey(m, "name") if v4 != nil { - x.Name, ok = v4.(string) + x.Name, ok = compiler.StringForScalarNode(v4) if !ok { - message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v4, v4) + message := fmt.Sprintf("has unexpected value for name: %s", compiler.Display(v4)) errors = append(errors, 
compiler.NewError(context, message)) } } // bool allow_empty_value = 5; v5 := compiler.MapValueForKey(m, "allowEmptyValue") if v5 != nil { - x.AllowEmptyValue, ok = v5.(bool) + x.AllowEmptyValue, ok = compiler.BoolForScalarNode(v5) if !ok { - message := fmt.Sprintf("has unexpected value for allowEmptyValue: %+v (%T)", v5, v5) + message := fmt.Sprintf("has unexpected value for allowEmptyValue: %s", compiler.Display(v5)) errors = append(errors, compiler.NewError(context, message)) } } // string type = 6; v6 := compiler.MapValueForKey(m, "type") if v6 != nil { - x.Type, ok = v6.(string) + x.Type, ok = compiler.StringForScalarNode(v6) if !ok { - message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v6, v6) + message := fmt.Sprintf("has unexpected value for type: %s", compiler.Display(v6)) errors = append(errors, compiler.NewError(context, message)) } // check for valid enum values // [string number boolean integer array] if ok && !compiler.StringArrayContainsValue([]string{"string", "number", "boolean", "integer", "array"}, x.Type) { - message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v6, v6) + message := fmt.Sprintf("has unexpected value for type: %s", compiler.Display(v6)) errors = append(errors, compiler.NewError(context, message)) } } // string format = 7; v7 := compiler.MapValueForKey(m, "format") if v7 != nil { - x.Format, ok = v7.(string) + x.Format, ok = compiler.StringForScalarNode(v7) if !ok { - message := fmt.Sprintf("has unexpected value for format: %+v (%T)", v7, v7) + message := fmt.Sprintf("has unexpected value for format: %s", compiler.Display(v7)) errors = append(errors, compiler.NewError(context, message)) } } @@ -4376,7 +4206,7 @@ func NewQueryParameterSubSchema(in interface{}, context *compiler.Context) (*Que v8 := compiler.MapValueForKey(m, "items") if v8 != nil { var err error - x.Items, err = NewPrimitivesItems(v8, compiler.NewContext("items", context)) + x.Items, err = NewPrimitivesItems(v8, compiler.NewContext("items", v8, context)) if err != nil { errors = append(errors, err) } @@ -4384,15 +4214,15 @@ func NewQueryParameterSubSchema(in interface{}, context *compiler.Context) (*Que // string collection_format = 9; v9 := compiler.MapValueForKey(m, "collectionFormat") if v9 != nil { - x.CollectionFormat, ok = v9.(string) + x.CollectionFormat, ok = compiler.StringForScalarNode(v9) if !ok { - message := fmt.Sprintf("has unexpected value for collectionFormat: %+v (%T)", v9, v9) + message := fmt.Sprintf("has unexpected value for collectionFormat: %s", compiler.Display(v9)) errors = append(errors, compiler.NewError(context, message)) } // check for valid enum values // [csv ssv tsv pipes multi] if ok && !compiler.StringArrayContainsValue([]string{"csv", "ssv", "tsv", "pipes", "multi"}, x.CollectionFormat) { - message := fmt.Sprintf("has unexpected value for collectionFormat: %+v (%T)", v9, v9) + message := fmt.Sprintf("has unexpected value for collectionFormat: %s", compiler.Display(v9)) errors = append(errors, compiler.NewError(context, message)) } } @@ -4400,7 +4230,7 @@ func NewQueryParameterSubSchema(in interface{}, context *compiler.Context) (*Que v10 := compiler.MapValueForKey(m, "default") if v10 != nil { var err error - x.Default, err = NewAny(v10, compiler.NewContext("default", context)) + x.Default, err = NewAny(v10, compiler.NewContext("default", v10, context)) if err != nil { errors = append(errors, err) } @@ -4408,126 +4238,102 @@ func NewQueryParameterSubSchema(in interface{}, context *compiler.Context) (*Que // float maximum = 11; 
v11 := compiler.MapValueForKey(m, "maximum") if v11 != nil { - switch v11 := v11.(type) { - case float64: - x.Maximum = v11 - case float32: - x.Maximum = float64(v11) - case uint64: - x.Maximum = float64(v11) - case uint32: - x.Maximum = float64(v11) - case int64: - x.Maximum = float64(v11) - case int32: - x.Maximum = float64(v11) - case int: - x.Maximum = float64(v11) - default: - message := fmt.Sprintf("has unexpected value for maximum: %+v (%T)", v11, v11) + v, ok := compiler.FloatForScalarNode(v11) + if ok { + x.Maximum = v + } else { + message := fmt.Sprintf("has unexpected value for maximum: %s", compiler.Display(v11)) errors = append(errors, compiler.NewError(context, message)) } } // bool exclusive_maximum = 12; v12 := compiler.MapValueForKey(m, "exclusiveMaximum") if v12 != nil { - x.ExclusiveMaximum, ok = v12.(bool) + x.ExclusiveMaximum, ok = compiler.BoolForScalarNode(v12) if !ok { - message := fmt.Sprintf("has unexpected value for exclusiveMaximum: %+v (%T)", v12, v12) + message := fmt.Sprintf("has unexpected value for exclusiveMaximum: %s", compiler.Display(v12)) errors = append(errors, compiler.NewError(context, message)) } } // float minimum = 13; v13 := compiler.MapValueForKey(m, "minimum") if v13 != nil { - switch v13 := v13.(type) { - case float64: - x.Minimum = v13 - case float32: - x.Minimum = float64(v13) - case uint64: - x.Minimum = float64(v13) - case uint32: - x.Minimum = float64(v13) - case int64: - x.Minimum = float64(v13) - case int32: - x.Minimum = float64(v13) - case int: - x.Minimum = float64(v13) - default: - message := fmt.Sprintf("has unexpected value for minimum: %+v (%T)", v13, v13) + v, ok := compiler.FloatForScalarNode(v13) + if ok { + x.Minimum = v + } else { + message := fmt.Sprintf("has unexpected value for minimum: %s", compiler.Display(v13)) errors = append(errors, compiler.NewError(context, message)) } } // bool exclusive_minimum = 14; v14 := compiler.MapValueForKey(m, "exclusiveMinimum") if v14 != nil { - x.ExclusiveMinimum, ok = v14.(bool) + x.ExclusiveMinimum, ok = compiler.BoolForScalarNode(v14) if !ok { - message := fmt.Sprintf("has unexpected value for exclusiveMinimum: %+v (%T)", v14, v14) + message := fmt.Sprintf("has unexpected value for exclusiveMinimum: %s", compiler.Display(v14)) errors = append(errors, compiler.NewError(context, message)) } } // int64 max_length = 15; v15 := compiler.MapValueForKey(m, "maxLength") if v15 != nil { - t, ok := v15.(int) + t, ok := compiler.IntForScalarNode(v15) if ok { x.MaxLength = int64(t) } else { - message := fmt.Sprintf("has unexpected value for maxLength: %+v (%T)", v15, v15) + message := fmt.Sprintf("has unexpected value for maxLength: %s", compiler.Display(v15)) errors = append(errors, compiler.NewError(context, message)) } } // int64 min_length = 16; v16 := compiler.MapValueForKey(m, "minLength") if v16 != nil { - t, ok := v16.(int) + t, ok := compiler.IntForScalarNode(v16) if ok { x.MinLength = int64(t) } else { - message := fmt.Sprintf("has unexpected value for minLength: %+v (%T)", v16, v16) + message := fmt.Sprintf("has unexpected value for minLength: %s", compiler.Display(v16)) errors = append(errors, compiler.NewError(context, message)) } } // string pattern = 17; v17 := compiler.MapValueForKey(m, "pattern") if v17 != nil { - x.Pattern, ok = v17.(string) + x.Pattern, ok = compiler.StringForScalarNode(v17) if !ok { - message := fmt.Sprintf("has unexpected value for pattern: %+v (%T)", v17, v17) + message := fmt.Sprintf("has unexpected value for pattern: %s", compiler.Display(v17)) errors = 
append(errors, compiler.NewError(context, message)) } } // int64 max_items = 18; v18 := compiler.MapValueForKey(m, "maxItems") if v18 != nil { - t, ok := v18.(int) + t, ok := compiler.IntForScalarNode(v18) if ok { x.MaxItems = int64(t) } else { - message := fmt.Sprintf("has unexpected value for maxItems: %+v (%T)", v18, v18) + message := fmt.Sprintf("has unexpected value for maxItems: %s", compiler.Display(v18)) errors = append(errors, compiler.NewError(context, message)) } } // int64 min_items = 19; v19 := compiler.MapValueForKey(m, "minItems") if v19 != nil { - t, ok := v19.(int) + t, ok := compiler.IntForScalarNode(v19) if ok { x.MinItems = int64(t) } else { - message := fmt.Sprintf("has unexpected value for minItems: %+v (%T)", v19, v19) + message := fmt.Sprintf("has unexpected value for minItems: %s", compiler.Display(v19)) errors = append(errors, compiler.NewError(context, message)) } } // bool unique_items = 20; v20 := compiler.MapValueForKey(m, "uniqueItems") if v20 != nil { - x.UniqueItems, ok = v20.(bool) + x.UniqueItems, ok = compiler.BoolForScalarNode(v20) if !ok { - message := fmt.Sprintf("has unexpected value for uniqueItems: %+v (%T)", v20, v20) + message := fmt.Sprintf("has unexpected value for uniqueItems: %s", compiler.Display(v20)) errors = append(errors, compiler.NewError(context, message)) } } @@ -4536,10 +4342,10 @@ func NewQueryParameterSubSchema(in interface{}, context *compiler.Context) (*Que if v21 != nil { // repeated Any x.Enum = make([]*Any, 0) - a, ok := v21.([]interface{}) + a, ok := compiler.SequenceNodeForNode(v21) if ok { - for _, item := range a { - y, err := NewAny(item, compiler.NewContext("enum", context)) + for _, item := range a.Content { + y, err := NewAny(item, compiler.NewContext("enum", item, context)) if err != nil { errors = append(errors, err) } @@ -4550,49 +4356,37 @@ func NewQueryParameterSubSchema(in interface{}, context *compiler.Context) (*Que // float multiple_of = 22; v22 := compiler.MapValueForKey(m, "multipleOf") if v22 != nil { - switch v22 := v22.(type) { - case float64: - x.MultipleOf = v22 - case float32: - x.MultipleOf = float64(v22) - case uint64: - x.MultipleOf = float64(v22) - case uint32: - x.MultipleOf = float64(v22) - case int64: - x.MultipleOf = float64(v22) - case int32: - x.MultipleOf = float64(v22) - case int: - x.MultipleOf = float64(v22) - default: - message := fmt.Sprintf("has unexpected value for multipleOf: %+v (%T)", v22, v22) + v, ok := compiler.FloatForScalarNode(v22) + if ok { + x.MultipleOf = v + } else { + message := fmt.Sprintf("has unexpected value for multipleOf: %s", compiler.Display(v22)) errors = append(errors, compiler.NewError(context, message)) } } // repeated NamedAny vendor_extension = 23; // MAP: Any ^x- x.VendorExtension = make([]*NamedAny, 0) - for _, item := range m { - k, ok := compiler.StringValue(item.Key) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) if ok { - v := item.Value + v := m.Content[i+1] if strings.HasPrefix(k, "x-") { pair := &NamedAny{} pair.Name = k result := &Any{} - handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + handled, resultFromExt, err := compiler.CallExtension(context, v, k) if handled { if err != nil { errors = append(errors, err) } else { - bytes, _ := yaml.Marshal(v) + bytes := compiler.Marshal(v) result.Yaml = string(bytes) result.Value = resultFromExt pair.Value = result } } else { - pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + pair.Value, err = NewAny(v, 
compiler.NewContext(k, v, context)) if err != nil { errors = append(errors, err) } @@ -4606,7 +4400,7 @@ func NewQueryParameterSubSchema(in interface{}, context *compiler.Context) (*Que } // NewResponse creates an object of type Response if possible, returning an error if not. -func NewResponse(in interface{}, context *compiler.Context) (*Response, error) { +func NewResponse(in *yaml.Node, context *compiler.Context) (*Response, error) { errors := make([]error, 0) x := &Response{} m, ok := compiler.UnpackMap(in) @@ -4630,9 +4424,9 @@ func NewResponse(in interface{}, context *compiler.Context) (*Response, error) { // string description = 1; v1 := compiler.MapValueForKey(m, "description") if v1 != nil { - x.Description, ok = v1.(string) + x.Description, ok = compiler.StringForScalarNode(v1) if !ok { - message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v1, v1) + message := fmt.Sprintf("has unexpected value for description: %s", compiler.Display(v1)) errors = append(errors, compiler.NewError(context, message)) } } @@ -4640,7 +4434,7 @@ func NewResponse(in interface{}, context *compiler.Context) (*Response, error) { v2 := compiler.MapValueForKey(m, "schema") if v2 != nil { var err error - x.Schema, err = NewSchemaItem(v2, compiler.NewContext("schema", context)) + x.Schema, err = NewSchemaItem(v2, compiler.NewContext("schema", v2, context)) if err != nil { errors = append(errors, err) } @@ -4649,7 +4443,7 @@ func NewResponse(in interface{}, context *compiler.Context) (*Response, error) { v3 := compiler.MapValueForKey(m, "headers") if v3 != nil { var err error - x.Headers, err = NewHeaders(v3, compiler.NewContext("headers", context)) + x.Headers, err = NewHeaders(v3, compiler.NewContext("headers", v3, context)) if err != nil { errors = append(errors, err) } @@ -4658,7 +4452,7 @@ func NewResponse(in interface{}, context *compiler.Context) (*Response, error) { v4 := compiler.MapValueForKey(m, "examples") if v4 != nil { var err error - x.Examples, err = NewExamples(v4, compiler.NewContext("examples", context)) + x.Examples, err = NewExamples(v4, compiler.NewContext("examples", v4, context)) if err != nil { errors = append(errors, err) } @@ -4666,26 +4460,26 @@ func NewResponse(in interface{}, context *compiler.Context) (*Response, error) { // repeated NamedAny vendor_extension = 5; // MAP: Any ^x- x.VendorExtension = make([]*NamedAny, 0) - for _, item := range m { - k, ok := compiler.StringValue(item.Key) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) if ok { - v := item.Value + v := m.Content[i+1] if strings.HasPrefix(k, "x-") { pair := &NamedAny{} pair.Name = k result := &Any{} - handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + handled, resultFromExt, err := compiler.CallExtension(context, v, k) if handled { if err != nil { errors = append(errors, err) } else { - bytes, _ := yaml.Marshal(v) + bytes := compiler.Marshal(v) result.Yaml = string(bytes) result.Value = resultFromExt pair.Value = result } } else { - pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + pair.Value, err = NewAny(v, compiler.NewContext(k, v, context)) if err != nil { errors = append(errors, err) } @@ -4699,7 +4493,7 @@ func NewResponse(in interface{}, context *compiler.Context) (*Response, error) { } // NewResponseDefinitions creates an object of type ResponseDefinitions if possible, returning an error if not. 
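A note on the pattern above, before the next constructor: the long numeric type switches this patch deletes (float64, float32, uint64, and so on) collapse into single calls such as compiler.FloatForScalarNode because a yaml.v3 scalar always carries its text in Node.Value, so one strconv parse covers every numeric shape. A minimal sketch of such a helper, assuming gopkg.in/yaml.v3; the real helper lives in gnostic's compiler package and handles a few more node shapes:

package main

import (
	"fmt"
	"strconv"

	"gopkg.in/yaml.v3"
)

// floatForScalarNode mirrors the role of compiler.FloatForScalarNode in the
// patch above: a yaml.v3 scalar keeps its raw text in Value, so ints and
// floats parse through the same code path. (Illustrative sketch only.)
func floatForScalarNode(node *yaml.Node) (float64, bool) {
	if node == nil || node.Kind != yaml.ScalarNode {
		return 0.0, false
	}
	v, err := strconv.ParseFloat(node.Value, 64)
	return v, err == nil
}

func main() {
	var doc yaml.Node
	_ = yaml.Unmarshal([]byte("maximum: 100"), &doc)
	value := doc.Content[0].Content[1] // mapping value node for "maximum"
	if f, ok := floatForScalarNode(value); ok {
		fmt.Println(f) // 100
	}
}

The Bool and Int variants used throughout the file presumably differ only in calling strconv.ParseBool and strconv.ParseInt.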
-func NewResponseDefinitions(in interface{}, context *compiler.Context) (*ResponseDefinitions, error) { +func NewResponseDefinitions(in *yaml.Node, context *compiler.Context) (*ResponseDefinitions, error) { errors := make([]error, 0) x := &ResponseDefinitions{} m, ok := compiler.UnpackMap(in) @@ -4710,14 +4504,14 @@ func NewResponseDefinitions(in interface{}, context *compiler.Context) (*Respons // repeated NamedResponse additional_properties = 1; // MAP: Response x.AdditionalProperties = make([]*NamedResponse, 0) - for _, item := range m { - k, ok := compiler.StringValue(item.Key) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) if ok { - v := item.Value + v := m.Content[i+1] pair := &NamedResponse{} pair.Name = k var err error - pair.Value, err = NewResponse(v, compiler.NewContext(k, context)) + pair.Value, err = NewResponse(v, compiler.NewContext(k, v, context)) if err != nil { errors = append(errors, err) } @@ -4729,7 +4523,7 @@ func NewResponseDefinitions(in interface{}, context *compiler.Context) (*Respons } // NewResponseValue creates an object of type ResponseValue if possible, returning an error if not. -func NewResponseValue(in interface{}, context *compiler.Context) (*ResponseValue, error) { +func NewResponseValue(in *yaml.Node, context *compiler.Context) (*ResponseValue, error) { errors := make([]error, 0) x := &ResponseValue{} matched := false @@ -4738,7 +4532,7 @@ func NewResponseValue(in interface{}, context *compiler.Context) (*ResponseValue m, ok := compiler.UnpackMap(in) if ok { // errors might be ok here, they mean we just don't have the right subtype - t, matchingError := NewResponse(m, compiler.NewContext("response", context)) + t, matchingError := NewResponse(m, compiler.NewContext("response", m, context)) if matchingError == nil { x.Oneof = &ResponseValue_Response{Response: t} matched = true @@ -4752,7 +4546,7 @@ func NewResponseValue(in interface{}, context *compiler.Context) (*ResponseValue m, ok := compiler.UnpackMap(in) if ok { // errors might be ok here, they mean we just don't have the right subtype - t, matchingError := NewJsonReference(m, compiler.NewContext("jsonReference", context)) + t, matchingError := NewJsonReference(m, compiler.NewContext("jsonReference", m, context)) if matchingError == nil { x.Oneof = &ResponseValue_JsonReference{JsonReference: t} matched = true @@ -4764,12 +4558,16 @@ func NewResponseValue(in interface{}, context *compiler.Context) (*ResponseValue if matched { // since the oneof matched one of its possibilities, discard any matching errors errors = make([]error, 0) + } else { + message := fmt.Sprintf("contains an invalid ResponseValue") + err := compiler.NewError(context, message) + errors = []error{err} } return x, compiler.NewErrorGroupOrNil(errors) } // NewResponses creates an object of type Responses if possible, returning an error if not. 
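NewResponseValue above shows the oneof shape shared by every wrapper type in this file: try each candidate subtype in order, keep the first that parses, and, new in this patch, report an explicit error when nothing matched. A condensed, hypothetical rendering of that control flow (parseResponse and parseJSONReference are illustrative stand-ins, not gnostic APIs):

package main

import (
	"errors"
	"fmt"
)

// Hypothetical stand-ins for the generated subtypes.
type response struct{ description string }
type jsonReference struct{ ref string }

func parseResponse(in map[string]interface{}) (interface{}, error) {
	d, ok := in["description"].(string)
	if !ok {
		return nil, errors.New("not a Response")
	}
	return &response{description: d}, nil
}

func parseJSONReference(in map[string]interface{}) (interface{}, error) {
	r, ok := in["$ref"].(string)
	if !ok {
		return nil, errors.New("not a JsonReference")
	}
	return &jsonReference{ref: r}, nil
}

func newResponseValue(in map[string]interface{}) (interface{}, error) {
	for _, try := range []func(map[string]interface{}) (interface{}, error){
		parseResponse, parseJSONReference,
	} {
		if t, err := try(in); err == nil {
			// A failed match only means "not this subtype", so the
			// losers' errors are deliberately discarded.
			return t, nil
		}
	}
	return nil, errors.New("contains an invalid ResponseValue")
}

func main() {
	v, err := newResponseValue(map[string]interface{}{"$ref": "#/responses/ok"})
	fmt.Println(v, err) // &{#/responses/ok} <nil>
}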
-func NewResponses(in interface{}, context *compiler.Context) (*Responses, error) { +func NewResponses(in *yaml.Node, context *compiler.Context) (*Responses, error) { errors := make([]error, 0) x := &Responses{} m, ok := compiler.UnpackMap(in) @@ -4787,15 +4585,15 @@ func NewResponses(in interface{}, context *compiler.Context) (*Responses, error) // repeated NamedResponseValue response_code = 1; // MAP: ResponseValue ^([0-9]{3})$|^(default)$ x.ResponseCode = make([]*NamedResponseValue, 0) - for _, item := range m { - k, ok := compiler.StringValue(item.Key) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) if ok { - v := item.Value + v := m.Content[i+1] if pattern2.MatchString(k) { pair := &NamedResponseValue{} pair.Name = k var err error - pair.Value, err = NewResponseValue(v, compiler.NewContext(k, context)) + pair.Value, err = NewResponseValue(v, compiler.NewContext(k, v, context)) if err != nil { errors = append(errors, err) } @@ -4806,26 +4604,26 @@ func NewResponses(in interface{}, context *compiler.Context) (*Responses, error) // repeated NamedAny vendor_extension = 2; // MAP: Any ^x- x.VendorExtension = make([]*NamedAny, 0) - for _, item := range m { - k, ok := compiler.StringValue(item.Key) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) if ok { - v := item.Value + v := m.Content[i+1] if strings.HasPrefix(k, "x-") { pair := &NamedAny{} pair.Name = k result := &Any{} - handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + handled, resultFromExt, err := compiler.CallExtension(context, v, k) if handled { if err != nil { errors = append(errors, err) } else { - bytes, _ := yaml.Marshal(v) + bytes := compiler.Marshal(v) result.Yaml = string(bytes) result.Value = resultFromExt pair.Value = result } } else { - pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + pair.Value, err = NewAny(v, compiler.NewContext(k, v, context)) if err != nil { errors = append(errors, err) } @@ -4839,7 +4637,7 @@ func NewResponses(in interface{}, context *compiler.Context) (*Responses, error) } // NewSchema creates an object of type Schema if possible, returning an error if not. 
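NewResponses above also shows why every map loop in this patch now steps by two: a yaml.v3 mapping node stores its pairs as alternating key and value entries in Content. A small self-contained demonstration, reusing the same ^([0-9]{3})$|^(default)$ response-code filter:

package main

import (
	"fmt"
	"regexp"

	"gopkg.in/yaml.v3"
)

func main() {
	src := []byte("200: {description: ok}\ndefault: {description: fallback}\nx-rate: 1\n")
	var doc yaml.Node
	if err := yaml.Unmarshal(src, &doc); err != nil {
		panic(err)
	}
	m := doc.Content[0] // the root mapping node under the document node
	pattern := regexp.MustCompile(`^([0-9]{3})$|^(default)$`)
	// Keys sit at even indices, their values at the following odd index.
	for i := 0; i < len(m.Content); i += 2 {
		k, v := m.Content[i], m.Content[i+1]
		if pattern.MatchString(k.Value) {
			fmt.Printf("response %q -> %d fields\n", k.Value, len(v.Content)/2)
		}
	}
}

The x-rate key falls through the regexp here exactly as it falls through to the vendor-extension loop in the generated code.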
-func NewSchema(in interface{}, context *compiler.Context) (*Schema, error) { +func NewSchema(in *yaml.Node, context *compiler.Context) (*Schema, error) { errors := make([]error, 0) x := &Schema{} m, ok := compiler.UnpackMap(in) @@ -4857,36 +4655,36 @@ func NewSchema(in interface{}, context *compiler.Context) (*Schema, error) { // string _ref = 1; v1 := compiler.MapValueForKey(m, "$ref") if v1 != nil { - x.XRef, ok = v1.(string) + x.XRef, ok = compiler.StringForScalarNode(v1) if !ok { - message := fmt.Sprintf("has unexpected value for $ref: %+v (%T)", v1, v1) + message := fmt.Sprintf("has unexpected value for $ref: %s", compiler.Display(v1)) errors = append(errors, compiler.NewError(context, message)) } } // string format = 2; v2 := compiler.MapValueForKey(m, "format") if v2 != nil { - x.Format, ok = v2.(string) + x.Format, ok = compiler.StringForScalarNode(v2) if !ok { - message := fmt.Sprintf("has unexpected value for format: %+v (%T)", v2, v2) + message := fmt.Sprintf("has unexpected value for format: %s", compiler.Display(v2)) errors = append(errors, compiler.NewError(context, message)) } } // string title = 3; v3 := compiler.MapValueForKey(m, "title") if v3 != nil { - x.Title, ok = v3.(string) + x.Title, ok = compiler.StringForScalarNode(v3) if !ok { - message := fmt.Sprintf("has unexpected value for title: %+v (%T)", v3, v3) + message := fmt.Sprintf("has unexpected value for title: %s", compiler.Display(v3)) errors = append(errors, compiler.NewError(context, message)) } } // string description = 4; v4 := compiler.MapValueForKey(m, "description") if v4 != nil { - x.Description, ok = v4.(string) + x.Description, ok = compiler.StringForScalarNode(v4) if !ok { - message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v4, v4) + message := fmt.Sprintf("has unexpected value for description: %s", compiler.Display(v4)) errors = append(errors, compiler.NewError(context, message)) } } @@ -4894,7 +4692,7 @@ func NewSchema(in interface{}, context *compiler.Context) (*Schema, error) { v5 := compiler.MapValueForKey(m, "default") if v5 != nil { var err error - x.Default, err = NewAny(v5, compiler.NewContext("default", context)) + x.Default, err = NewAny(v5, compiler.NewContext("default", v5, context)) if err != nil { errors = append(errors, err) } @@ -4902,182 +4700,146 @@ func NewSchema(in interface{}, context *compiler.Context) (*Schema, error) { // float multiple_of = 6; v6 := compiler.MapValueForKey(m, "multipleOf") if v6 != nil { - switch v6 := v6.(type) { - case float64: - x.MultipleOf = v6 - case float32: - x.MultipleOf = float64(v6) - case uint64: - x.MultipleOf = float64(v6) - case uint32: - x.MultipleOf = float64(v6) - case int64: - x.MultipleOf = float64(v6) - case int32: - x.MultipleOf = float64(v6) - case int: - x.MultipleOf = float64(v6) - default: - message := fmt.Sprintf("has unexpected value for multipleOf: %+v (%T)", v6, v6) + v, ok := compiler.FloatForScalarNode(v6) + if ok { + x.MultipleOf = v + } else { + message := fmt.Sprintf("has unexpected value for multipleOf: %s", compiler.Display(v6)) errors = append(errors, compiler.NewError(context, message)) } } // float maximum = 7; v7 := compiler.MapValueForKey(m, "maximum") if v7 != nil { - switch v7 := v7.(type) { - case float64: - x.Maximum = v7 - case float32: - x.Maximum = float64(v7) - case uint64: - x.Maximum = float64(v7) - case uint32: - x.Maximum = float64(v7) - case int64: - x.Maximum = float64(v7) - case int32: - x.Maximum = float64(v7) - case int: - x.Maximum = float64(v7) - default: - message := 
fmt.Sprintf("has unexpected value for maximum: %+v (%T)", v7, v7) + v, ok := compiler.FloatForScalarNode(v7) + if ok { + x.Maximum = v + } else { + message := fmt.Sprintf("has unexpected value for maximum: %s", compiler.Display(v7)) errors = append(errors, compiler.NewError(context, message)) } } // bool exclusive_maximum = 8; v8 := compiler.MapValueForKey(m, "exclusiveMaximum") if v8 != nil { - x.ExclusiveMaximum, ok = v8.(bool) + x.ExclusiveMaximum, ok = compiler.BoolForScalarNode(v8) if !ok { - message := fmt.Sprintf("has unexpected value for exclusiveMaximum: %+v (%T)", v8, v8) + message := fmt.Sprintf("has unexpected value for exclusiveMaximum: %s", compiler.Display(v8)) errors = append(errors, compiler.NewError(context, message)) } } // float minimum = 9; v9 := compiler.MapValueForKey(m, "minimum") if v9 != nil { - switch v9 := v9.(type) { - case float64: - x.Minimum = v9 - case float32: - x.Minimum = float64(v9) - case uint64: - x.Minimum = float64(v9) - case uint32: - x.Minimum = float64(v9) - case int64: - x.Minimum = float64(v9) - case int32: - x.Minimum = float64(v9) - case int: - x.Minimum = float64(v9) - default: - message := fmt.Sprintf("has unexpected value for minimum: %+v (%T)", v9, v9) + v, ok := compiler.FloatForScalarNode(v9) + if ok { + x.Minimum = v + } else { + message := fmt.Sprintf("has unexpected value for minimum: %s", compiler.Display(v9)) errors = append(errors, compiler.NewError(context, message)) } } // bool exclusive_minimum = 10; v10 := compiler.MapValueForKey(m, "exclusiveMinimum") if v10 != nil { - x.ExclusiveMinimum, ok = v10.(bool) + x.ExclusiveMinimum, ok = compiler.BoolForScalarNode(v10) if !ok { - message := fmt.Sprintf("has unexpected value for exclusiveMinimum: %+v (%T)", v10, v10) + message := fmt.Sprintf("has unexpected value for exclusiveMinimum: %s", compiler.Display(v10)) errors = append(errors, compiler.NewError(context, message)) } } // int64 max_length = 11; v11 := compiler.MapValueForKey(m, "maxLength") if v11 != nil { - t, ok := v11.(int) + t, ok := compiler.IntForScalarNode(v11) if ok { x.MaxLength = int64(t) } else { - message := fmt.Sprintf("has unexpected value for maxLength: %+v (%T)", v11, v11) + message := fmt.Sprintf("has unexpected value for maxLength: %s", compiler.Display(v11)) errors = append(errors, compiler.NewError(context, message)) } } // int64 min_length = 12; v12 := compiler.MapValueForKey(m, "minLength") if v12 != nil { - t, ok := v12.(int) + t, ok := compiler.IntForScalarNode(v12) if ok { x.MinLength = int64(t) } else { - message := fmt.Sprintf("has unexpected value for minLength: %+v (%T)", v12, v12) + message := fmt.Sprintf("has unexpected value for minLength: %s", compiler.Display(v12)) errors = append(errors, compiler.NewError(context, message)) } } // string pattern = 13; v13 := compiler.MapValueForKey(m, "pattern") if v13 != nil { - x.Pattern, ok = v13.(string) + x.Pattern, ok = compiler.StringForScalarNode(v13) if !ok { - message := fmt.Sprintf("has unexpected value for pattern: %+v (%T)", v13, v13) + message := fmt.Sprintf("has unexpected value for pattern: %s", compiler.Display(v13)) errors = append(errors, compiler.NewError(context, message)) } } // int64 max_items = 14; v14 := compiler.MapValueForKey(m, "maxItems") if v14 != nil { - t, ok := v14.(int) + t, ok := compiler.IntForScalarNode(v14) if ok { x.MaxItems = int64(t) } else { - message := fmt.Sprintf("has unexpected value for maxItems: %+v (%T)", v14, v14) + message := fmt.Sprintf("has unexpected value for maxItems: %s", compiler.Display(v14)) errors = 
append(errors, compiler.NewError(context, message)) } } // int64 min_items = 15; v15 := compiler.MapValueForKey(m, "minItems") if v15 != nil { - t, ok := v15.(int) + t, ok := compiler.IntForScalarNode(v15) if ok { x.MinItems = int64(t) } else { - message := fmt.Sprintf("has unexpected value for minItems: %+v (%T)", v15, v15) + message := fmt.Sprintf("has unexpected value for minItems: %s", compiler.Display(v15)) errors = append(errors, compiler.NewError(context, message)) } } // bool unique_items = 16; v16 := compiler.MapValueForKey(m, "uniqueItems") if v16 != nil { - x.UniqueItems, ok = v16.(bool) + x.UniqueItems, ok = compiler.BoolForScalarNode(v16) if !ok { - message := fmt.Sprintf("has unexpected value for uniqueItems: %+v (%T)", v16, v16) + message := fmt.Sprintf("has unexpected value for uniqueItems: %s", compiler.Display(v16)) errors = append(errors, compiler.NewError(context, message)) } } // int64 max_properties = 17; v17 := compiler.MapValueForKey(m, "maxProperties") if v17 != nil { - t, ok := v17.(int) + t, ok := compiler.IntForScalarNode(v17) if ok { x.MaxProperties = int64(t) } else { - message := fmt.Sprintf("has unexpected value for maxProperties: %+v (%T)", v17, v17) + message := fmt.Sprintf("has unexpected value for maxProperties: %s", compiler.Display(v17)) errors = append(errors, compiler.NewError(context, message)) } } // int64 min_properties = 18; v18 := compiler.MapValueForKey(m, "minProperties") if v18 != nil { - t, ok := v18.(int) + t, ok := compiler.IntForScalarNode(v18) if ok { x.MinProperties = int64(t) } else { - message := fmt.Sprintf("has unexpected value for minProperties: %+v (%T)", v18, v18) + message := fmt.Sprintf("has unexpected value for minProperties: %s", compiler.Display(v18)) errors = append(errors, compiler.NewError(context, message)) } } // repeated string required = 19; v19 := compiler.MapValueForKey(m, "required") if v19 != nil { - v, ok := v19.([]interface{}) + v, ok := compiler.SequenceNodeForNode(v19) if ok { - x.Required = compiler.ConvertInterfaceArrayToStringArray(v) + x.Required = compiler.StringArrayForSequenceNode(v) } else { - message := fmt.Sprintf("has unexpected value for required: %+v (%T)", v19, v19) + message := fmt.Sprintf("has unexpected value for required: %s", compiler.Display(v19)) errors = append(errors, compiler.NewError(context, message)) } } @@ -5086,10 +4848,10 @@ func NewSchema(in interface{}, context *compiler.Context) (*Schema, error) { if v20 != nil { // repeated Any x.Enum = make([]*Any, 0) - a, ok := v20.([]interface{}) + a, ok := compiler.SequenceNodeForNode(v20) if ok { - for _, item := range a { - y, err := NewAny(item, compiler.NewContext("enum", context)) + for _, item := range a.Content { + y, err := NewAny(item, compiler.NewContext("enum", item, context)) if err != nil { errors = append(errors, err) } @@ -5101,7 +4863,7 @@ func NewSchema(in interface{}, context *compiler.Context) (*Schema, error) { v21 := compiler.MapValueForKey(m, "additionalProperties") if v21 != nil { var err error - x.AdditionalProperties, err = NewAdditionalPropertiesItem(v21, compiler.NewContext("additionalProperties", context)) + x.AdditionalProperties, err = NewAdditionalPropertiesItem(v21, compiler.NewContext("additionalProperties", v21, context)) if err != nil { errors = append(errors, err) } @@ -5110,7 +4872,7 @@ func NewSchema(in interface{}, context *compiler.Context) (*Schema, error) { v22 := compiler.MapValueForKey(m, "type") if v22 != nil { var err error - x.Type, err = NewTypeItem(v22, compiler.NewContext("type", context)) 
+ x.Type, err = NewTypeItem(v22, compiler.NewContext("type", v22, context)) if err != nil { errors = append(errors, err) } @@ -5119,7 +4881,7 @@ func NewSchema(in interface{}, context *compiler.Context) (*Schema, error) { v23 := compiler.MapValueForKey(m, "items") if v23 != nil { var err error - x.Items, err = NewItemsItem(v23, compiler.NewContext("items", context)) + x.Items, err = NewItemsItem(v23, compiler.NewContext("items", v23, context)) if err != nil { errors = append(errors, err) } @@ -5129,10 +4891,10 @@ func NewSchema(in interface{}, context *compiler.Context) (*Schema, error) { if v24 != nil { // repeated Schema x.AllOf = make([]*Schema, 0) - a, ok := v24.([]interface{}) + a, ok := compiler.SequenceNodeForNode(v24) if ok { - for _, item := range a { - y, err := NewSchema(item, compiler.NewContext("allOf", context)) + for _, item := range a.Content { + y, err := NewSchema(item, compiler.NewContext("allOf", item, context)) if err != nil { errors = append(errors, err) } @@ -5144,7 +4906,7 @@ func NewSchema(in interface{}, context *compiler.Context) (*Schema, error) { v25 := compiler.MapValueForKey(m, "properties") if v25 != nil { var err error - x.Properties, err = NewProperties(v25, compiler.NewContext("properties", context)) + x.Properties, err = NewProperties(v25, compiler.NewContext("properties", v25, context)) if err != nil { errors = append(errors, err) } @@ -5152,18 +4914,18 @@ func NewSchema(in interface{}, context *compiler.Context) (*Schema, error) { // string discriminator = 26; v26 := compiler.MapValueForKey(m, "discriminator") if v26 != nil { - x.Discriminator, ok = v26.(string) + x.Discriminator, ok = compiler.StringForScalarNode(v26) if !ok { - message := fmt.Sprintf("has unexpected value for discriminator: %+v (%T)", v26, v26) + message := fmt.Sprintf("has unexpected value for discriminator: %s", compiler.Display(v26)) errors = append(errors, compiler.NewError(context, message)) } } // bool read_only = 27; v27 := compiler.MapValueForKey(m, "readOnly") if v27 != nil { - x.ReadOnly, ok = v27.(bool) + x.ReadOnly, ok = compiler.BoolForScalarNode(v27) if !ok { - message := fmt.Sprintf("has unexpected value for readOnly: %+v (%T)", v27, v27) + message := fmt.Sprintf("has unexpected value for readOnly: %s", compiler.Display(v27)) errors = append(errors, compiler.NewError(context, message)) } } @@ -5171,7 +4933,7 @@ func NewSchema(in interface{}, context *compiler.Context) (*Schema, error) { v28 := compiler.MapValueForKey(m, "xml") if v28 != nil { var err error - x.Xml, err = NewXml(v28, compiler.NewContext("xml", context)) + x.Xml, err = NewXml(v28, compiler.NewContext("xml", v28, context)) if err != nil { errors = append(errors, err) } @@ -5180,7 +4942,7 @@ func NewSchema(in interface{}, context *compiler.Context) (*Schema, error) { v29 := compiler.MapValueForKey(m, "externalDocs") if v29 != nil { var err error - x.ExternalDocs, err = NewExternalDocs(v29, compiler.NewContext("externalDocs", context)) + x.ExternalDocs, err = NewExternalDocs(v29, compiler.NewContext("externalDocs", v29, context)) if err != nil { errors = append(errors, err) } @@ -5189,7 +4951,7 @@ func NewSchema(in interface{}, context *compiler.Context) (*Schema, error) { v30 := compiler.MapValueForKey(m, "example") if v30 != nil { var err error - x.Example, err = NewAny(v30, compiler.NewContext("example", context)) + x.Example, err = NewAny(v30, compiler.NewContext("example", v30, context)) if err != nil { errors = append(errors, err) } @@ -5197,26 +4959,26 @@ func NewSchema(in interface{}, context 
*compiler.Context) (*Schema, error) { // repeated NamedAny vendor_extension = 31; // MAP: Any ^x- x.VendorExtension = make([]*NamedAny, 0) - for _, item := range m { - k, ok := compiler.StringValue(item.Key) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) if ok { - v := item.Value + v := m.Content[i+1] if strings.HasPrefix(k, "x-") { pair := &NamedAny{} pair.Name = k result := &Any{} - handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + handled, resultFromExt, err := compiler.CallExtension(context, v, k) if handled { if err != nil { errors = append(errors, err) } else { - bytes, _ := yaml.Marshal(v) + bytes := compiler.Marshal(v) result.Yaml = string(bytes) result.Value = resultFromExt pair.Value = result } } else { - pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + pair.Value, err = NewAny(v, compiler.NewContext(k, v, context)) if err != nil { errors = append(errors, err) } @@ -5230,7 +4992,7 @@ func NewSchema(in interface{}, context *compiler.Context) (*Schema, error) { } // NewSchemaItem creates an object of type SchemaItem if possible, returning an error if not. -func NewSchemaItem(in interface{}, context *compiler.Context) (*SchemaItem, error) { +func NewSchemaItem(in *yaml.Node, context *compiler.Context) (*SchemaItem, error) { errors := make([]error, 0) x := &SchemaItem{} matched := false @@ -5239,7 +5001,7 @@ func NewSchemaItem(in interface{}, context *compiler.Context) (*SchemaItem, erro m, ok := compiler.UnpackMap(in) if ok { // errors might be ok here, they mean we just don't have the right subtype - t, matchingError := NewSchema(m, compiler.NewContext("schema", context)) + t, matchingError := NewSchema(m, compiler.NewContext("schema", m, context)) if matchingError == nil { x.Oneof = &SchemaItem_Schema{Schema: t} matched = true @@ -5253,7 +5015,7 @@ func NewSchemaItem(in interface{}, context *compiler.Context) (*SchemaItem, erro m, ok := compiler.UnpackMap(in) if ok { // errors might be ok here, they mean we just don't have the right subtype - t, matchingError := NewFileSchema(m, compiler.NewContext("fileSchema", context)) + t, matchingError := NewFileSchema(m, compiler.NewContext("fileSchema", m, context)) if matchingError == nil { x.Oneof = &SchemaItem_FileSchema{FileSchema: t} matched = true @@ -5265,12 +5027,16 @@ func NewSchemaItem(in interface{}, context *compiler.Context) (*SchemaItem, erro if matched { // since the oneof matched one of its possibilities, discard any matching errors errors = make([]error, 0) + } else { + message := fmt.Sprintf("contains an invalid SchemaItem") + err := compiler.NewError(context, message) + errors = []error{err} } return x, compiler.NewErrorGroupOrNil(errors) } // NewSecurityDefinitions creates an object of type SecurityDefinitions if possible, returning an error if not. 
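For the vendor-extension (^x-) branches above, note that yaml.v3 can re-serialize a *yaml.Node subtree directly, which is plausibly all the new compiler.Marshal(v) call wraps; only the call site is visible in this patch, so treat that as an assumption. A sketch:

package main

import (
	"fmt"
	"strings"

	"gopkg.in/yaml.v3"
)

func main() {
	var doc yaml.Node
	_ = yaml.Unmarshal([]byte("type: string\nx-go-name: UserID\n"), &doc)
	m := doc.Content[0]
	for i := 0; i < len(m.Content); i += 2 {
		k, v := m.Content[i], m.Content[i+1]
		if strings.HasPrefix(k.Value, "x-") {
			// yaml.Marshal accepts a *yaml.Node and re-emits that
			// subtree, keeping the extension's raw YAML alongside
			// its parsed value, as the Any type above does.
			raw, _ := yaml.Marshal(v)
			fmt.Printf("extension %s = %s", k.Value, raw) // extension x-go-name = UserID
		}
	}
}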
-func NewSecurityDefinitions(in interface{}, context *compiler.Context) (*SecurityDefinitions, error) { +func NewSecurityDefinitions(in *yaml.Node, context *compiler.Context) (*SecurityDefinitions, error) { errors := make([]error, 0) x := &SecurityDefinitions{} m, ok := compiler.UnpackMap(in) @@ -5281,14 +5047,14 @@ func NewSecurityDefinitions(in interface{}, context *compiler.Context) (*Securit // repeated NamedSecurityDefinitionsItem additional_properties = 1; // MAP: SecurityDefinitionsItem x.AdditionalProperties = make([]*NamedSecurityDefinitionsItem, 0) - for _, item := range m { - k, ok := compiler.StringValue(item.Key) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) if ok { - v := item.Value + v := m.Content[i+1] pair := &NamedSecurityDefinitionsItem{} pair.Name = k var err error - pair.Value, err = NewSecurityDefinitionsItem(v, compiler.NewContext(k, context)) + pair.Value, err = NewSecurityDefinitionsItem(v, compiler.NewContext(k, v, context)) if err != nil { errors = append(errors, err) } @@ -5300,7 +5066,7 @@ func NewSecurityDefinitions(in interface{}, context *compiler.Context) (*Securit } // NewSecurityDefinitionsItem creates an object of type SecurityDefinitionsItem if possible, returning an error if not. -func NewSecurityDefinitionsItem(in interface{}, context *compiler.Context) (*SecurityDefinitionsItem, error) { +func NewSecurityDefinitionsItem(in *yaml.Node, context *compiler.Context) (*SecurityDefinitionsItem, error) { errors := make([]error, 0) x := &SecurityDefinitionsItem{} matched := false @@ -5309,7 +5075,7 @@ func NewSecurityDefinitionsItem(in interface{}, context *compiler.Context) (*Sec m, ok := compiler.UnpackMap(in) if ok { // errors might be ok here, they mean we just don't have the right subtype - t, matchingError := NewBasicAuthenticationSecurity(m, compiler.NewContext("basicAuthenticationSecurity", context)) + t, matchingError := NewBasicAuthenticationSecurity(m, compiler.NewContext("basicAuthenticationSecurity", m, context)) if matchingError == nil { x.Oneof = &SecurityDefinitionsItem_BasicAuthenticationSecurity{BasicAuthenticationSecurity: t} matched = true @@ -5323,7 +5089,7 @@ func NewSecurityDefinitionsItem(in interface{}, context *compiler.Context) (*Sec m, ok := compiler.UnpackMap(in) if ok { // errors might be ok here, they mean we just don't have the right subtype - t, matchingError := NewApiKeySecurity(m, compiler.NewContext("apiKeySecurity", context)) + t, matchingError := NewApiKeySecurity(m, compiler.NewContext("apiKeySecurity", m, context)) if matchingError == nil { x.Oneof = &SecurityDefinitionsItem_ApiKeySecurity{ApiKeySecurity: t} matched = true @@ -5337,7 +5103,7 @@ func NewSecurityDefinitionsItem(in interface{}, context *compiler.Context) (*Sec m, ok := compiler.UnpackMap(in) if ok { // errors might be ok here, they mean we just don't have the right subtype - t, matchingError := NewOauth2ImplicitSecurity(m, compiler.NewContext("oauth2ImplicitSecurity", context)) + t, matchingError := NewOauth2ImplicitSecurity(m, compiler.NewContext("oauth2ImplicitSecurity", m, context)) if matchingError == nil { x.Oneof = &SecurityDefinitionsItem_Oauth2ImplicitSecurity{Oauth2ImplicitSecurity: t} matched = true @@ -5351,7 +5117,7 @@ func NewSecurityDefinitionsItem(in interface{}, context *compiler.Context) (*Sec m, ok := compiler.UnpackMap(in) if ok { // errors might be ok here, they mean we just don't have the right subtype - t, matchingError := NewOauth2PasswordSecurity(m, 
compiler.NewContext("oauth2PasswordSecurity", context)) + t, matchingError := NewOauth2PasswordSecurity(m, compiler.NewContext("oauth2PasswordSecurity", m, context)) if matchingError == nil { x.Oneof = &SecurityDefinitionsItem_Oauth2PasswordSecurity{Oauth2PasswordSecurity: t} matched = true @@ -5365,7 +5131,7 @@ func NewSecurityDefinitionsItem(in interface{}, context *compiler.Context) (*Sec m, ok := compiler.UnpackMap(in) if ok { // errors might be ok here, they mean we just don't have the right subtype - t, matchingError := NewOauth2ApplicationSecurity(m, compiler.NewContext("oauth2ApplicationSecurity", context)) + t, matchingError := NewOauth2ApplicationSecurity(m, compiler.NewContext("oauth2ApplicationSecurity", m, context)) if matchingError == nil { x.Oneof = &SecurityDefinitionsItem_Oauth2ApplicationSecurity{Oauth2ApplicationSecurity: t} matched = true @@ -5379,7 +5145,7 @@ func NewSecurityDefinitionsItem(in interface{}, context *compiler.Context) (*Sec m, ok := compiler.UnpackMap(in) if ok { // errors might be ok here, they mean we just don't have the right subtype - t, matchingError := NewOauth2AccessCodeSecurity(m, compiler.NewContext("oauth2AccessCodeSecurity", context)) + t, matchingError := NewOauth2AccessCodeSecurity(m, compiler.NewContext("oauth2AccessCodeSecurity", m, context)) if matchingError == nil { x.Oneof = &SecurityDefinitionsItem_Oauth2AccessCodeSecurity{Oauth2AccessCodeSecurity: t} matched = true @@ -5391,12 +5157,16 @@ func NewSecurityDefinitionsItem(in interface{}, context *compiler.Context) (*Sec if matched { // since the oneof matched one of its possibilities, discard any matching errors errors = make([]error, 0) + } else { + message := fmt.Sprintf("contains an invalid SecurityDefinitionsItem") + err := compiler.NewError(context, message) + errors = []error{err} } return x, compiler.NewErrorGroupOrNil(errors) } // NewSecurityRequirement creates an object of type SecurityRequirement if possible, returning an error if not. -func NewSecurityRequirement(in interface{}, context *compiler.Context) (*SecurityRequirement, error) { +func NewSecurityRequirement(in *yaml.Node, context *compiler.Context) (*SecurityRequirement, error) { errors := make([]error, 0) x := &SecurityRequirement{} m, ok := compiler.UnpackMap(in) @@ -5407,14 +5177,14 @@ func NewSecurityRequirement(in interface{}, context *compiler.Context) (*Securit // repeated NamedStringArray additional_properties = 1; // MAP: StringArray x.AdditionalProperties = make([]*NamedStringArray, 0) - for _, item := range m { - k, ok := compiler.StringValue(item.Key) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) if ok { - v := item.Value + v := m.Content[i+1] pair := &NamedStringArray{} pair.Name = k var err error - pair.Value, err = NewStringArray(v, compiler.NewContext(k, context)) + pair.Value, err = NewStringArray(v, compiler.NewContext(k, v, context)) if err != nil { errors = append(errors, err) } @@ -5426,24 +5196,19 @@ func NewSecurityRequirement(in interface{}, context *compiler.Context) (*Securit } // NewStringArray creates an object of type StringArray if possible, returning an error if not. 
-func NewStringArray(in interface{}, context *compiler.Context) (*StringArray, error) { +func NewStringArray(in *yaml.Node, context *compiler.Context) (*StringArray, error) { errors := make([]error, 0) x := &StringArray{} - a, ok := in.([]interface{}) - if !ok { - message := fmt.Sprintf("has unexpected value for StringArray: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - x.Value = make([]string, 0) - for _, s := range a { - x.Value = append(x.Value, s.(string)) - } + x.Value = make([]string, 0) + for _, node := range in.Content { + s, _ := compiler.StringForScalarNode(node) + x.Value = append(x.Value, s) } return x, compiler.NewErrorGroupOrNil(errors) } // NewTag creates an object of type Tag if possible, returning an error if not. -func NewTag(in interface{}, context *compiler.Context) (*Tag, error) { +func NewTag(in *yaml.Node, context *compiler.Context) (*Tag, error) { errors := make([]error, 0) x := &Tag{} m, ok := compiler.UnpackMap(in) @@ -5467,18 +5232,18 @@ func NewTag(in interface{}, context *compiler.Context) (*Tag, error) { // string name = 1; v1 := compiler.MapValueForKey(m, "name") if v1 != nil { - x.Name, ok = v1.(string) + x.Name, ok = compiler.StringForScalarNode(v1) if !ok { - message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1) + message := fmt.Sprintf("has unexpected value for name: %s", compiler.Display(v1)) errors = append(errors, compiler.NewError(context, message)) } } // string description = 2; v2 := compiler.MapValueForKey(m, "description") if v2 != nil { - x.Description, ok = v2.(string) + x.Description, ok = compiler.StringForScalarNode(v2) if !ok { - message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v2, v2) + message := fmt.Sprintf("has unexpected value for description: %s", compiler.Display(v2)) errors = append(errors, compiler.NewError(context, message)) } } @@ -5486,7 +5251,7 @@ func NewTag(in interface{}, context *compiler.Context) (*Tag, error) { v3 := compiler.MapValueForKey(m, "externalDocs") if v3 != nil { var err error - x.ExternalDocs, err = NewExternalDocs(v3, compiler.NewContext("externalDocs", context)) + x.ExternalDocs, err = NewExternalDocs(v3, compiler.NewContext("externalDocs", v3, context)) if err != nil { errors = append(errors, err) } @@ -5494,26 +5259,26 @@ func NewTag(in interface{}, context *compiler.Context) (*Tag, error) { // repeated NamedAny vendor_extension = 4; // MAP: Any ^x- x.VendorExtension = make([]*NamedAny, 0) - for _, item := range m { - k, ok := compiler.StringValue(item.Key) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) if ok { - v := item.Value + v := m.Content[i+1] if strings.HasPrefix(k, "x-") { pair := &NamedAny{} pair.Name = k result := &Any{} - handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + handled, resultFromExt, err := compiler.CallExtension(context, v, k) if handled { if err != nil { errors = append(errors, err) } else { - bytes, _ := yaml.Marshal(v) + bytes := compiler.Marshal(v) result.Yaml = string(bytes) result.Value = resultFromExt pair.Value = result } } else { - pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + pair.Value, err = NewAny(v, compiler.NewContext(k, v, context)) if err != nil { errors = append(errors, err) } @@ -5527,17 +5292,19 @@ func NewTag(in interface{}, context *compiler.Context) (*Tag, error) { } // NewTypeItem creates an object of type TypeItem if possible, returning an error if not. 
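The `required` handling earlier and NewStringArray above both turn a sequence node into []string. A stand-in for compiler.StringArrayForSequenceNode, with the caveat that this sketch skips non-scalar entries, which is slightly stricter than NewStringArray above, where a failed conversion appends the empty string:

package main

import (
	"fmt"

	"gopkg.in/yaml.v3"
)

// stringArrayForSequenceNode is an illustrative stand-in; the real helper
// lives in gnostic's compiler package.
func stringArrayForSequenceNode(node *yaml.Node) []string {
	values := make([]string, 0, len(node.Content))
	for _, item := range node.Content {
		if item.Kind == yaml.ScalarNode {
			values = append(values, item.Value)
		}
	}
	return values
}

func main() {
	var doc yaml.Node
	_ = yaml.Unmarshal([]byte("required: [id, name]"), &doc)
	seq := doc.Content[0].Content[1] // sequence node for "required"
	fmt.Println(stringArrayForSequenceNode(seq)) // [id name]
}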
-func NewTypeItem(in interface{}, context *compiler.Context) (*TypeItem, error) { +func NewTypeItem(in *yaml.Node, context *compiler.Context) (*TypeItem, error) { errors := make([]error, 0) x := &TypeItem{} - switch in := in.(type) { - case string: + v1 := in + switch v1.Kind { + case yaml.ScalarNode: x.Value = make([]string, 0) - x.Value = append(x.Value, in) - case []interface{}: + x.Value = append(x.Value, v1.Value) + case yaml.SequenceNode: x.Value = make([]string, 0) - for _, v := range in { - value, ok := v.(string) + for _, v := range v1.Content { + value := v.Value + ok := v.Kind == yaml.ScalarNode if ok { x.Value = append(x.Value, value) } else { @@ -5553,7 +5320,7 @@ func NewTypeItem(in interface{}, context *compiler.Context) (*TypeItem, error) { } // NewVendorExtension creates an object of type VendorExtension if possible, returning an error if not. -func NewVendorExtension(in interface{}, context *compiler.Context) (*VendorExtension, error) { +func NewVendorExtension(in *yaml.Node, context *compiler.Context) (*VendorExtension, error) { errors := make([]error, 0) x := &VendorExtension{} m, ok := compiler.UnpackMap(in) @@ -5564,25 +5331,25 @@ func NewVendorExtension(in interface{}, context *compiler.Context) (*VendorExten // repeated NamedAny additional_properties = 1; // MAP: Any x.AdditionalProperties = make([]*NamedAny, 0) - for _, item := range m { - k, ok := compiler.StringValue(item.Key) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) if ok { - v := item.Value + v := m.Content[i+1] pair := &NamedAny{} pair.Name = k result := &Any{} - handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + handled, resultFromExt, err := compiler.CallExtension(context, v, k) if handled { if err != nil { errors = append(errors, err) } else { - bytes, _ := yaml.Marshal(v) + bytes := compiler.Marshal(v) result.Yaml = string(bytes) result.Value = resultFromExt pair.Value = result } } else { - pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + pair.Value, err = NewAny(v, compiler.NewContext(k, v, context)) if err != nil { errors = append(errors, err) } @@ -5595,7 +5362,7 @@ func NewVendorExtension(in interface{}, context *compiler.Context) (*VendorExten } // NewXml creates an object of type Xml if possible, returning an error if not. 
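NewTypeItem above is the one constructor that branches on Node.Kind rather than unpacking a map, since the Swagger `type` field may be a single name or a list of names. The same logic as a free-standing function:

package main

import (
	"fmt"

	"gopkg.in/yaml.v3"
)

// typeItemValues mirrors the Kind switch in NewTypeItem: yaml.Node.Kind
// replaces the old `switch in := in.(type)` over string and []interface{}.
func typeItemValues(in *yaml.Node) ([]string, error) {
	switch in.Kind {
	case yaml.ScalarNode:
		return []string{in.Value}, nil
	case yaml.SequenceNode:
		out := make([]string, 0, len(in.Content))
		for _, v := range in.Content {
			if v.Kind != yaml.ScalarNode {
				return nil, fmt.Errorf("unexpected node in type list: %v", v.Kind)
			}
			out = append(out, v.Value)
		}
		return out, nil
	default:
		return nil, fmt.Errorf("unexpected kind for type: %v", in.Kind)
	}
}

func main() {
	var doc yaml.Node
	_ = yaml.Unmarshal([]byte(`[string, "null"]`), &doc)
	fmt.Println(typeItemValues(doc.Content[0])) // [string null] <nil>
}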
-func NewXml(in interface{}, context *compiler.Context) (*Xml, error) { +func NewXml(in *yaml.Node, context *compiler.Context) (*Xml, error) { errors := make([]error, 0) x := &Xml{} m, ok := compiler.UnpackMap(in) @@ -5613,71 +5380,71 @@ func NewXml(in interface{}, context *compiler.Context) (*Xml, error) { // string name = 1; v1 := compiler.MapValueForKey(m, "name") if v1 != nil { - x.Name, ok = v1.(string) + x.Name, ok = compiler.StringForScalarNode(v1) if !ok { - message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1) + message := fmt.Sprintf("has unexpected value for name: %s", compiler.Display(v1)) errors = append(errors, compiler.NewError(context, message)) } } // string namespace = 2; v2 := compiler.MapValueForKey(m, "namespace") if v2 != nil { - x.Namespace, ok = v2.(string) + x.Namespace, ok = compiler.StringForScalarNode(v2) if !ok { - message := fmt.Sprintf("has unexpected value for namespace: %+v (%T)", v2, v2) + message := fmt.Sprintf("has unexpected value for namespace: %s", compiler.Display(v2)) errors = append(errors, compiler.NewError(context, message)) } } // string prefix = 3; v3 := compiler.MapValueForKey(m, "prefix") if v3 != nil { - x.Prefix, ok = v3.(string) + x.Prefix, ok = compiler.StringForScalarNode(v3) if !ok { - message := fmt.Sprintf("has unexpected value for prefix: %+v (%T)", v3, v3) + message := fmt.Sprintf("has unexpected value for prefix: %s", compiler.Display(v3)) errors = append(errors, compiler.NewError(context, message)) } } // bool attribute = 4; v4 := compiler.MapValueForKey(m, "attribute") if v4 != nil { - x.Attribute, ok = v4.(bool) + x.Attribute, ok = compiler.BoolForScalarNode(v4) if !ok { - message := fmt.Sprintf("has unexpected value for attribute: %+v (%T)", v4, v4) + message := fmt.Sprintf("has unexpected value for attribute: %s", compiler.Display(v4)) errors = append(errors, compiler.NewError(context, message)) } } // bool wrapped = 5; v5 := compiler.MapValueForKey(m, "wrapped") if v5 != nil { - x.Wrapped, ok = v5.(bool) + x.Wrapped, ok = compiler.BoolForScalarNode(v5) if !ok { - message := fmt.Sprintf("has unexpected value for wrapped: %+v (%T)", v5, v5) + message := fmt.Sprintf("has unexpected value for wrapped: %s", compiler.Display(v5)) errors = append(errors, compiler.NewError(context, message)) } } // repeated NamedAny vendor_extension = 6; // MAP: Any ^x- x.VendorExtension = make([]*NamedAny, 0) - for _, item := range m { - k, ok := compiler.StringValue(item.Key) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) if ok { - v := item.Value + v := m.Content[i+1] if strings.HasPrefix(k, "x-") { pair := &NamedAny{} pair.Name = k result := &Any{} - handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + handled, resultFromExt, err := compiler.CallExtension(context, v, k) if handled { if err != nil { errors = append(errors, err) } else { - bytes, _ := yaml.Marshal(v) + bytes := compiler.Marshal(v) result.Yaml = string(bytes) result.Value = resultFromExt pair.Value = result } } else { - pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + pair.Value, err = NewAny(v, compiler.NewContext(k, v, context)) if err != nil { errors = append(errors, err) } @@ -5691,7 +5458,7 @@ func NewXml(in interface{}, context *compiler.Context) (*Xml, error) { } // ResolveReferences resolves references found inside AdditionalPropertiesItem objects. 
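From here to the end of the file the patch mostly rewrites signatures: every ResolveReferences now returns *yaml.Node instead of interface{}, because a resolved $ref is a parsed YAML subtree. A hypothetical miniature of the generated shape, with errors.Join standing in for compiler.NewErrorGroupOrNil and made-up bodyParameter/schema types:

package main

import (
	"errors"
	"fmt"

	"gopkg.in/yaml.v3"
)

type schema struct{ ref string }

// Each node visits its children, accumulates their errors, and returns the
// *yaml.Node that should replace it (nil when it resolves in place).
func (s *schema) resolveReferences(root string) (*yaml.Node, error) {
	if s.ref != "" {
		// The generated code calls compiler.ReadInfoForRef(root, s.ref)
		// here; this sketch just reports the unresolved reference.
		return nil, fmt.Errorf("unresolved $ref %q under %s", s.ref, root)
	}
	return nil, nil
}

type bodyParameter struct{ schema *schema }

func (m *bodyParameter) resolveReferences(root string) (*yaml.Node, error) {
	errs := make([]error, 0)
	if m.schema != nil {
		if _, err := m.schema.resolveReferences(root); err != nil {
			errs = append(errs, err)
		}
	}
	return nil, errors.Join(errs...)
}

func main() {
	p := &bodyParameter{schema: &schema{ref: "#/definitions/Pet"}}
	_, err := p.resolveReferences("swagger.yaml")
	fmt.Println(err)
}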
-func (m *AdditionalPropertiesItem) ResolveReferences(root string) (interface{}, error) { +func (m *AdditionalPropertiesItem) ResolveReferences(root string) (*yaml.Node, error) { errors := make([]error, 0) { p, ok := m.Oneof.(*AdditionalPropertiesItem_Schema) @@ -5706,13 +5473,13 @@ func (m *AdditionalPropertiesItem) ResolveReferences(root string) (interface{}, } // ResolveReferences resolves references found inside Any objects. -func (m *Any) ResolveReferences(root string) (interface{}, error) { +func (m *Any) ResolveReferences(root string) (*yaml.Node, error) { errors := make([]error, 0) return nil, compiler.NewErrorGroupOrNil(errors) } // ResolveReferences resolves references found inside ApiKeySecurity objects. -func (m *ApiKeySecurity) ResolveReferences(root string) (interface{}, error) { +func (m *ApiKeySecurity) ResolveReferences(root string) (*yaml.Node, error) { errors := make([]error, 0) for _, item := range m.VendorExtension { if item != nil { @@ -5726,7 +5493,7 @@ func (m *ApiKeySecurity) ResolveReferences(root string) (interface{}, error) { } // ResolveReferences resolves references found inside BasicAuthenticationSecurity objects. -func (m *BasicAuthenticationSecurity) ResolveReferences(root string) (interface{}, error) { +func (m *BasicAuthenticationSecurity) ResolveReferences(root string) (*yaml.Node, error) { errors := make([]error, 0) for _, item := range m.VendorExtension { if item != nil { @@ -5740,7 +5507,7 @@ func (m *BasicAuthenticationSecurity) ResolveReferences(root string) (interface{ } // ResolveReferences resolves references found inside BodyParameter objects. -func (m *BodyParameter) ResolveReferences(root string) (interface{}, error) { +func (m *BodyParameter) ResolveReferences(root string) (*yaml.Node, error) { errors := make([]error, 0) if m.Schema != nil { _, err := m.Schema.ResolveReferences(root) @@ -5760,7 +5527,7 @@ func (m *BodyParameter) ResolveReferences(root string) (interface{}, error) { } // ResolveReferences resolves references found inside Contact objects. -func (m *Contact) ResolveReferences(root string) (interface{}, error) { +func (m *Contact) ResolveReferences(root string) (*yaml.Node, error) { errors := make([]error, 0) for _, item := range m.VendorExtension { if item != nil { @@ -5774,7 +5541,7 @@ func (m *Contact) ResolveReferences(root string) (interface{}, error) { } // ResolveReferences resolves references found inside Default objects. -func (m *Default) ResolveReferences(root string) (interface{}, error) { +func (m *Default) ResolveReferences(root string) (*yaml.Node, error) { errors := make([]error, 0) for _, item := range m.AdditionalProperties { if item != nil { @@ -5788,7 +5555,7 @@ func (m *Default) ResolveReferences(root string) (interface{}, error) { } // ResolveReferences resolves references found inside Definitions objects. -func (m *Definitions) ResolveReferences(root string) (interface{}, error) { +func (m *Definitions) ResolveReferences(root string) (*yaml.Node, error) { errors := make([]error, 0) for _, item := range m.AdditionalProperties { if item != nil { @@ -5802,7 +5569,7 @@ func (m *Definitions) ResolveReferences(root string) (interface{}, error) { } // ResolveReferences resolves references found inside Document objects. 
-func (m *Document) ResolveReferences(root string) (interface{}, error) { +func (m *Document) ResolveReferences(root string) (*yaml.Node, error) { errors := make([]error, 0) if m.Info != nil { _, err := m.Info.ResolveReferences(root) @@ -5874,7 +5641,7 @@ func (m *Document) ResolveReferences(root string) (interface{}, error) { } // ResolveReferences resolves references found inside Examples objects. -func (m *Examples) ResolveReferences(root string) (interface{}, error) { +func (m *Examples) ResolveReferences(root string) (*yaml.Node, error) { errors := make([]error, 0) for _, item := range m.AdditionalProperties { if item != nil { @@ -5888,7 +5655,7 @@ func (m *Examples) ResolveReferences(root string) (interface{}, error) { } // ResolveReferences resolves references found inside ExternalDocs objects. -func (m *ExternalDocs) ResolveReferences(root string) (interface{}, error) { +func (m *ExternalDocs) ResolveReferences(root string) (*yaml.Node, error) { errors := make([]error, 0) for _, item := range m.VendorExtension { if item != nil { @@ -5902,7 +5669,7 @@ func (m *ExternalDocs) ResolveReferences(root string) (interface{}, error) { } // ResolveReferences resolves references found inside FileSchema objects. -func (m *FileSchema) ResolveReferences(root string) (interface{}, error) { +func (m *FileSchema) ResolveReferences(root string) (*yaml.Node, error) { errors := make([]error, 0) if m.Default != nil { _, err := m.Default.ResolveReferences(root) @@ -5934,7 +5701,7 @@ func (m *FileSchema) ResolveReferences(root string) (interface{}, error) { } // ResolveReferences resolves references found inside FormDataParameterSubSchema objects. -func (m *FormDataParameterSubSchema) ResolveReferences(root string) (interface{}, error) { +func (m *FormDataParameterSubSchema) ResolveReferences(root string) (*yaml.Node, error) { errors := make([]error, 0) if m.Items != nil { _, err := m.Items.ResolveReferences(root) @@ -5968,7 +5735,7 @@ func (m *FormDataParameterSubSchema) ResolveReferences(root string) (interface{} } // ResolveReferences resolves references found inside Header objects. -func (m *Header) ResolveReferences(root string) (interface{}, error) { +func (m *Header) ResolveReferences(root string) (*yaml.Node, error) { errors := make([]error, 0) if m.Items != nil { _, err := m.Items.ResolveReferences(root) @@ -6002,7 +5769,7 @@ func (m *Header) ResolveReferences(root string) (interface{}, error) { } // ResolveReferences resolves references found inside HeaderParameterSubSchema objects. -func (m *HeaderParameterSubSchema) ResolveReferences(root string) (interface{}, error) { +func (m *HeaderParameterSubSchema) ResolveReferences(root string) (*yaml.Node, error) { errors := make([]error, 0) if m.Items != nil { _, err := m.Items.ResolveReferences(root) @@ -6036,7 +5803,7 @@ func (m *HeaderParameterSubSchema) ResolveReferences(root string) (interface{}, } // ResolveReferences resolves references found inside Headers objects. -func (m *Headers) ResolveReferences(root string) (interface{}, error) { +func (m *Headers) ResolveReferences(root string) (*yaml.Node, error) { errors := make([]error, 0) for _, item := range m.AdditionalProperties { if item != nil { @@ -6050,7 +5817,7 @@ func (m *Headers) ResolveReferences(root string) (interface{}, error) { } // ResolveReferences resolves references found inside Info objects. 
-func (m *Info) ResolveReferences(root string) (interface{}, error) { +func (m *Info) ResolveReferences(root string) (*yaml.Node, error) { errors := make([]error, 0) if m.Contact != nil { _, err := m.Contact.ResolveReferences(root) @@ -6076,7 +5843,7 @@ func (m *Info) ResolveReferences(root string) (interface{}, error) { } // ResolveReferences resolves references found inside ItemsItem objects. -func (m *ItemsItem) ResolveReferences(root string) (interface{}, error) { +func (m *ItemsItem) ResolveReferences(root string) (*yaml.Node, error) { errors := make([]error, 0) for _, item := range m.Schema { if item != nil { @@ -6090,7 +5857,7 @@ func (m *ItemsItem) ResolveReferences(root string) (interface{}, error) { } // ResolveReferences resolves references found inside JsonReference objects. -func (m *JsonReference) ResolveReferences(root string) (interface{}, error) { +func (m *JsonReference) ResolveReferences(root string) (*yaml.Node, error) { errors := make([]error, 0) if m.XRef != "" { info, err := compiler.ReadInfoForRef(root, m.XRef) @@ -6110,7 +5877,7 @@ func (m *JsonReference) ResolveReferences(root string) (interface{}, error) { } // ResolveReferences resolves references found inside License objects. -func (m *License) ResolveReferences(root string) (interface{}, error) { +func (m *License) ResolveReferences(root string) (*yaml.Node, error) { errors := make([]error, 0) for _, item := range m.VendorExtension { if item != nil { @@ -6124,7 +5891,7 @@ func (m *License) ResolveReferences(root string) (interface{}, error) { } // ResolveReferences resolves references found inside NamedAny objects. -func (m *NamedAny) ResolveReferences(root string) (interface{}, error) { +func (m *NamedAny) ResolveReferences(root string) (*yaml.Node, error) { errors := make([]error, 0) if m.Value != nil { _, err := m.Value.ResolveReferences(root) @@ -6136,7 +5903,7 @@ func (m *NamedAny) ResolveReferences(root string) (interface{}, error) { } // ResolveReferences resolves references found inside NamedHeader objects. -func (m *NamedHeader) ResolveReferences(root string) (interface{}, error) { +func (m *NamedHeader) ResolveReferences(root string) (*yaml.Node, error) { errors := make([]error, 0) if m.Value != nil { _, err := m.Value.ResolveReferences(root) @@ -6148,7 +5915,7 @@ func (m *NamedHeader) ResolveReferences(root string) (interface{}, error) { } // ResolveReferences resolves references found inside NamedParameter objects. -func (m *NamedParameter) ResolveReferences(root string) (interface{}, error) { +func (m *NamedParameter) ResolveReferences(root string) (*yaml.Node, error) { errors := make([]error, 0) if m.Value != nil { _, err := m.Value.ResolveReferences(root) @@ -6160,7 +5927,7 @@ func (m *NamedParameter) ResolveReferences(root string) (interface{}, error) { } // ResolveReferences resolves references found inside NamedPathItem objects. -func (m *NamedPathItem) ResolveReferences(root string) (interface{}, error) { +func (m *NamedPathItem) ResolveReferences(root string) (*yaml.Node, error) { errors := make([]error, 0) if m.Value != nil { _, err := m.Value.ResolveReferences(root) @@ -6172,7 +5939,7 @@ func (m *NamedPathItem) ResolveReferences(root string) (interface{}, error) { } // ResolveReferences resolves references found inside NamedResponse objects. 
-func (m *NamedResponse) ResolveReferences(root string) (interface{}, error) { +func (m *NamedResponse) ResolveReferences(root string) (*yaml.Node, error) { errors := make([]error, 0) if m.Value != nil { _, err := m.Value.ResolveReferences(root) @@ -6184,7 +5951,7 @@ func (m *NamedResponse) ResolveReferences(root string) (interface{}, error) { } // ResolveReferences resolves references found inside NamedResponseValue objects. -func (m *NamedResponseValue) ResolveReferences(root string) (interface{}, error) { +func (m *NamedResponseValue) ResolveReferences(root string) (*yaml.Node, error) { errors := make([]error, 0) if m.Value != nil { _, err := m.Value.ResolveReferences(root) @@ -6196,7 +5963,7 @@ func (m *NamedResponseValue) ResolveReferences(root string) (interface{}, error) } // ResolveReferences resolves references found inside NamedSchema objects. -func (m *NamedSchema) ResolveReferences(root string) (interface{}, error) { +func (m *NamedSchema) ResolveReferences(root string) (*yaml.Node, error) { errors := make([]error, 0) if m.Value != nil { _, err := m.Value.ResolveReferences(root) @@ -6208,7 +5975,7 @@ func (m *NamedSchema) ResolveReferences(root string) (interface{}, error) { } // ResolveReferences resolves references found inside NamedSecurityDefinitionsItem objects. -func (m *NamedSecurityDefinitionsItem) ResolveReferences(root string) (interface{}, error) { +func (m *NamedSecurityDefinitionsItem) ResolveReferences(root string) (*yaml.Node, error) { errors := make([]error, 0) if m.Value != nil { _, err := m.Value.ResolveReferences(root) @@ -6220,13 +5987,13 @@ func (m *NamedSecurityDefinitionsItem) ResolveReferences(root string) (interface } // ResolveReferences resolves references found inside NamedString objects. -func (m *NamedString) ResolveReferences(root string) (interface{}, error) { +func (m *NamedString) ResolveReferences(root string) (*yaml.Node, error) { errors := make([]error, 0) return nil, compiler.NewErrorGroupOrNil(errors) } // ResolveReferences resolves references found inside NamedStringArray objects. -func (m *NamedStringArray) ResolveReferences(root string) (interface{}, error) { +func (m *NamedStringArray) ResolveReferences(root string) (*yaml.Node, error) { errors := make([]error, 0) if m.Value != nil { _, err := m.Value.ResolveReferences(root) @@ -6238,7 +6005,7 @@ func (m *NamedStringArray) ResolveReferences(root string) (interface{}, error) { } // ResolveReferences resolves references found inside NonBodyParameter objects. -func (m *NonBodyParameter) ResolveReferences(root string) (interface{}, error) { +func (m *NonBodyParameter) ResolveReferences(root string) (*yaml.Node, error) { errors := make([]error, 0) { p, ok := m.Oneof.(*NonBodyParameter_HeaderParameterSubSchema) @@ -6280,7 +6047,7 @@ func (m *NonBodyParameter) ResolveReferences(root string) (interface{}, error) { } // ResolveReferences resolves references found inside Oauth2AccessCodeSecurity objects. -func (m *Oauth2AccessCodeSecurity) ResolveReferences(root string) (interface{}, error) { +func (m *Oauth2AccessCodeSecurity) ResolveReferences(root string) (*yaml.Node, error) { errors := make([]error, 0) if m.Scopes != nil { _, err := m.Scopes.ResolveReferences(root) @@ -6300,7 +6067,7 @@ func (m *Oauth2AccessCodeSecurity) ResolveReferences(root string) (interface{}, } // ResolveReferences resolves references found inside Oauth2ApplicationSecurity objects. 
-func (m *Oauth2ApplicationSecurity) ResolveReferences(root string) (interface{}, error) { +func (m *Oauth2ApplicationSecurity) ResolveReferences(root string) (*yaml.Node, error) { errors := make([]error, 0) if m.Scopes != nil { _, err := m.Scopes.ResolveReferences(root) @@ -6320,7 +6087,7 @@ func (m *Oauth2ApplicationSecurity) ResolveReferences(root string) (interface{}, } // ResolveReferences resolves references found inside Oauth2ImplicitSecurity objects. -func (m *Oauth2ImplicitSecurity) ResolveReferences(root string) (interface{}, error) { +func (m *Oauth2ImplicitSecurity) ResolveReferences(root string) (*yaml.Node, error) { errors := make([]error, 0) if m.Scopes != nil { _, err := m.Scopes.ResolveReferences(root) @@ -6340,7 +6107,7 @@ func (m *Oauth2ImplicitSecurity) ResolveReferences(root string) (interface{}, er } // ResolveReferences resolves references found inside Oauth2PasswordSecurity objects. -func (m *Oauth2PasswordSecurity) ResolveReferences(root string) (interface{}, error) { +func (m *Oauth2PasswordSecurity) ResolveReferences(root string) (*yaml.Node, error) { errors := make([]error, 0) if m.Scopes != nil { _, err := m.Scopes.ResolveReferences(root) @@ -6360,7 +6127,7 @@ func (m *Oauth2PasswordSecurity) ResolveReferences(root string) (interface{}, er } // ResolveReferences resolves references found inside Oauth2Scopes objects. -func (m *Oauth2Scopes) ResolveReferences(root string) (interface{}, error) { +func (m *Oauth2Scopes) ResolveReferences(root string) (*yaml.Node, error) { errors := make([]error, 0) for _, item := range m.AdditionalProperties { if item != nil { @@ -6374,7 +6141,7 @@ func (m *Oauth2Scopes) ResolveReferences(root string) (interface{}, error) { } // ResolveReferences resolves references found inside Operation objects. -func (m *Operation) ResolveReferences(root string) (interface{}, error) { +func (m *Operation) ResolveReferences(root string) (*yaml.Node, error) { errors := make([]error, 0) if m.ExternalDocs != nil { _, err := m.ExternalDocs.ResolveReferences(root) @@ -6416,7 +6183,7 @@ func (m *Operation) ResolveReferences(root string) (interface{}, error) { } // ResolveReferences resolves references found inside Parameter objects. -func (m *Parameter) ResolveReferences(root string) (interface{}, error) { +func (m *Parameter) ResolveReferences(root string) (*yaml.Node, error) { errors := make([]error, 0) { p, ok := m.Oneof.(*Parameter_BodyParameter) @@ -6440,7 +6207,7 @@ func (m *Parameter) ResolveReferences(root string) (interface{}, error) { } // ResolveReferences resolves references found inside ParameterDefinitions objects. -func (m *ParameterDefinitions) ResolveReferences(root string) (interface{}, error) { +func (m *ParameterDefinitions) ResolveReferences(root string) (*yaml.Node, error) { errors := make([]error, 0) for _, item := range m.AdditionalProperties { if item != nil { @@ -6454,7 +6221,7 @@ func (m *ParameterDefinitions) ResolveReferences(root string) (interface{}, erro } // ResolveReferences resolves references found inside ParametersItem objects. -func (m *ParametersItem) ResolveReferences(root string) (interface{}, error) { +func (m *ParametersItem) ResolveReferences(root string) (*yaml.Node, error) { errors := make([]error, 0) { p, ok := m.Oneof.(*ParametersItem_Parameter) @@ -6486,7 +6253,7 @@ func (m *ParametersItem) ResolveReferences(root string) (interface{}, error) { } // ResolveReferences resolves references found inside PathItem objects. 
-func (m *PathItem) ResolveReferences(root string) (interface{}, error) {
+func (m *PathItem) ResolveReferences(root string) (*yaml.Node, error) {
 	errors := make([]error, 0)
 	if m.XRef != "" {
 		info, err := compiler.ReadInfoForRef(root, m.XRef)
@@ -6564,7 +6331,7 @@ func (m *PathItem) ResolveReferences(root string) (interface{}, error) {
 }
 
 // ResolveReferences resolves references found inside PathParameterSubSchema objects.
-func (m *PathParameterSubSchema) ResolveReferences(root string) (interface{}, error) {
+func (m *PathParameterSubSchema) ResolveReferences(root string) (*yaml.Node, error) {
 	errors := make([]error, 0)
 	if m.Items != nil {
 		_, err := m.Items.ResolveReferences(root)
@@ -6598,7 +6365,7 @@ func (m *PathParameterSubSchema) ResolveReferences(root string) (interface{}, er
 }
 
 // ResolveReferences resolves references found inside Paths objects.
-func (m *Paths) ResolveReferences(root string) (interface{}, error) {
+func (m *Paths) ResolveReferences(root string) (*yaml.Node, error) {
 	errors := make([]error, 0)
 	for _, item := range m.VendorExtension {
 		if item != nil {
@@ -6620,7 +6387,7 @@ func (m *Paths) ResolveReferences(root string) (interface{}, error) {
 }
 
 // ResolveReferences resolves references found inside PrimitivesItems objects.
-func (m *PrimitivesItems) ResolveReferences(root string) (interface{}, error) {
+func (m *PrimitivesItems) ResolveReferences(root string) (*yaml.Node, error) {
 	errors := make([]error, 0)
 	if m.Items != nil {
 		_, err := m.Items.ResolveReferences(root)
@@ -6654,7 +6421,7 @@ func (m *PrimitivesItems) ResolveReferences(root string) (interface{}, error) {
 }
 
 // ResolveReferences resolves references found inside Properties objects.
-func (m *Properties) ResolveReferences(root string) (interface{}, error) {
+func (m *Properties) ResolveReferences(root string) (*yaml.Node, error) {
 	errors := make([]error, 0)
 	for _, item := range m.AdditionalProperties {
 		if item != nil {
@@ -6668,7 +6435,7 @@ func (m *Properties) ResolveReferences(root string) (interface{}, error) {
 }
 
 // ResolveReferences resolves references found inside QueryParameterSubSchema objects.
-func (m *QueryParameterSubSchema) ResolveReferences(root string) (interface{}, error) {
+func (m *QueryParameterSubSchema) ResolveReferences(root string) (*yaml.Node, error) {
 	errors := make([]error, 0)
 	if m.Items != nil {
 		_, err := m.Items.ResolveReferences(root)
@@ -6702,7 +6469,7 @@ func (m *QueryParameterSubSchema) ResolveReferences(root string) (interface{}, e
 }
 
 // ResolveReferences resolves references found inside Response objects.
-func (m *Response) ResolveReferences(root string) (interface{}, error) {
+func (m *Response) ResolveReferences(root string) (*yaml.Node, error) {
 	errors := make([]error, 0)
 	if m.Schema != nil {
 		_, err := m.Schema.ResolveReferences(root)
@@ -6734,7 +6501,7 @@ func (m *Response) ResolveReferences(root string) (interface{}, error) {
 }
 
 // ResolveReferences resolves references found inside ResponseDefinitions objects.
-func (m *ResponseDefinitions) ResolveReferences(root string) (interface{}, error) {
+func (m *ResponseDefinitions) ResolveReferences(root string) (*yaml.Node, error) {
 	errors := make([]error, 0)
 	for _, item := range m.AdditionalProperties {
 		if item != nil {
@@ -6748,7 +6515,7 @@ func (m *ResponseDefinitions) ResolveReferences(root string) (interface{}, error
 }
 
 // ResolveReferences resolves references found inside ResponseValue objects.
-func (m *ResponseValue) ResolveReferences(root string) (interface{}, error) {
+func (m *ResponseValue) ResolveReferences(root string) (*yaml.Node, error) {
 	errors := make([]error, 0)
 	{
 		p, ok := m.Oneof.(*ResponseValue_Response)
@@ -6780,7 +6547,7 @@ func (m *ResponseValue) ResolveReferences(root string) (interface{}, error) {
 }
 
 // ResolveReferences resolves references found inside Responses objects.
-func (m *Responses) ResolveReferences(root string) (interface{}, error) {
+func (m *Responses) ResolveReferences(root string) (*yaml.Node, error) {
 	errors := make([]error, 0)
 	for _, item := range m.ResponseCode {
 		if item != nil {
@@ -6802,7 +6569,7 @@ func (m *Responses) ResolveReferences(root string) (interface{}, error) {
 }
 
 // ResolveReferences resolves references found inside Schema objects.
-func (m *Schema) ResolveReferences(root string) (interface{}, error) {
+func (m *Schema) ResolveReferences(root string) (*yaml.Node, error) {
 	errors := make([]error, 0)
 	if m.XRef != "" {
 		info, err := compiler.ReadInfoForRef(root, m.XRef)
@@ -6894,7 +6661,7 @@ func (m *Schema) ResolveReferences(root string) (interface{}, error) {
 }
 
 // ResolveReferences resolves references found inside SchemaItem objects.
-func (m *SchemaItem) ResolveReferences(root string) (interface{}, error) {
+func (m *SchemaItem) ResolveReferences(root string) (*yaml.Node, error) {
 	errors := make([]error, 0)
 	{
 		p, ok := m.Oneof.(*SchemaItem_Schema)
@@ -6918,7 +6685,7 @@ func (m *SchemaItem) ResolveReferences(root string) (interface{}, error) {
 }
 
 // ResolveReferences resolves references found inside SecurityDefinitions objects.
-func (m *SecurityDefinitions) ResolveReferences(root string) (interface{}, error) {
+func (m *SecurityDefinitions) ResolveReferences(root string) (*yaml.Node, error) {
 	errors := make([]error, 0)
 	for _, item := range m.AdditionalProperties {
 		if item != nil {
@@ -6932,7 +6699,7 @@ func (m *SecurityDefinitions) ResolveReferences(root string) (interface{}, error
 }
 
 // ResolveReferences resolves references found inside SecurityDefinitionsItem objects.
-func (m *SecurityDefinitionsItem) ResolveReferences(root string) (interface{}, error) {
+func (m *SecurityDefinitionsItem) ResolveReferences(root string) (*yaml.Node, error) {
 	errors := make([]error, 0)
 	{
 		p, ok := m.Oneof.(*SecurityDefinitionsItem_BasicAuthenticationSecurity)
@@ -6992,7 +6759,7 @@ func (m *SecurityDefinitionsItem) ResolveReferences(root string) (interface{}, e
 }
 
 // ResolveReferences resolves references found inside SecurityRequirement objects.
-func (m *SecurityRequirement) ResolveReferences(root string) (interface{}, error) {
+func (m *SecurityRequirement) ResolveReferences(root string) (*yaml.Node, error) {
 	errors := make([]error, 0)
 	for _, item := range m.AdditionalProperties {
 		if item != nil {
@@ -7006,13 +6773,13 @@ func (m *SecurityRequirement) ResolveReferences(root string) (interface{}, error
 }
 
 // ResolveReferences resolves references found inside StringArray objects.
-func (m *StringArray) ResolveReferences(root string) (interface{}, error) {
+func (m *StringArray) ResolveReferences(root string) (*yaml.Node, error) {
 	errors := make([]error, 0)
 	return nil, compiler.NewErrorGroupOrNil(errors)
 }
 
 // ResolveReferences resolves references found inside Tag objects.
-func (m *Tag) ResolveReferences(root string) (interface{}, error) {
+func (m *Tag) ResolveReferences(root string) (*yaml.Node, error) {
 	errors := make([]error, 0)
 	if m.ExternalDocs != nil {
 		_, err := m.ExternalDocs.ResolveReferences(root)
@@ -7032,13 +6799,13 @@ func (m *Tag) ResolveReferences(root string) (interface{}, error) {
 }
 
 // ResolveReferences resolves references found inside TypeItem objects.
-func (m *TypeItem) ResolveReferences(root string) (interface{}, error) {
+func (m *TypeItem) ResolveReferences(root string) (*yaml.Node, error) {
 	errors := make([]error, 0)
 	return nil, compiler.NewErrorGroupOrNil(errors)
 }
 
 // ResolveReferences resolves references found inside VendorExtension objects.
-func (m *VendorExtension) ResolveReferences(root string) (interface{}, error) {
+func (m *VendorExtension) ResolveReferences(root string) (*yaml.Node, error) {
 	errors := make([]error, 0)
 	for _, item := range m.AdditionalProperties {
 		if item != nil {
@@ -7052,7 +6819,7 @@ func (m *VendorExtension) ResolveReferences(root string) (interface{}, error) {
 }
 
 // ResolveReferences resolves references found inside Xml objects.
-func (m *Xml) ResolveReferences(root string) (interface{}, error) {
+func (m *Xml) ResolveReferences(root string) (*yaml.Node, error) {
 	errors := make([]error, 0)
 	for _, item := range m.VendorExtension {
 		if item != nil {
@@ -7066,7 +6833,7 @@ func (m *Xml) ResolveReferences(root string) (interface{}, error) {
 }
 
 // ToRawInfo returns a description of AdditionalPropertiesItem suitable for JSON or YAML export.
-func (m *AdditionalPropertiesItem) ToRawInfo() interface{} {
+func (m *AdditionalPropertiesItem) ToRawInfo() *yaml.Node {
 	// ONE OF WRAPPER
 	// AdditionalPropertiesItem
 	// {Name:schema Type:Schema StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
@@ -7076,792 +6843,886 @@ func (m *AdditionalPropertiesItem) ToRawInfo() interface{} {
 	}
 	// {Name:boolean Type:bool StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
 	if v1, ok := m.GetOneof().(*AdditionalPropertiesItem_Boolean); ok {
-		return v1.Boolean
+		return compiler.NewScalarNodeForBool(v1.Boolean)
 	}
-	return nil
+	return compiler.NewNullNode()
 }
 
 // ToRawInfo returns a description of Any suitable for JSON or YAML export.
-func (m *Any) ToRawInfo() interface{} {
+func (m *Any) ToRawInfo() *yaml.Node {
 	var err error
-	var info1 []yaml.MapSlice
-	err = yaml.Unmarshal([]byte(m.Yaml), &info1)
+	var node yaml.Node
+	err = yaml.Unmarshal([]byte(m.Yaml), &node)
 	if err == nil {
-		return info1
-	}
-	var info2 yaml.MapSlice
-	err = yaml.Unmarshal([]byte(m.Yaml), &info2)
-	if err == nil {
-		return info2
-	}
-	var info3 interface{}
-	err = yaml.Unmarshal([]byte(m.Yaml), &info3)
-	if err == nil {
-		return info3
+		if node.Kind == yaml.DocumentNode {
+			return node.Content[0]
+		}
+		return &node
 	}
-	return nil
+	return compiler.NewNullNode()
 }
 
 // ToRawInfo returns a description of ApiKeySecurity suitable for JSON or YAML export.
-func (m *ApiKeySecurity) ToRawInfo() interface{} {
-	info := yaml.MapSlice{}
+func (m *ApiKeySecurity) ToRawInfo() *yaml.Node {
+	info := compiler.NewMappingNode()
 	if m == nil {
 		return info
 	}
 	// always include this required field.
-	info = append(info, yaml.MapItem{Key: "type", Value: m.Type})
+	info.Content = append(info.Content, compiler.NewScalarNodeForString("type"))
+	info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Type))
 	// always include this required field.
- info = append(info, yaml.MapItem{Key: "name", Value: m.Name}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("name")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name)) // always include this required field. - info = append(info, yaml.MapItem{Key: "in", Value: m.In}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("in")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.In)) if m.Description != "" { - info = append(info, yaml.MapItem{Key: "description", Value: m.Description}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("description")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Description)) } if m.VendorExtension != nil { for _, item := range m.VendorExtension { - info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) } } - // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} return info } // ToRawInfo returns a description of BasicAuthenticationSecurity suitable for JSON or YAML export. -func (m *BasicAuthenticationSecurity) ToRawInfo() interface{} { - info := yaml.MapSlice{} +func (m *BasicAuthenticationSecurity) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() if m == nil { return info } // always include this required field. - info = append(info, yaml.MapItem{Key: "type", Value: m.Type}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("type")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Type)) if m.Description != "" { - info = append(info, yaml.MapItem{Key: "description", Value: m.Description}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("description")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Description)) } if m.VendorExtension != nil { for _, item := range m.VendorExtension { - info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) } } - // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} return info } // ToRawInfo returns a description of BodyParameter suitable for JSON or YAML export. -func (m *BodyParameter) ToRawInfo() interface{} { - info := yaml.MapSlice{} +func (m *BodyParameter) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() if m == nil { return info } if m.Description != "" { - info = append(info, yaml.MapItem{Key: "description", Value: m.Description}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("description")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Description)) } // always include this required field. - info = append(info, yaml.MapItem{Key: "name", Value: m.Name}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("name")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name)) // always include this required field. 
- info = append(info, yaml.MapItem{Key: "in", Value: m.In}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("in")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.In)) if m.Required != false { - info = append(info, yaml.MapItem{Key: "required", Value: m.Required}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("required")) + info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.Required)) } // always include this required field. - info = append(info, yaml.MapItem{Key: "schema", Value: m.Schema.ToRawInfo()}) - // &{Name:schema Type:Schema StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + info.Content = append(info.Content, compiler.NewScalarNodeForString("schema")) + info.Content = append(info.Content, m.Schema.ToRawInfo()) if m.VendorExtension != nil { for _, item := range m.VendorExtension { - info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) } } - // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} return info } // ToRawInfo returns a description of Contact suitable for JSON or YAML export. -func (m *Contact) ToRawInfo() interface{} { - info := yaml.MapSlice{} +func (m *Contact) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() if m == nil { return info } if m.Name != "" { - info = append(info, yaml.MapItem{Key: "name", Value: m.Name}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("name")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name)) } if m.Url != "" { - info = append(info, yaml.MapItem{Key: "url", Value: m.Url}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("url")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Url)) } if m.Email != "" { - info = append(info, yaml.MapItem{Key: "email", Value: m.Email}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("email")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Email)) } if m.VendorExtension != nil { for _, item := range m.VendorExtension { - info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) } } - // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} return info } // ToRawInfo returns a description of Default suitable for JSON or YAML export. -func (m *Default) ToRawInfo() interface{} { - info := yaml.MapSlice{} +func (m *Default) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() if m == nil { return info } if m.AdditionalProperties != nil { for _, item := range m.AdditionalProperties { - info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) } } - // &{Name:additionalProperties Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern: Implicit:false Description:} return info } // ToRawInfo returns a description of Definitions suitable for JSON or YAML export. 
-func (m *Definitions) ToRawInfo() interface{} {
-	info := yaml.MapSlice{}
+func (m *Definitions) ToRawInfo() *yaml.Node {
+	info := compiler.NewMappingNode()
 	if m == nil {
 		return info
 	}
 	if m.AdditionalProperties != nil {
 		for _, item := range m.AdditionalProperties {
-			info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()})
+			info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name))
+			info.Content = append(info.Content, item.Value.ToRawInfo())
 		}
 	}
-	// &{Name:additionalProperties Type:NamedSchema StringEnumValues:[] MapType:Schema Repeated:true Pattern: Implicit:true Description:}
 	return info
 }
 
 // ToRawInfo returns a description of Document suitable for JSON or YAML export.
-func (m *Document) ToRawInfo() interface{} {
-	info := yaml.MapSlice{}
+func (m *Document) ToRawInfo() *yaml.Node {
+	info := compiler.NewMappingNode()
 	if m == nil {
 		return info
 	}
 	// always include this required field.
-	info = append(info, yaml.MapItem{Key: "swagger", Value: m.Swagger})
+	info.Content = append(info.Content, compiler.NewScalarNodeForString("swagger"))
+	info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Swagger))
 	// always include this required field.
-	info = append(info, yaml.MapItem{Key: "info", Value: m.Info.ToRawInfo()})
-	// &{Name:info Type:Info StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
+	info.Content = append(info.Content, compiler.NewScalarNodeForString("info"))
+	info.Content = append(info.Content, m.Info.ToRawInfo())
 	if m.Host != "" {
-		info = append(info, yaml.MapItem{Key: "host", Value: m.Host})
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("host"))
+		info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Host))
 	}
 	if m.BasePath != "" {
-		info = append(info, yaml.MapItem{Key: "basePath", Value: m.BasePath})
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("basePath"))
+		info.Content = append(info.Content, compiler.NewScalarNodeForString(m.BasePath))
 	}
 	if len(m.Schemes) != 0 {
-		info = append(info, yaml.MapItem{Key: "schemes", Value: m.Schemes})
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("schemes"))
+		info.Content = append(info.Content, compiler.NewSequenceNodeForStringArray(m.Schemes))
 	}
 	if len(m.Consumes) != 0 {
-		info = append(info, yaml.MapItem{Key: "consumes", Value: m.Consumes})
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("consumes"))
+		info.Content = append(info.Content, compiler.NewSequenceNodeForStringArray(m.Consumes))
 	}
 	if len(m.Produces) != 0 {
-		info = append(info, yaml.MapItem{Key: "produces", Value: m.Produces})
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("produces"))
+		info.Content = append(info.Content, compiler.NewSequenceNodeForStringArray(m.Produces))
 	}
 	// always include this required field.
- info = append(info, yaml.MapItem{Key: "paths", Value: m.Paths.ToRawInfo()}) - // &{Name:paths Type:Paths StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + info.Content = append(info.Content, compiler.NewScalarNodeForString("paths")) + info.Content = append(info.Content, m.Paths.ToRawInfo()) if m.Definitions != nil { - info = append(info, yaml.MapItem{Key: "definitions", Value: m.Definitions.ToRawInfo()}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("definitions")) + info.Content = append(info.Content, m.Definitions.ToRawInfo()) } - // &{Name:definitions Type:Definitions StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} if m.Parameters != nil { - info = append(info, yaml.MapItem{Key: "parameters", Value: m.Parameters.ToRawInfo()}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("parameters")) + info.Content = append(info.Content, m.Parameters.ToRawInfo()) } - // &{Name:parameters Type:ParameterDefinitions StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} if m.Responses != nil { - info = append(info, yaml.MapItem{Key: "responses", Value: m.Responses.ToRawInfo()}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("responses")) + info.Content = append(info.Content, m.Responses.ToRawInfo()) } - // &{Name:responses Type:ResponseDefinitions StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} if len(m.Security) != 0 { - items := make([]interface{}, 0) + items := compiler.NewSequenceNode() for _, item := range m.Security { - items = append(items, item.ToRawInfo()) + items.Content = append(items.Content, item.ToRawInfo()) } - info = append(info, yaml.MapItem{Key: "security", Value: items}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("security")) + info.Content = append(info.Content, items) } - // &{Name:security Type:SecurityRequirement StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:} if m.SecurityDefinitions != nil { - info = append(info, yaml.MapItem{Key: "securityDefinitions", Value: m.SecurityDefinitions.ToRawInfo()}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("securityDefinitions")) + info.Content = append(info.Content, m.SecurityDefinitions.ToRawInfo()) } - // &{Name:securityDefinitions Type:SecurityDefinitions StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} if len(m.Tags) != 0 { - items := make([]interface{}, 0) + items := compiler.NewSequenceNode() for _, item := range m.Tags { - items = append(items, item.ToRawInfo()) + items.Content = append(items.Content, item.ToRawInfo()) } - info = append(info, yaml.MapItem{Key: "tags", Value: items}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("tags")) + info.Content = append(info.Content, items) } - // &{Name:tags Type:Tag StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:} if m.ExternalDocs != nil { - info = append(info, yaml.MapItem{Key: "externalDocs", Value: m.ExternalDocs.ToRawInfo()}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("externalDocs")) + info.Content = append(info.Content, m.ExternalDocs.ToRawInfo()) } - // &{Name:externalDocs Type:ExternalDocs StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} if m.VendorExtension != nil { for _, item := range m.VendorExtension { - info = append(info, yaml.MapItem{Key: item.Name, 
Value: item.Value.ToRawInfo()}) + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) } } - // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} return info } // ToRawInfo returns a description of Examples suitable for JSON or YAML export. -func (m *Examples) ToRawInfo() interface{} { - info := yaml.MapSlice{} +func (m *Examples) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() if m == nil { return info } if m.AdditionalProperties != nil { for _, item := range m.AdditionalProperties { - info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) } } - // &{Name:additionalProperties Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern: Implicit:true Description:} return info } // ToRawInfo returns a description of ExternalDocs suitable for JSON or YAML export. -func (m *ExternalDocs) ToRawInfo() interface{} { - info := yaml.MapSlice{} +func (m *ExternalDocs) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() if m == nil { return info } if m.Description != "" { - info = append(info, yaml.MapItem{Key: "description", Value: m.Description}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("description")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Description)) } // always include this required field. - info = append(info, yaml.MapItem{Key: "url", Value: m.Url}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("url")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Url)) if m.VendorExtension != nil { for _, item := range m.VendorExtension { - info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) } } - // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} return info } // ToRawInfo returns a description of FileSchema suitable for JSON or YAML export. 
-func (m *FileSchema) ToRawInfo() interface{} {
-	info := yaml.MapSlice{}
+func (m *FileSchema) ToRawInfo() *yaml.Node {
+	info := compiler.NewMappingNode()
 	if m == nil {
 		return info
 	}
 	if m.Format != "" {
-		info = append(info, yaml.MapItem{Key: "format", Value: m.Format})
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("format"))
+		info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Format))
 	}
 	if m.Title != "" {
-		info = append(info, yaml.MapItem{Key: "title", Value: m.Title})
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("title"))
+		info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Title))
 	}
 	if m.Description != "" {
-		info = append(info, yaml.MapItem{Key: "description", Value: m.Description})
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("description"))
+		info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Description))
 	}
 	if m.Default != nil {
-		info = append(info, yaml.MapItem{Key: "default", Value: m.Default.ToRawInfo()})
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("default"))
+		info.Content = append(info.Content, m.Default.ToRawInfo())
 	}
-	// &{Name:default Type:Any StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
 	if len(m.Required) != 0 {
-		info = append(info, yaml.MapItem{Key: "required", Value: m.Required})
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("required"))
+		info.Content = append(info.Content, compiler.NewSequenceNodeForStringArray(m.Required))
 	}
 	// always include this required field.
-	info = append(info, yaml.MapItem{Key: "type", Value: m.Type})
+	info.Content = append(info.Content, compiler.NewScalarNodeForString("type"))
+	info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Type))
 	if m.ReadOnly != false {
-		info = append(info, yaml.MapItem{Key: "readOnly", Value: m.ReadOnly})
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("readOnly"))
+		info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ReadOnly))
 	}
 	if m.ExternalDocs != nil {
-		info = append(info, yaml.MapItem{Key: "externalDocs", Value: m.ExternalDocs.ToRawInfo()})
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("externalDocs"))
+		info.Content = append(info.Content, m.ExternalDocs.ToRawInfo())
 	}
-	// &{Name:externalDocs Type:ExternalDocs StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
 	if m.Example != nil {
-		info = append(info, yaml.MapItem{Key: "example", Value: m.Example.ToRawInfo()})
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("example"))
+		info.Content = append(info.Content, m.Example.ToRawInfo())
 	}
-	// &{Name:example Type:Any StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
 	if m.VendorExtension != nil {
		for _, item := range m.VendorExtension {
-			info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()})
+			info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name))
+			info.Content = append(info.Content, item.Value.ToRawInfo())
 		}
 	}
-	// &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:}
 	return info
 }
 
 // ToRawInfo returns a description of FormDataParameterSubSchema suitable for JSON or YAML export.
-func (m *FormDataParameterSubSchema) ToRawInfo() interface{} {
-	info := yaml.MapSlice{}
+func (m *FormDataParameterSubSchema) ToRawInfo() *yaml.Node {
+	info := compiler.NewMappingNode()
 	if m == nil {
 		return info
 	}
 	if m.Required != false {
-		info = append(info, yaml.MapItem{Key: "required", Value: m.Required})
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("required"))
+		info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.Required))
 	}
 	if m.In != "" {
-		info = append(info, yaml.MapItem{Key: "in", Value: m.In})
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("in"))
+		info.Content = append(info.Content, compiler.NewScalarNodeForString(m.In))
 	}
 	if m.Description != "" {
-		info = append(info, yaml.MapItem{Key: "description", Value: m.Description})
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("description"))
+		info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Description))
 	}
 	if m.Name != "" {
-		info = append(info, yaml.MapItem{Key: "name", Value: m.Name})
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("name"))
+		info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name))
 	}
 	if m.AllowEmptyValue != false {
-		info = append(info, yaml.MapItem{Key: "allowEmptyValue", Value: m.AllowEmptyValue})
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("allowEmptyValue"))
+		info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.AllowEmptyValue))
 	}
 	if m.Type != "" {
-		info = append(info, yaml.MapItem{Key: "type", Value: m.Type})
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("type"))
+		info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Type))
 	}
 	if m.Format != "" {
-		info = append(info, yaml.MapItem{Key: "format", Value: m.Format})
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("format"))
+		info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Format))
 	}
 	if m.Items != nil {
-		info = append(info, yaml.MapItem{Key: "items", Value: m.Items.ToRawInfo()})
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("items"))
+		info.Content = append(info.Content, m.Items.ToRawInfo())
 	}
-	// &{Name:items Type:PrimitivesItems StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
 	if m.CollectionFormat != "" {
-		info = append(info, yaml.MapItem{Key: "collectionFormat", Value: m.CollectionFormat})
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("collectionFormat"))
+		info.Content = append(info.Content, compiler.NewScalarNodeForString(m.CollectionFormat))
 	}
 	if m.Default != nil {
-		info = append(info, yaml.MapItem{Key: "default", Value: m.Default.ToRawInfo()})
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("default"))
+		info.Content = append(info.Content, m.Default.ToRawInfo())
 	}
-	// &{Name:default Type:Any StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
 	if m.Maximum != 0.0 {
-		info = append(info, yaml.MapItem{Key: "maximum", Value: m.Maximum})
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("maximum"))
+		info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.Maximum))
 	}
 	if m.ExclusiveMaximum != false {
-		info = append(info, yaml.MapItem{Key: "exclusiveMaximum", Value: m.ExclusiveMaximum})
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("exclusiveMaximum"))
+		info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ExclusiveMaximum))
 	}
 	if m.Minimum != 0.0 {
-		info = append(info, yaml.MapItem{Key: "minimum", Value: m.Minimum})
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("minimum"))
+		info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.Minimum))
 	}
 	if m.ExclusiveMinimum != false {
-		info = append(info, yaml.MapItem{Key: "exclusiveMinimum", Value: m.ExclusiveMinimum})
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("exclusiveMinimum"))
+		info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ExclusiveMinimum))
 	}
 	if m.MaxLength != 0 {
-		info = append(info, yaml.MapItem{Key: "maxLength", Value: m.MaxLength})
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("maxLength"))
+		info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MaxLength))
 	}
 	if m.MinLength != 0 {
-		info = append(info, yaml.MapItem{Key: "minLength", Value: m.MinLength})
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("minLength"))
+		info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MinLength))
 	}
 	if m.Pattern != "" {
-		info = append(info, yaml.MapItem{Key: "pattern", Value: m.Pattern})
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("pattern"))
+		info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Pattern))
 	}
 	if m.MaxItems != 0 {
-		info = append(info, yaml.MapItem{Key: "maxItems", Value: m.MaxItems})
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("maxItems"))
+		info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MaxItems))
 	}
 	if m.MinItems != 0 {
-		info = append(info, yaml.MapItem{Key: "minItems", Value: m.MinItems})
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("minItems"))
+		info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MinItems))
 	}
 	if m.UniqueItems != false {
-		info = append(info, yaml.MapItem{Key: "uniqueItems", Value: m.UniqueItems})
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("uniqueItems"))
+		info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.UniqueItems))
 	}
 	if len(m.Enum) != 0 {
-		items := make([]interface{}, 0)
+		items := compiler.NewSequenceNode()
 		for _, item := range m.Enum {
-			items = append(items, item.ToRawInfo())
+			items.Content = append(items.Content, item.ToRawInfo())
 		}
-		info = append(info, yaml.MapItem{Key: "enum", Value: items})
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("enum"))
+		info.Content = append(info.Content, items)
 	}
-	// &{Name:enum Type:Any StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:}
 	if m.MultipleOf != 0.0 {
-		info = append(info, yaml.MapItem{Key: "multipleOf", Value: m.MultipleOf})
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("multipleOf"))
+		info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.MultipleOf))
 	}
 	if m.VendorExtension != nil {
 		for _, item := range m.VendorExtension {
-			info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()})
+			info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name))
+			info.Content = append(info.Content, item.Value.ToRawInfo())
 		}
 	}
-	// &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:}
 	return info
 }
 
 // ToRawInfo returns a description of Header suitable for JSON or YAML export.
-func (m *Header) ToRawInfo() interface{} {
-	info := yaml.MapSlice{}
+func (m *Header) ToRawInfo() *yaml.Node {
+	info := compiler.NewMappingNode()
 	if m == nil {
 		return info
 	}
 	// always include this required field.
-	info = append(info, yaml.MapItem{Key: "type", Value: m.Type})
+	info.Content = append(info.Content, compiler.NewScalarNodeForString("type"))
+	info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Type))
 	if m.Format != "" {
-		info = append(info, yaml.MapItem{Key: "format", Value: m.Format})
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("format"))
+		info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Format))
 	}
 	if m.Items != nil {
-		info = append(info, yaml.MapItem{Key: "items", Value: m.Items.ToRawInfo()})
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("items"))
+		info.Content = append(info.Content, m.Items.ToRawInfo())
 	}
-	// &{Name:items Type:PrimitivesItems StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
 	if m.CollectionFormat != "" {
-		info = append(info, yaml.MapItem{Key: "collectionFormat", Value: m.CollectionFormat})
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("collectionFormat"))
+		info.Content = append(info.Content, compiler.NewScalarNodeForString(m.CollectionFormat))
 	}
 	if m.Default != nil {
-		info = append(info, yaml.MapItem{Key: "default", Value: m.Default.ToRawInfo()})
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("default"))
+		info.Content = append(info.Content, m.Default.ToRawInfo())
 	}
-	// &{Name:default Type:Any StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
 	if m.Maximum != 0.0 {
-		info = append(info, yaml.MapItem{Key: "maximum", Value: m.Maximum})
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("maximum"))
+		info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.Maximum))
 	}
 	if m.ExclusiveMaximum != false {
-		info = append(info, yaml.MapItem{Key: "exclusiveMaximum", Value: m.ExclusiveMaximum})
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("exclusiveMaximum"))
+		info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ExclusiveMaximum))
 	}
 	if m.Minimum != 0.0 {
-		info = append(info, yaml.MapItem{Key: "minimum", Value: m.Minimum})
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("minimum"))
+		info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.Minimum))
 	}
 	if m.ExclusiveMinimum != false {
-		info = append(info, yaml.MapItem{Key: "exclusiveMinimum", Value: m.ExclusiveMinimum})
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("exclusiveMinimum"))
+		info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ExclusiveMinimum))
 	}
 	if m.MaxLength != 0 {
-		info = append(info, yaml.MapItem{Key: "maxLength", Value: m.MaxLength})
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("maxLength"))
+		info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MaxLength))
 	}
 	if m.MinLength != 0 {
-		info = append(info, yaml.MapItem{Key: "minLength", Value: m.MinLength})
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("minLength"))
+		info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MinLength))
 	}
 	if m.Pattern != "" {
-		info = append(info, yaml.MapItem{Key: "pattern", Value: m.Pattern})
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("pattern"))
+		info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Pattern))
 	}
 	if m.MaxItems != 0 {
-		info = append(info, yaml.MapItem{Key: "maxItems", Value: m.MaxItems})
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("maxItems"))
+		info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MaxItems))
 	}
 	if m.MinItems != 0 {
-		info = append(info, yaml.MapItem{Key: "minItems", Value: m.MinItems})
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("minItems"))
+		info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MinItems))
 	}
 	if m.UniqueItems != false {
-		info = append(info, yaml.MapItem{Key: "uniqueItems", Value: m.UniqueItems})
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("uniqueItems"))
+		info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.UniqueItems))
 	}
 	if len(m.Enum) != 0 {
-		items := make([]interface{}, 0)
+		items := compiler.NewSequenceNode()
 		for _, item := range m.Enum {
-			items = append(items, item.ToRawInfo())
+			items.Content = append(items.Content, item.ToRawInfo())
 		}
-		info = append(info, yaml.MapItem{Key: "enum", Value: items})
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("enum"))
+		info.Content = append(info.Content, items)
 	}
-	// &{Name:enum Type:Any StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:}
 	if m.MultipleOf != 0.0 {
-		info = append(info, yaml.MapItem{Key: "multipleOf", Value: m.MultipleOf})
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("multipleOf"))
+		info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.MultipleOf))
 	}
 	if m.Description != "" {
-		info = append(info, yaml.MapItem{Key: "description", Value: m.Description})
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("description"))
+		info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Description))
 	}
 	if m.VendorExtension != nil {
 		for _, item := range m.VendorExtension {
-			info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()})
+			info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name))
+			info.Content = append(info.Content, item.Value.ToRawInfo())
 		}
 	}
-	// &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:}
 	return info
 }
 
 // ToRawInfo returns a description of HeaderParameterSubSchema suitable for JSON or YAML export.
-func (m *HeaderParameterSubSchema) ToRawInfo() interface{} {
-	info := yaml.MapSlice{}
+func (m *HeaderParameterSubSchema) ToRawInfo() *yaml.Node {
+	info := compiler.NewMappingNode()
 	if m == nil {
 		return info
 	}
 	if m.Required != false {
-		info = append(info, yaml.MapItem{Key: "required", Value: m.Required})
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("required"))
+		info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.Required))
 	}
 	if m.In != "" {
-		info = append(info, yaml.MapItem{Key: "in", Value: m.In})
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("in"))
+		info.Content = append(info.Content, compiler.NewScalarNodeForString(m.In))
 	}
 	if m.Description != "" {
-		info = append(info, yaml.MapItem{Key: "description", Value: m.Description})
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("description"))
+		info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Description))
 	}
 	if m.Name != "" {
-		info = append(info, yaml.MapItem{Key: "name", Value: m.Name})
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("name"))
+		info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name))
 	}
 	if m.Type != "" {
-		info = append(info, yaml.MapItem{Key: "type", Value: m.Type})
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("type"))
+		info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Type))
 	}
 	if m.Format != "" {
-		info = append(info, yaml.MapItem{Key: "format", Value: m.Format})
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("format"))
+		info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Format))
 	}
 	if m.Items != nil {
-		info = append(info, yaml.MapItem{Key: "items", Value: m.Items.ToRawInfo()})
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("items"))
+		info.Content = append(info.Content, m.Items.ToRawInfo())
 	}
-	// &{Name:items Type:PrimitivesItems StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
 	if m.CollectionFormat != "" {
-		info = append(info, yaml.MapItem{Key: "collectionFormat", Value: m.CollectionFormat})
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("collectionFormat"))
+		info.Content = append(info.Content, compiler.NewScalarNodeForString(m.CollectionFormat))
 	}
 	if m.Default != nil {
-		info = append(info, yaml.MapItem{Key: "default", Value: m.Default.ToRawInfo()})
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("default"))
+		info.Content = append(info.Content, m.Default.ToRawInfo())
 	}
-	// &{Name:default Type:Any StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
 	if m.Maximum != 0.0 {
-		info = append(info, yaml.MapItem{Key: "maximum", Value: m.Maximum})
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("maximum"))
+		info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.Maximum))
 	}
 	if m.ExclusiveMaximum != false {
-		info = append(info, yaml.MapItem{Key: "exclusiveMaximum", Value: m.ExclusiveMaximum})
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("exclusiveMaximum"))
+		info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ExclusiveMaximum))
 	}
 	if m.Minimum != 0.0 {
-		info = append(info, yaml.MapItem{Key: "minimum", Value: m.Minimum})
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("minimum"))
+		info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.Minimum))
 	}
 	if m.ExclusiveMinimum != false {
-		info = append(info, yaml.MapItem{Key: "exclusiveMinimum", Value: m.ExclusiveMinimum})
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("exclusiveMinimum"))
+		info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ExclusiveMinimum))
 	}
 	if m.MaxLength != 0 {
-		info = append(info, yaml.MapItem{Key: "maxLength", Value: m.MaxLength})
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("maxLength"))
+		info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MaxLength))
 	}
 	if m.MinLength != 0 {
-		info = append(info, yaml.MapItem{Key: "minLength", Value: m.MinLength})
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("minLength"))
+		info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MinLength))
 	}
 	if m.Pattern != "" {
-		info = append(info, yaml.MapItem{Key: "pattern", Value: m.Pattern})
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("pattern"))
+		info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Pattern))
 	}
 	if m.MaxItems != 0 {
-		info = append(info, yaml.MapItem{Key: "maxItems", Value: m.MaxItems})
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("maxItems"))
+		info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MaxItems))
 	}
 	if m.MinItems != 0 {
-		info = append(info, yaml.MapItem{Key: "minItems", Value: m.MinItems})
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("minItems"))
+		info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MinItems))
 	}
 	if m.UniqueItems != false {
-		info = append(info, yaml.MapItem{Key: "uniqueItems", Value: m.UniqueItems})
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("uniqueItems"))
+		info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.UniqueItems))
 	}
 	if len(m.Enum) != 0 {
-		items := make([]interface{}, 0)
+		items := compiler.NewSequenceNode()
 		for _, item := range m.Enum {
-			items = append(items, item.ToRawInfo())
+			items.Content = append(items.Content, item.ToRawInfo())
 		}
-		info = append(info, yaml.MapItem{Key: "enum", Value: items})
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("enum"))
+		info.Content = append(info.Content, items)
 	}
-	// &{Name:enum Type:Any StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:}
 	if m.MultipleOf != 0.0 {
-		info = append(info, yaml.MapItem{Key: "multipleOf", Value: m.MultipleOf})
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("multipleOf"))
+		info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.MultipleOf))
 	}
 	if m.VendorExtension != nil {
 		for _, item := range m.VendorExtension {
-			info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()})
+			info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name))
+			info.Content = append(info.Content, item.Value.ToRawInfo())
 		}
 	}
-	// &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:}
 	return info
 }
 
 // ToRawInfo returns a description of Headers suitable for JSON or YAML export.
-func (m *Headers) ToRawInfo() interface{} {
-	info := yaml.MapSlice{}
+func (m *Headers) ToRawInfo() *yaml.Node {
+	info := compiler.NewMappingNode()
 	if m == nil {
 		return info
 	}
 	if m.AdditionalProperties != nil {
 		for _, item := range m.AdditionalProperties {
-			info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()})
+			info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name))
+			info.Content = append(info.Content, item.Value.ToRawInfo())
 		}
 	}
-	// &{Name:additionalProperties Type:NamedHeader StringEnumValues:[] MapType:Header Repeated:true Pattern: Implicit:true Description:}
 	return info
 }
 
 // ToRawInfo returns a description of Info suitable for JSON or YAML export.
-func (m *Info) ToRawInfo() interface{} {
-	info := yaml.MapSlice{}
+func (m *Info) ToRawInfo() *yaml.Node {
+	info := compiler.NewMappingNode()
 	if m == nil {
 		return info
 	}
 	// always include this required field.
-	info = append(info, yaml.MapItem{Key: "title", Value: m.Title})
+	info.Content = append(info.Content, compiler.NewScalarNodeForString("title"))
+	info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Title))
 	// always include this required field.
-	info = append(info, yaml.MapItem{Key: "version", Value: m.Version})
+	info.Content = append(info.Content, compiler.NewScalarNodeForString("version"))
+	info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Version))
 	if m.Description != "" {
-		info = append(info, yaml.MapItem{Key: "description", Value: m.Description})
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("description"))
+		info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Description))
 	}
 	if m.TermsOfService != "" {
-		info = append(info, yaml.MapItem{Key: "termsOfService", Value: m.TermsOfService})
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("termsOfService"))
+		info.Content = append(info.Content, compiler.NewScalarNodeForString(m.TermsOfService))
 	}
 	if m.Contact != nil {
-		info = append(info, yaml.MapItem{Key: "contact", Value: m.Contact.ToRawInfo()})
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("contact"))
+		info.Content = append(info.Content, m.Contact.ToRawInfo())
 	}
-	// &{Name:contact Type:Contact StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
 	if m.License != nil {
-		info = append(info, yaml.MapItem{Key: "license", Value: m.License.ToRawInfo()})
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("license"))
+		info.Content = append(info.Content, m.License.ToRawInfo())
 	}
-	// &{Name:license Type:License StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
 	if m.VendorExtension != nil {
 		for _, item := range m.VendorExtension {
-			info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()})
+			info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name))
+			info.Content = append(info.Content, item.Value.ToRawInfo())
 		}
 	}
-	// &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:}
 	return info
 }
 
 // ToRawInfo returns a description of ItemsItem suitable for JSON or YAML export.
-func (m *ItemsItem) ToRawInfo() interface{} {
-	info := yaml.MapSlice{}
+func (m *ItemsItem) ToRawInfo() *yaml.Node {
+	info := compiler.NewMappingNode()
 	if m == nil {
 		return info
 	}
 	if len(m.Schema) != 0 {
-		items := make([]interface{}, 0)
+		items := compiler.NewSequenceNode()
 		for _, item := range m.Schema {
-			items = append(items, item.ToRawInfo())
+			items.Content = append(items.Content, item.ToRawInfo())
 		}
-		info = append(info, yaml.MapItem{Key: "schema", Value: items})
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("schema"))
+		info.Content = append(info.Content, items)
 	}
-	// &{Name:schema Type:Schema StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:}
 	return info
 }
 
 // ToRawInfo returns a description of JsonReference suitable for JSON or YAML export.
-func (m *JsonReference) ToRawInfo() interface{} {
-	info := yaml.MapSlice{}
+func (m *JsonReference) ToRawInfo() *yaml.Node {
+	info := compiler.NewMappingNode()
 	if m == nil {
 		return info
 	}
 	// always include this required field.
-	info = append(info, yaml.MapItem{Key: "$ref", Value: m.XRef})
+	info.Content = append(info.Content, compiler.NewScalarNodeForString("$ref"))
+	info.Content = append(info.Content, compiler.NewScalarNodeForString(m.XRef))
 	if m.Description != "" {
-		info = append(info, yaml.MapItem{Key: "description", Value: m.Description})
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("description"))
+		info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Description))
 	}
 	return info
 }
 
 // ToRawInfo returns a description of License suitable for JSON or YAML export.
-func (m *License) ToRawInfo() interface{} {
-	info := yaml.MapSlice{}
+func (m *License) ToRawInfo() *yaml.Node {
+	info := compiler.NewMappingNode()
 	if m == nil {
 		return info
 	}
 	// always include this required field.
-	info = append(info, yaml.MapItem{Key: "name", Value: m.Name})
+	info.Content = append(info.Content, compiler.NewScalarNodeForString("name"))
+	info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name))
 	if m.Url != "" {
-		info = append(info, yaml.MapItem{Key: "url", Value: m.Url})
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("url"))
+		info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Url))
 	}
 	if m.VendorExtension != nil {
 		for _, item := range m.VendorExtension {
-			info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()})
+			info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name))
+			info.Content = append(info.Content, item.Value.ToRawInfo())
 		}
 	}
-	// &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:}
 	return info
 }
 
 // ToRawInfo returns a description of NamedAny suitable for JSON or YAML export.
-func (m *NamedAny) ToRawInfo() interface{} {
-	info := yaml.MapSlice{}
+func (m *NamedAny) ToRawInfo() *yaml.Node {
+	info := compiler.NewMappingNode()
 	if m == nil {
 		return info
 	}
 	if m.Name != "" {
-		info = append(info, yaml.MapItem{Key: "name", Value: m.Name})
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("name"))
+		info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name))
+	}
+	if m.Value != nil {
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("value"))
+		info.Content = append(info.Content, m.Value.ToRawInfo())
 	}
-	// &{Name:value Type:Any StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value}
 	return info
 }
 
 // ToRawInfo returns a description of NamedHeader suitable for JSON or YAML export.
-func (m *NamedHeader) ToRawInfo() interface{} {
-	info := yaml.MapSlice{}
+func (m *NamedHeader) ToRawInfo() *yaml.Node {
+	info := compiler.NewMappingNode()
 	if m == nil {
 		return info
 	}
 	if m.Name != "" {
-		info = append(info, yaml.MapItem{Key: "name", Value: m.Name})
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("name"))
+		info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name))
 	}
 	// &{Name:value Type:Header StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value}
 	return info
 }
 
 // ToRawInfo returns a description of NamedParameter suitable for JSON or YAML export.
-func (m *NamedParameter) ToRawInfo() interface{} {
-	info := yaml.MapSlice{}
+func (m *NamedParameter) ToRawInfo() *yaml.Node {
+	info := compiler.NewMappingNode()
 	if m == nil {
 		return info
 	}
 	if m.Name != "" {
-		info = append(info, yaml.MapItem{Key: "name", Value: m.Name})
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("name"))
+		info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name))
 	}
 	// &{Name:value Type:Parameter StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value}
 	return info
 }
 
 // ToRawInfo returns a description of NamedPathItem suitable for JSON or YAML export.
-func (m *NamedPathItem) ToRawInfo() interface{} {
-	info := yaml.MapSlice{}
+func (m *NamedPathItem) ToRawInfo() *yaml.Node {
+	info := compiler.NewMappingNode()
 	if m == nil {
 		return info
 	}
 	if m.Name != "" {
-		info = append(info, yaml.MapItem{Key: "name", Value: m.Name})
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("name"))
+		info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name))
 	}
 	// &{Name:value Type:PathItem StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value}
 	return info
 }
 
 // ToRawInfo returns a description of NamedResponse suitable for JSON or YAML export.
-func (m *NamedResponse) ToRawInfo() interface{} {
-	info := yaml.MapSlice{}
+func (m *NamedResponse) ToRawInfo() *yaml.Node {
+	info := compiler.NewMappingNode()
 	if m == nil {
 		return info
 	}
 	if m.Name != "" {
-		info = append(info, yaml.MapItem{Key: "name", Value: m.Name})
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("name"))
+		info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name))
 	}
 	// &{Name:value Type:Response StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value}
 	return info
 }
 
 // ToRawInfo returns a description of NamedResponseValue suitable for JSON or YAML export.
-func (m *NamedResponseValue) ToRawInfo() interface{} { - info := yaml.MapSlice{} +func (m *NamedResponseValue) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() if m == nil { return info } if m.Name != "" { - info = append(info, yaml.MapItem{Key: "name", Value: m.Name}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("name")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name)) } // &{Name:value Type:ResponseValue StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value} return info } // ToRawInfo returns a description of NamedSchema suitable for JSON or YAML export. -func (m *NamedSchema) ToRawInfo() interface{} { - info := yaml.MapSlice{} +func (m *NamedSchema) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() if m == nil { return info } if m.Name != "" { - info = append(info, yaml.MapItem{Key: "name", Value: m.Name}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("name")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name)) } // &{Name:value Type:Schema StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value} return info } // ToRawInfo returns a description of NamedSecurityDefinitionsItem suitable for JSON or YAML export. -func (m *NamedSecurityDefinitionsItem) ToRawInfo() interface{} { - info := yaml.MapSlice{} +func (m *NamedSecurityDefinitionsItem) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() if m == nil { return info } if m.Name != "" { - info = append(info, yaml.MapItem{Key: "name", Value: m.Name}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("name")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name)) } // &{Name:value Type:SecurityDefinitionsItem StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value} return info } // ToRawInfo returns a description of NamedString suitable for JSON or YAML export. -func (m *NamedString) ToRawInfo() interface{} { - info := yaml.MapSlice{} +func (m *NamedString) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() if m == nil { return info } if m.Name != "" { - info = append(info, yaml.MapItem{Key: "name", Value: m.Name}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("name")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name)) } if m.Value != "" { - info = append(info, yaml.MapItem{Key: "value", Value: m.Value}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("value")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Value)) } return info } // ToRawInfo returns a description of NamedStringArray suitable for JSON or YAML export. -func (m *NamedStringArray) ToRawInfo() interface{} { - info := yaml.MapSlice{} +func (m *NamedStringArray) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() if m == nil { return info } if m.Name != "" { - info = append(info, yaml.MapItem{Key: "name", Value: m.Name}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("name")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name)) } // &{Name:value Type:StringArray StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value} return info } // ToRawInfo returns a description of NonBodyParameter suitable for JSON or YAML export. 
-func (m *NonBodyParameter) ToRawInfo() interface{} { +func (m *NonBodyParameter) ToRawInfo() *yaml.Node { // ONE OF WRAPPER // NonBodyParameter // {Name:headerParameterSubSchema Type:HeaderParameterSubSchema StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} @@ -7884,126 +7745,143 @@ func (m *NonBodyParameter) ToRawInfo() interface{} { if v3 != nil { return v3.ToRawInfo() } - return nil + return compiler.NewNullNode() } // ToRawInfo returns a description of Oauth2AccessCodeSecurity suitable for JSON or YAML export. -func (m *Oauth2AccessCodeSecurity) ToRawInfo() interface{} { - info := yaml.MapSlice{} +func (m *Oauth2AccessCodeSecurity) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() if m == nil { return info } // always include this required field. - info = append(info, yaml.MapItem{Key: "type", Value: m.Type}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("type")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Type)) // always include this required field. - info = append(info, yaml.MapItem{Key: "flow", Value: m.Flow}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("flow")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Flow)) if m.Scopes != nil { - info = append(info, yaml.MapItem{Key: "scopes", Value: m.Scopes.ToRawInfo()}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("scopes")) + info.Content = append(info.Content, m.Scopes.ToRawInfo()) } - // &{Name:scopes Type:Oauth2Scopes StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} // always include this required field. - info = append(info, yaml.MapItem{Key: "authorizationUrl", Value: m.AuthorizationUrl}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("authorizationUrl")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.AuthorizationUrl)) // always include this required field. - info = append(info, yaml.MapItem{Key: "tokenUrl", Value: m.TokenUrl}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("tokenUrl")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.TokenUrl)) if m.Description != "" { - info = append(info, yaml.MapItem{Key: "description", Value: m.Description}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("description")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Description)) } if m.VendorExtension != nil { for _, item := range m.VendorExtension { - info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) } } - // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} return info } // ToRawInfo returns a description of Oauth2ApplicationSecurity suitable for JSON or YAML export. -func (m *Oauth2ApplicationSecurity) ToRawInfo() interface{} { - info := yaml.MapSlice{} +func (m *Oauth2ApplicationSecurity) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() if m == nil { return info } // always include this required field. 
- info = append(info, yaml.MapItem{Key: "type", Value: m.Type}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("type")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Type)) // always include this required field. - info = append(info, yaml.MapItem{Key: "flow", Value: m.Flow}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("flow")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Flow)) if m.Scopes != nil { - info = append(info, yaml.MapItem{Key: "scopes", Value: m.Scopes.ToRawInfo()}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("scopes")) + info.Content = append(info.Content, m.Scopes.ToRawInfo()) } - // &{Name:scopes Type:Oauth2Scopes StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} // always include this required field. - info = append(info, yaml.MapItem{Key: "tokenUrl", Value: m.TokenUrl}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("tokenUrl")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.TokenUrl)) if m.Description != "" { - info = append(info, yaml.MapItem{Key: "description", Value: m.Description}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("description")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Description)) } if m.VendorExtension != nil { for _, item := range m.VendorExtension { - info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) } } - // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} return info } // ToRawInfo returns a description of Oauth2ImplicitSecurity suitable for JSON or YAML export. -func (m *Oauth2ImplicitSecurity) ToRawInfo() interface{} { - info := yaml.MapSlice{} +func (m *Oauth2ImplicitSecurity) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() if m == nil { return info } // always include this required field. - info = append(info, yaml.MapItem{Key: "type", Value: m.Type}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("type")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Type)) // always include this required field. - info = append(info, yaml.MapItem{Key: "flow", Value: m.Flow}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("flow")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Flow)) if m.Scopes != nil { - info = append(info, yaml.MapItem{Key: "scopes", Value: m.Scopes.ToRawInfo()}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("scopes")) + info.Content = append(info.Content, m.Scopes.ToRawInfo()) } - // &{Name:scopes Type:Oauth2Scopes StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} // always include this required field. 
- info = append(info, yaml.MapItem{Key: "authorizationUrl", Value: m.AuthorizationUrl}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("authorizationUrl")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.AuthorizationUrl)) if m.Description != "" { - info = append(info, yaml.MapItem{Key: "description", Value: m.Description}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("description")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Description)) } if m.VendorExtension != nil { for _, item := range m.VendorExtension { - info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) } } - // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} return info } // ToRawInfo returns a description of Oauth2PasswordSecurity suitable for JSON or YAML export. -func (m *Oauth2PasswordSecurity) ToRawInfo() interface{} { - info := yaml.MapSlice{} +func (m *Oauth2PasswordSecurity) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() if m == nil { return info } // always include this required field. - info = append(info, yaml.MapItem{Key: "type", Value: m.Type}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("type")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Type)) // always include this required field. - info = append(info, yaml.MapItem{Key: "flow", Value: m.Flow}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("flow")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Flow)) if m.Scopes != nil { - info = append(info, yaml.MapItem{Key: "scopes", Value: m.Scopes.ToRawInfo()}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("scopes")) + info.Content = append(info.Content, m.Scopes.ToRawInfo()) } - // &{Name:scopes Type:Oauth2Scopes StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} // always include this required field. - info = append(info, yaml.MapItem{Key: "tokenUrl", Value: m.TokenUrl}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("tokenUrl")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.TokenUrl)) if m.Description != "" { - info = append(info, yaml.MapItem{Key: "description", Value: m.Description}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("description")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Description)) } if m.VendorExtension != nil { for _, item := range m.VendorExtension { - info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) } } - // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} return info } // ToRawInfo returns a description of Oauth2Scopes suitable for JSON or YAML export. 
-func (m *Oauth2Scopes) ToRawInfo() interface{} { - info := yaml.MapSlice{} +func (m *Oauth2Scopes) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() if m == nil { return info } @@ -8012,69 +7890,77 @@ func (m *Oauth2Scopes) ToRawInfo() interface{} { } // ToRawInfo returns a description of Operation suitable for JSON or YAML export. -func (m *Operation) ToRawInfo() interface{} { - info := yaml.MapSlice{} +func (m *Operation) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() if m == nil { return info } if len(m.Tags) != 0 { - info = append(info, yaml.MapItem{Key: "tags", Value: m.Tags}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("tags")) + info.Content = append(info.Content, compiler.NewSequenceNodeForStringArray(m.Tags)) } if m.Summary != "" { - info = append(info, yaml.MapItem{Key: "summary", Value: m.Summary}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("summary")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Summary)) } if m.Description != "" { - info = append(info, yaml.MapItem{Key: "description", Value: m.Description}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("description")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Description)) } if m.ExternalDocs != nil { - info = append(info, yaml.MapItem{Key: "externalDocs", Value: m.ExternalDocs.ToRawInfo()}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("externalDocs")) + info.Content = append(info.Content, m.ExternalDocs.ToRawInfo()) } - // &{Name:externalDocs Type:ExternalDocs StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} if m.OperationId != "" { - info = append(info, yaml.MapItem{Key: "operationId", Value: m.OperationId}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("operationId")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.OperationId)) } if len(m.Produces) != 0 { - info = append(info, yaml.MapItem{Key: "produces", Value: m.Produces}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("produces")) + info.Content = append(info.Content, compiler.NewSequenceNodeForStringArray(m.Produces)) } if len(m.Consumes) != 0 { - info = append(info, yaml.MapItem{Key: "consumes", Value: m.Consumes}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("consumes")) + info.Content = append(info.Content, compiler.NewSequenceNodeForStringArray(m.Consumes)) } if len(m.Parameters) != 0 { - items := make([]interface{}, 0) + items := compiler.NewSequenceNode() for _, item := range m.Parameters { - items = append(items, item.ToRawInfo()) + items.Content = append(items.Content, item.ToRawInfo()) } - info = append(info, yaml.MapItem{Key: "parameters", Value: items}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("parameters")) + info.Content = append(info.Content, items) } - // &{Name:parameters Type:ParametersItem StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:The parameters needed to send a valid API call.} // always include this required field. 
- info = append(info, yaml.MapItem{Key: "responses", Value: m.Responses.ToRawInfo()}) - // &{Name:responses Type:Responses StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + info.Content = append(info.Content, compiler.NewScalarNodeForString("responses")) + info.Content = append(info.Content, m.Responses.ToRawInfo()) if len(m.Schemes) != 0 { - info = append(info, yaml.MapItem{Key: "schemes", Value: m.Schemes}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("schemes")) + info.Content = append(info.Content, compiler.NewSequenceNodeForStringArray(m.Schemes)) } if m.Deprecated != false { - info = append(info, yaml.MapItem{Key: "deprecated", Value: m.Deprecated}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("deprecated")) + info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.Deprecated)) } if len(m.Security) != 0 { - items := make([]interface{}, 0) + items := compiler.NewSequenceNode() for _, item := range m.Security { - items = append(items, item.ToRawInfo()) + items.Content = append(items.Content, item.ToRawInfo()) } - info = append(info, yaml.MapItem{Key: "security", Value: items}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("security")) + info.Content = append(info.Content, items) } - // &{Name:security Type:SecurityRequirement StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:} if m.VendorExtension != nil { for _, item := range m.VendorExtension { - info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) } } - // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} return info } // ToRawInfo returns a description of Parameter suitable for JSON or YAML export. -func (m *Parameter) ToRawInfo() interface{} { +func (m *Parameter) ToRawInfo() *yaml.Node { // ONE OF WRAPPER // Parameter // {Name:bodyParameter Type:BodyParameter StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} @@ -8087,26 +7973,26 @@ func (m *Parameter) ToRawInfo() interface{} { if v1 != nil { return v1.ToRawInfo() } - return nil + return compiler.NewNullNode() } // ToRawInfo returns a description of ParameterDefinitions suitable for JSON or YAML export. -func (m *ParameterDefinitions) ToRawInfo() interface{} { - info := yaml.MapSlice{} +func (m *ParameterDefinitions) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() if m == nil { return info } if m.AdditionalProperties != nil { for _, item := range m.AdditionalProperties { - info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) } } - // &{Name:additionalProperties Type:NamedParameter StringEnumValues:[] MapType:Parameter Repeated:true Pattern: Implicit:true Description:} return info } // ToRawInfo returns a description of ParametersItem suitable for JSON or YAML export. 
-func (m *ParametersItem) ToRawInfo() interface{} { +func (m *ParametersItem) ToRawInfo() *yaml.Node { // ONE OF WRAPPER // ParametersItem // {Name:parameter Type:Parameter StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} @@ -8119,390 +8005,443 @@ func (m *ParametersItem) ToRawInfo() interface{} { if v1 != nil { return v1.ToRawInfo() } - return nil + return compiler.NewNullNode() } // ToRawInfo returns a description of PathItem suitable for JSON or YAML export. -func (m *PathItem) ToRawInfo() interface{} { - info := yaml.MapSlice{} +func (m *PathItem) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() if m == nil { return info } if m.XRef != "" { - info = append(info, yaml.MapItem{Key: "$ref", Value: m.XRef}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("$ref")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.XRef)) } if m.Get != nil { - info = append(info, yaml.MapItem{Key: "get", Value: m.Get.ToRawInfo()}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("get")) + info.Content = append(info.Content, m.Get.ToRawInfo()) } - // &{Name:get Type:Operation StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} if m.Put != nil { - info = append(info, yaml.MapItem{Key: "put", Value: m.Put.ToRawInfo()}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("put")) + info.Content = append(info.Content, m.Put.ToRawInfo()) } - // &{Name:put Type:Operation StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} if m.Post != nil { - info = append(info, yaml.MapItem{Key: "post", Value: m.Post.ToRawInfo()}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("post")) + info.Content = append(info.Content, m.Post.ToRawInfo()) } - // &{Name:post Type:Operation StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} if m.Delete != nil { - info = append(info, yaml.MapItem{Key: "delete", Value: m.Delete.ToRawInfo()}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("delete")) + info.Content = append(info.Content, m.Delete.ToRawInfo()) } - // &{Name:delete Type:Operation StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} if m.Options != nil { - info = append(info, yaml.MapItem{Key: "options", Value: m.Options.ToRawInfo()}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("options")) + info.Content = append(info.Content, m.Options.ToRawInfo()) } - // &{Name:options Type:Operation StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} if m.Head != nil { - info = append(info, yaml.MapItem{Key: "head", Value: m.Head.ToRawInfo()}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("head")) + info.Content = append(info.Content, m.Head.ToRawInfo()) } - // &{Name:head Type:Operation StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} if m.Patch != nil { - info = append(info, yaml.MapItem{Key: "patch", Value: m.Patch.ToRawInfo()}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("patch")) + info.Content = append(info.Content, m.Patch.ToRawInfo()) } - // &{Name:patch Type:Operation StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} if len(m.Parameters) != 0 { - items := make([]interface{}, 0) + items := compiler.NewSequenceNode() for _, item := range m.Parameters { - items = append(items, 
item.ToRawInfo()) + items.Content = append(items.Content, item.ToRawInfo()) } - info = append(info, yaml.MapItem{Key: "parameters", Value: items}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("parameters")) + info.Content = append(info.Content, items) } - // &{Name:parameters Type:ParametersItem StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:The parameters needed to send a valid API call.} if m.VendorExtension != nil { for _, item := range m.VendorExtension { - info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) } } - // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} return info } // ToRawInfo returns a description of PathParameterSubSchema suitable for JSON or YAML export. -func (m *PathParameterSubSchema) ToRawInfo() interface{} { - info := yaml.MapSlice{} +func (m *PathParameterSubSchema) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() if m == nil { return info } // always include this required field. - info = append(info, yaml.MapItem{Key: "required", Value: m.Required}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("required")) + info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.Required)) if m.In != "" { - info = append(info, yaml.MapItem{Key: "in", Value: m.In}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("in")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.In)) } if m.Description != "" { - info = append(info, yaml.MapItem{Key: "description", Value: m.Description}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("description")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Description)) } if m.Name != "" { - info = append(info, yaml.MapItem{Key: "name", Value: m.Name}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("name")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name)) } if m.Type != "" { - info = append(info, yaml.MapItem{Key: "type", Value: m.Type}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("type")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Type)) } if m.Format != "" { - info = append(info, yaml.MapItem{Key: "format", Value: m.Format}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("format")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Format)) } if m.Items != nil { - info = append(info, yaml.MapItem{Key: "items", Value: m.Items.ToRawInfo()}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("items")) + info.Content = append(info.Content, m.Items.ToRawInfo()) } - // &{Name:items Type:PrimitivesItems StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} if m.CollectionFormat != "" { - info = append(info, yaml.MapItem{Key: "collectionFormat", Value: m.CollectionFormat}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("collectionFormat")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.CollectionFormat)) } if m.Default != nil { - info = append(info, yaml.MapItem{Key: "default", Value: m.Default.ToRawInfo()}) + info.Content = append(info.Content, 
compiler.NewScalarNodeForString("default")) + info.Content = append(info.Content, m.Default.ToRawInfo()) } - // &{Name:default Type:Any StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} if m.Maximum != 0.0 { - info = append(info, yaml.MapItem{Key: "maximum", Value: m.Maximum}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("maximum")) + info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.Maximum)) } if m.ExclusiveMaximum != false { - info = append(info, yaml.MapItem{Key: "exclusiveMaximum", Value: m.ExclusiveMaximum}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("exclusiveMaximum")) + info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ExclusiveMaximum)) } if m.Minimum != 0.0 { - info = append(info, yaml.MapItem{Key: "minimum", Value: m.Minimum}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("minimum")) + info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.Minimum)) } if m.ExclusiveMinimum != false { - info = append(info, yaml.MapItem{Key: "exclusiveMinimum", Value: m.ExclusiveMinimum}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("exclusiveMinimum")) + info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ExclusiveMinimum)) } if m.MaxLength != 0 { - info = append(info, yaml.MapItem{Key: "maxLength", Value: m.MaxLength}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("maxLength")) + info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MaxLength)) } if m.MinLength != 0 { - info = append(info, yaml.MapItem{Key: "minLength", Value: m.MinLength}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("minLength")) + info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MinLength)) } if m.Pattern != "" { - info = append(info, yaml.MapItem{Key: "pattern", Value: m.Pattern}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("pattern")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Pattern)) } if m.MaxItems != 0 { - info = append(info, yaml.MapItem{Key: "maxItems", Value: m.MaxItems}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("maxItems")) + info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MaxItems)) } if m.MinItems != 0 { - info = append(info, yaml.MapItem{Key: "minItems", Value: m.MinItems}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("minItems")) + info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MinItems)) } if m.UniqueItems != false { - info = append(info, yaml.MapItem{Key: "uniqueItems", Value: m.UniqueItems}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("uniqueItems")) + info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.UniqueItems)) } if len(m.Enum) != 0 { - items := make([]interface{}, 0) + items := compiler.NewSequenceNode() for _, item := range m.Enum { - items = append(items, item.ToRawInfo()) + items.Content = append(items.Content, item.ToRawInfo()) } - info = append(info, yaml.MapItem{Key: "enum", Value: items}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("enum")) + info.Content = append(info.Content, items) } - // &{Name:enum Type:Any StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:} if m.MultipleOf != 0.0 { - info = append(info, yaml.MapItem{Key: "multipleOf", Value: m.MultipleOf}) + info.Content = 
append(info.Content, compiler.NewScalarNodeForString("multipleOf")) + info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.MultipleOf)) } if m.VendorExtension != nil { for _, item := range m.VendorExtension { - info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) } } - // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} return info } // ToRawInfo returns a description of Paths suitable for JSON or YAML export. -func (m *Paths) ToRawInfo() interface{} { - info := yaml.MapSlice{} +func (m *Paths) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() if m == nil { return info } if m.VendorExtension != nil { for _, item := range m.VendorExtension { - info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) } } - // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} if m.Path != nil { for _, item := range m.Path { - info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) } } - // &{Name:Path Type:NamedPathItem StringEnumValues:[] MapType:PathItem Repeated:true Pattern:^/ Implicit:true Description:} return info } // ToRawInfo returns a description of PrimitivesItems suitable for JSON or YAML export. 
-func (m *PrimitivesItems) ToRawInfo() interface{} { - info := yaml.MapSlice{} +func (m *PrimitivesItems) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() if m == nil { return info } if m.Type != "" { - info = append(info, yaml.MapItem{Key: "type", Value: m.Type}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("type")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Type)) } if m.Format != "" { - info = append(info, yaml.MapItem{Key: "format", Value: m.Format}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("format")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Format)) } if m.Items != nil { - info = append(info, yaml.MapItem{Key: "items", Value: m.Items.ToRawInfo()}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("items")) + info.Content = append(info.Content, m.Items.ToRawInfo()) } - // &{Name:items Type:PrimitivesItems StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} if m.CollectionFormat != "" { - info = append(info, yaml.MapItem{Key: "collectionFormat", Value: m.CollectionFormat}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("collectionFormat")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.CollectionFormat)) } if m.Default != nil { - info = append(info, yaml.MapItem{Key: "default", Value: m.Default.ToRawInfo()}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("default")) + info.Content = append(info.Content, m.Default.ToRawInfo()) } - // &{Name:default Type:Any StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} if m.Maximum != 0.0 { - info = append(info, yaml.MapItem{Key: "maximum", Value: m.Maximum}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("maximum")) + info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.Maximum)) } if m.ExclusiveMaximum != false { - info = append(info, yaml.MapItem{Key: "exclusiveMaximum", Value: m.ExclusiveMaximum}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("exclusiveMaximum")) + info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ExclusiveMaximum)) } if m.Minimum != 0.0 { - info = append(info, yaml.MapItem{Key: "minimum", Value: m.Minimum}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("minimum")) + info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.Minimum)) } if m.ExclusiveMinimum != false { - info = append(info, yaml.MapItem{Key: "exclusiveMinimum", Value: m.ExclusiveMinimum}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("exclusiveMinimum")) + info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ExclusiveMinimum)) } if m.MaxLength != 0 { - info = append(info, yaml.MapItem{Key: "maxLength", Value: m.MaxLength}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("maxLength")) + info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MaxLength)) } if m.MinLength != 0 { - info = append(info, yaml.MapItem{Key: "minLength", Value: m.MinLength}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("minLength")) + info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MinLength)) } if m.Pattern != "" { - info = append(info, yaml.MapItem{Key: "pattern", Value: m.Pattern}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("pattern")) + info.Content = 
append(info.Content, compiler.NewScalarNodeForString(m.Pattern)) } if m.MaxItems != 0 { - info = append(info, yaml.MapItem{Key: "maxItems", Value: m.MaxItems}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("maxItems")) + info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MaxItems)) } if m.MinItems != 0 { - info = append(info, yaml.MapItem{Key: "minItems", Value: m.MinItems}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("minItems")) + info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MinItems)) } if m.UniqueItems != false { - info = append(info, yaml.MapItem{Key: "uniqueItems", Value: m.UniqueItems}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("uniqueItems")) + info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.UniqueItems)) } if len(m.Enum) != 0 { - items := make([]interface{}, 0) + items := compiler.NewSequenceNode() for _, item := range m.Enum { - items = append(items, item.ToRawInfo()) + items.Content = append(items.Content, item.ToRawInfo()) } - info = append(info, yaml.MapItem{Key: "enum", Value: items}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("enum")) + info.Content = append(info.Content, items) } - // &{Name:enum Type:Any StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:} if m.MultipleOf != 0.0 { - info = append(info, yaml.MapItem{Key: "multipleOf", Value: m.MultipleOf}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("multipleOf")) + info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.MultipleOf)) } if m.VendorExtension != nil { for _, item := range m.VendorExtension { - info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) } } - // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} return info } // ToRawInfo returns a description of Properties suitable for JSON or YAML export. -func (m *Properties) ToRawInfo() interface{} { - info := yaml.MapSlice{} +func (m *Properties) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() if m == nil { return info } if m.AdditionalProperties != nil { for _, item := range m.AdditionalProperties { - info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) } } - // &{Name:additionalProperties Type:NamedSchema StringEnumValues:[] MapType:Schema Repeated:true Pattern: Implicit:true Description:} return info } // ToRawInfo returns a description of QueryParameterSubSchema suitable for JSON or YAML export. 
-func (m *QueryParameterSubSchema) ToRawInfo() interface{} { - info := yaml.MapSlice{} +func (m *QueryParameterSubSchema) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() if m == nil { return info } if m.Required != false { - info = append(info, yaml.MapItem{Key: "required", Value: m.Required}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("required")) + info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.Required)) } if m.In != "" { - info = append(info, yaml.MapItem{Key: "in", Value: m.In}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("in")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.In)) } if m.Description != "" { - info = append(info, yaml.MapItem{Key: "description", Value: m.Description}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("description")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Description)) } if m.Name != "" { - info = append(info, yaml.MapItem{Key: "name", Value: m.Name}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("name")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name)) } if m.AllowEmptyValue != false { - info = append(info, yaml.MapItem{Key: "allowEmptyValue", Value: m.AllowEmptyValue}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("allowEmptyValue")) + info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.AllowEmptyValue)) } if m.Type != "" { - info = append(info, yaml.MapItem{Key: "type", Value: m.Type}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("type")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Type)) } if m.Format != "" { - info = append(info, yaml.MapItem{Key: "format", Value: m.Format}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("format")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Format)) } if m.Items != nil { - info = append(info, yaml.MapItem{Key: "items", Value: m.Items.ToRawInfo()}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("items")) + info.Content = append(info.Content, m.Items.ToRawInfo()) } - // &{Name:items Type:PrimitivesItems StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} if m.CollectionFormat != "" { - info = append(info, yaml.MapItem{Key: "collectionFormat", Value: m.CollectionFormat}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("collectionFormat")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.CollectionFormat)) } if m.Default != nil { - info = append(info, yaml.MapItem{Key: "default", Value: m.Default.ToRawInfo()}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("default")) + info.Content = append(info.Content, m.Default.ToRawInfo()) } - // &{Name:default Type:Any StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} if m.Maximum != 0.0 { - info = append(info, yaml.MapItem{Key: "maximum", Value: m.Maximum}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("maximum")) + info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.Maximum)) } if m.ExclusiveMaximum != false { - info = append(info, yaml.MapItem{Key: "exclusiveMaximum", Value: m.ExclusiveMaximum}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("exclusiveMaximum")) + info.Content = append(info.Content, 
compiler.NewScalarNodeForBool(m.ExclusiveMaximum)) } if m.Minimum != 0.0 { - info = append(info, yaml.MapItem{Key: "minimum", Value: m.Minimum}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("minimum")) + info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.Minimum)) } if m.ExclusiveMinimum != false { - info = append(info, yaml.MapItem{Key: "exclusiveMinimum", Value: m.ExclusiveMinimum}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("exclusiveMinimum")) + info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ExclusiveMinimum)) } if m.MaxLength != 0 { - info = append(info, yaml.MapItem{Key: "maxLength", Value: m.MaxLength}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("maxLength")) + info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MaxLength)) } if m.MinLength != 0 { - info = append(info, yaml.MapItem{Key: "minLength", Value: m.MinLength}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("minLength")) + info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MinLength)) } if m.Pattern != "" { - info = append(info, yaml.MapItem{Key: "pattern", Value: m.Pattern}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("pattern")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Pattern)) } if m.MaxItems != 0 { - info = append(info, yaml.MapItem{Key: "maxItems", Value: m.MaxItems}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("maxItems")) + info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MaxItems)) } if m.MinItems != 0 { - info = append(info, yaml.MapItem{Key: "minItems", Value: m.MinItems}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("minItems")) + info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MinItems)) } if m.UniqueItems != false { - info = append(info, yaml.MapItem{Key: "uniqueItems", Value: m.UniqueItems}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("uniqueItems")) + info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.UniqueItems)) } if len(m.Enum) != 0 { - items := make([]interface{}, 0) + items := compiler.NewSequenceNode() for _, item := range m.Enum { - items = append(items, item.ToRawInfo()) + items.Content = append(items.Content, item.ToRawInfo()) } - info = append(info, yaml.MapItem{Key: "enum", Value: items}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("enum")) + info.Content = append(info.Content, items) } - // &{Name:enum Type:Any StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:} if m.MultipleOf != 0.0 { - info = append(info, yaml.MapItem{Key: "multipleOf", Value: m.MultipleOf}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("multipleOf")) + info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.MultipleOf)) } if m.VendorExtension != nil { for _, item := range m.VendorExtension { - info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) } } - // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} return info } // ToRawInfo returns a description of Response suitable for JSON or YAML export. 
-func (m *Response) ToRawInfo() interface{} { - info := yaml.MapSlice{} +func (m *Response) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() if m == nil { return info } // always include this required field. - info = append(info, yaml.MapItem{Key: "description", Value: m.Description}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("description")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Description)) if m.Schema != nil { - info = append(info, yaml.MapItem{Key: "schema", Value: m.Schema.ToRawInfo()}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("schema")) + info.Content = append(info.Content, m.Schema.ToRawInfo()) } - // &{Name:schema Type:SchemaItem StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} if m.Headers != nil { - info = append(info, yaml.MapItem{Key: "headers", Value: m.Headers.ToRawInfo()}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("headers")) + info.Content = append(info.Content, m.Headers.ToRawInfo()) } - // &{Name:headers Type:Headers StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} if m.Examples != nil { - info = append(info, yaml.MapItem{Key: "examples", Value: m.Examples.ToRawInfo()}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("examples")) + info.Content = append(info.Content, m.Examples.ToRawInfo()) } - // &{Name:examples Type:Examples StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} if m.VendorExtension != nil { for _, item := range m.VendorExtension { - info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) } } - // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} return info } // ToRawInfo returns a description of ResponseDefinitions suitable for JSON or YAML export. -func (m *ResponseDefinitions) ToRawInfo() interface{} { - info := yaml.MapSlice{} +func (m *ResponseDefinitions) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() if m == nil { return info } if m.AdditionalProperties != nil { for _, item := range m.AdditionalProperties { - info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) } } - // &{Name:additionalProperties Type:NamedResponse StringEnumValues:[] MapType:Response Repeated:true Pattern: Implicit:true Description:} return info } // ToRawInfo returns a description of ResponseValue suitable for JSON or YAML export. -func (m *ResponseValue) ToRawInfo() interface{} { +func (m *ResponseValue) ToRawInfo() *yaml.Node { // ONE OF WRAPPER // ResponseValue // {Name:response Type:Response StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} @@ -8515,163 +8454,187 @@ func (m *ResponseValue) ToRawInfo() interface{} { if v1 != nil { return v1.ToRawInfo() } - return nil + return compiler.NewNullNode() } // ToRawInfo returns a description of Responses suitable for JSON or YAML export. 
-func (m *Responses) ToRawInfo() interface{} { - info := yaml.MapSlice{} +func (m *Responses) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() if m == nil { return info } if m.ResponseCode != nil { for _, item := range m.ResponseCode { - info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) } } - // &{Name:ResponseCode Type:NamedResponseValue StringEnumValues:[] MapType:ResponseValue Repeated:true Pattern:^([0-9]{3})$|^(default)$ Implicit:true Description:} if m.VendorExtension != nil { for _, item := range m.VendorExtension { - info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) } } - // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} return info } // ToRawInfo returns a description of Schema suitable for JSON or YAML export. -func (m *Schema) ToRawInfo() interface{} { - info := yaml.MapSlice{} +func (m *Schema) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() if m == nil { return info } if m.XRef != "" { - info = append(info, yaml.MapItem{Key: "$ref", Value: m.XRef}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("$ref")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.XRef)) } if m.Format != "" { - info = append(info, yaml.MapItem{Key: "format", Value: m.Format}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("format")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Format)) } if m.Title != "" { - info = append(info, yaml.MapItem{Key: "title", Value: m.Title}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("title")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Title)) } if m.Description != "" { - info = append(info, yaml.MapItem{Key: "description", Value: m.Description}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("description")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Description)) } if m.Default != nil { - info = append(info, yaml.MapItem{Key: "default", Value: m.Default.ToRawInfo()}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("default")) + info.Content = append(info.Content, m.Default.ToRawInfo()) } - // &{Name:default Type:Any StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} if m.MultipleOf != 0.0 { - info = append(info, yaml.MapItem{Key: "multipleOf", Value: m.MultipleOf}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("multipleOf")) + info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.MultipleOf)) } if m.Maximum != 0.0 { - info = append(info, yaml.MapItem{Key: "maximum", Value: m.Maximum}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("maximum")) + info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.Maximum)) } if m.ExclusiveMaximum != false { - info = append(info, yaml.MapItem{Key: "exclusiveMaximum", Value: m.ExclusiveMaximum}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("exclusiveMaximum")) + info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ExclusiveMaximum)) } 
if m.Minimum != 0.0 { - info = append(info, yaml.MapItem{Key: "minimum", Value: m.Minimum}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("minimum")) + info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.Minimum)) } if m.ExclusiveMinimum != false { - info = append(info, yaml.MapItem{Key: "exclusiveMinimum", Value: m.ExclusiveMinimum}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("exclusiveMinimum")) + info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ExclusiveMinimum)) } if m.MaxLength != 0 { - info = append(info, yaml.MapItem{Key: "maxLength", Value: m.MaxLength}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("maxLength")) + info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MaxLength)) } if m.MinLength != 0 { - info = append(info, yaml.MapItem{Key: "minLength", Value: m.MinLength}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("minLength")) + info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MinLength)) } if m.Pattern != "" { - info = append(info, yaml.MapItem{Key: "pattern", Value: m.Pattern}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("pattern")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Pattern)) } if m.MaxItems != 0 { - info = append(info, yaml.MapItem{Key: "maxItems", Value: m.MaxItems}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("maxItems")) + info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MaxItems)) } if m.MinItems != 0 { - info = append(info, yaml.MapItem{Key: "minItems", Value: m.MinItems}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("minItems")) + info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MinItems)) } if m.UniqueItems != false { - info = append(info, yaml.MapItem{Key: "uniqueItems", Value: m.UniqueItems}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("uniqueItems")) + info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.UniqueItems)) } if m.MaxProperties != 0 { - info = append(info, yaml.MapItem{Key: "maxProperties", Value: m.MaxProperties}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("maxProperties")) + info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MaxProperties)) } if m.MinProperties != 0 { - info = append(info, yaml.MapItem{Key: "minProperties", Value: m.MinProperties}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("minProperties")) + info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MinProperties)) } if len(m.Required) != 0 { - info = append(info, yaml.MapItem{Key: "required", Value: m.Required}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("required")) + info.Content = append(info.Content, compiler.NewSequenceNodeForStringArray(m.Required)) } if len(m.Enum) != 0 { - items := make([]interface{}, 0) + items := compiler.NewSequenceNode() for _, item := range m.Enum { - items = append(items, item.ToRawInfo()) + items.Content = append(items.Content, item.ToRawInfo()) } - info = append(info, yaml.MapItem{Key: "enum", Value: items}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("enum")) + info.Content = append(info.Content, items) } - // &{Name:enum Type:Any StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:} if m.AdditionalProperties != nil { - info = append(info, 
yaml.MapItem{Key: "additionalProperties", Value: m.AdditionalProperties.ToRawInfo()}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("additionalProperties")) + info.Content = append(info.Content, m.AdditionalProperties.ToRawInfo()) } - // &{Name:additionalProperties Type:AdditionalPropertiesItem StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} if m.Type != nil { if len(m.Type.Value) == 1 { - info = append(info, yaml.MapItem{Key: "type", Value: m.Type.Value[0]}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("type")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Type.Value[0])) } else { - info = append(info, yaml.MapItem{Key: "type", Value: m.Type.Value}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("type")) + info.Content = append(info.Content, compiler.NewSequenceNodeForStringArray(m.Type.Value)) } } - // &{Name:type Type:TypeItem StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} if m.Items != nil { - items := make([]interface{}, 0) + items := compiler.NewSequenceNode() for _, item := range m.Items.Schema { - items = append(items, item.ToRawInfo()) + items.Content = append(items.Content, item.ToRawInfo()) + } + if len(items.Content) == 1 { + items = items.Content[0] } - info = append(info, yaml.MapItem{Key: "items", Value: items[0]}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("items")) + info.Content = append(info.Content, items) } - // &{Name:items Type:ItemsItem StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} if len(m.AllOf) != 0 { - items := make([]interface{}, 0) + items := compiler.NewSequenceNode() for _, item := range m.AllOf { - items = append(items, item.ToRawInfo()) + items.Content = append(items.Content, item.ToRawInfo()) } - info = append(info, yaml.MapItem{Key: "allOf", Value: items}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("allOf")) + info.Content = append(info.Content, items) } - // &{Name:allOf Type:Schema StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:} if m.Properties != nil { - info = append(info, yaml.MapItem{Key: "properties", Value: m.Properties.ToRawInfo()}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("properties")) + info.Content = append(info.Content, m.Properties.ToRawInfo()) } - // &{Name:properties Type:Properties StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} if m.Discriminator != "" { - info = append(info, yaml.MapItem{Key: "discriminator", Value: m.Discriminator}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("discriminator")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Discriminator)) } if m.ReadOnly != false { - info = append(info, yaml.MapItem{Key: "readOnly", Value: m.ReadOnly}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("readOnly")) + info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ReadOnly)) } if m.Xml != nil { - info = append(info, yaml.MapItem{Key: "xml", Value: m.Xml.ToRawInfo()}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("xml")) + info.Content = append(info.Content, m.Xml.ToRawInfo()) } - // &{Name:xml Type:Xml StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} if m.ExternalDocs != nil { - info = append(info, yaml.MapItem{Key: "externalDocs", Value: 
-			m.ExternalDocs.ToRawInfo()})
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("externalDocs"))
+		info.Content = append(info.Content, m.ExternalDocs.ToRawInfo())
 	}
-	// &{Name:externalDocs Type:ExternalDocs StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
 	if m.Example != nil {
-		info = append(info, yaml.MapItem{Key: "example", Value: m.Example.ToRawInfo()})
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("example"))
+		info.Content = append(info.Content, m.Example.ToRawInfo())
 	}
-	// &{Name:example Type:Any StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
 	if m.VendorExtension != nil {
 		for _, item := range m.VendorExtension {
-			info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()})
+			info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name))
+			info.Content = append(info.Content, item.Value.ToRawInfo())
 		}
 	}
-	// &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:}
 	return info
 }
 
 // ToRawInfo returns a description of SchemaItem suitable for JSON or YAML export.
-func (m *SchemaItem) ToRawInfo() interface{} {
+func (m *SchemaItem) ToRawInfo() *yaml.Node {
 	// ONE OF WRAPPER
 	// SchemaItem
 	// {Name:schema Type:Schema StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
@@ -8684,26 +8647,26 @@ func (m *SchemaItem) ToRawInfo() interface{} {
 	if v1 != nil {
 		return v1.ToRawInfo()
 	}
-	return nil
+	return compiler.NewNullNode()
 }
 
 // ToRawInfo returns a description of SecurityDefinitions suitable for JSON or YAML export.
-func (m *SecurityDefinitions) ToRawInfo() interface{} {
-	info := yaml.MapSlice{}
+func (m *SecurityDefinitions) ToRawInfo() *yaml.Node {
+	info := compiler.NewMappingNode()
 	if m == nil {
 		return info
 	}
 	if m.AdditionalProperties != nil {
 		for _, item := range m.AdditionalProperties {
-			info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()})
+			info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name))
+			info.Content = append(info.Content, item.Value.ToRawInfo())
 		}
 	}
-	// &{Name:additionalProperties Type:NamedSecurityDefinitionsItem StringEnumValues:[] MapType:SecurityDefinitionsItem Repeated:true Pattern: Implicit:true Description:}
 	return info
 }
 
 // ToRawInfo returns a description of SecurityDefinitionsItem suitable for JSON or YAML export.
-func (m *SecurityDefinitionsItem) ToRawInfo() interface{} {
+func (m *SecurityDefinitionsItem) ToRawInfo() *yaml.Node {
 	// ONE OF WRAPPER
 	// SecurityDefinitionsItem
 	// {Name:basicAuthenticationSecurity Type:BasicAuthenticationSecurity StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
@@ -8736,107 +8699,115 @@ func (m *SecurityDefinitionsItem) ToRawInfo() interface{} {
 	if v5 != nil {
 		return v5.ToRawInfo()
 	}
-	return nil
+	return compiler.NewNullNode()
 }
 
 // ToRawInfo returns a description of SecurityRequirement suitable for JSON or YAML export.
-func (m *SecurityRequirement) ToRawInfo() interface{} {
-	info := yaml.MapSlice{}
+func (m *SecurityRequirement) ToRawInfo() *yaml.Node {
+	info := compiler.NewMappingNode()
 	if m == nil {
 		return info
 	}
 	if m.AdditionalProperties != nil {
 		for _, item := range m.AdditionalProperties {
-			info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()})
+			info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name))
+			info.Content = append(info.Content, item.Value.ToRawInfo())
 		}
 	}
-	// &{Name:additionalProperties Type:NamedStringArray StringEnumValues:[] MapType:StringArray Repeated:true Pattern: Implicit:true Description:}
 	return info
 }
 
 // ToRawInfo returns a description of StringArray suitable for JSON or YAML export.
-func (m *StringArray) ToRawInfo() interface{} {
-	return m.Value
+func (m *StringArray) ToRawInfo() *yaml.Node {
+	return compiler.NewSequenceNodeForStringArray(m.Value)
 }
 
 // ToRawInfo returns a description of Tag suitable for JSON or YAML export.
-func (m *Tag) ToRawInfo() interface{} {
-	info := yaml.MapSlice{}
+func (m *Tag) ToRawInfo() *yaml.Node {
	info := compiler.NewMappingNode()
 	if m == nil {
 		return info
 	}
 	// always include this required field.
-	info = append(info, yaml.MapItem{Key: "name", Value: m.Name})
+	info.Content = append(info.Content, compiler.NewScalarNodeForString("name"))
+	info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name))
 	if m.Description != "" {
-		info = append(info, yaml.MapItem{Key: "description", Value: m.Description})
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("description"))
+		info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Description))
 	}
 	if m.ExternalDocs != nil {
-		info = append(info, yaml.MapItem{Key: "externalDocs", Value: m.ExternalDocs.ToRawInfo()})
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("externalDocs"))
+		info.Content = append(info.Content, m.ExternalDocs.ToRawInfo())
 	}
-	// &{Name:externalDocs Type:ExternalDocs StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
 	if m.VendorExtension != nil {
 		for _, item := range m.VendorExtension {
-			info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()})
+			info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name))
+			info.Content = append(info.Content, item.Value.ToRawInfo())
 		}
 	}
-	// &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:}
 	return info
 }
 
 // ToRawInfo returns a description of TypeItem suitable for JSON or YAML export.
-func (m *TypeItem) ToRawInfo() interface{} {
-	info := yaml.MapSlice{}
+func (m *TypeItem) ToRawInfo() *yaml.Node {
+	info := compiler.NewMappingNode()
 	if m == nil {
 		return info
 	}
 	if len(m.Value) != 0 {
-		info = append(info, yaml.MapItem{Key: "value", Value: m.Value})
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("value"))
+		info.Content = append(info.Content, compiler.NewSequenceNodeForStringArray(m.Value))
 	}
 	return info
 }
 
 // ToRawInfo returns a description of VendorExtension suitable for JSON or YAML export.
-func (m *VendorExtension) ToRawInfo() interface{} {
-	info := yaml.MapSlice{}
+func (m *VendorExtension) ToRawInfo() *yaml.Node {
+	info := compiler.NewMappingNode()
 	if m == nil {
 		return info
 	}
 	if m.AdditionalProperties != nil {
 		for _, item := range m.AdditionalProperties {
-			info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()})
+			info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name))
+			info.Content = append(info.Content, item.Value.ToRawInfo())
 		}
 	}
-	// &{Name:additionalProperties Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern: Implicit:true Description:}
 	return info
 }
 
 // ToRawInfo returns a description of Xml suitable for JSON or YAML export.
-func (m *Xml) ToRawInfo() interface{} {
-	info := yaml.MapSlice{}
+func (m *Xml) ToRawInfo() *yaml.Node {
+	info := compiler.NewMappingNode()
 	if m == nil {
 		return info
 	}
 	if m.Name != "" {
-		info = append(info, yaml.MapItem{Key: "name", Value: m.Name})
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("name"))
+		info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name))
 	}
 	if m.Namespace != "" {
-		info = append(info, yaml.MapItem{Key: "namespace", Value: m.Namespace})
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("namespace"))
+		info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Namespace))
 	}
 	if m.Prefix != "" {
-		info = append(info, yaml.MapItem{Key: "prefix", Value: m.Prefix})
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("prefix"))
+		info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Prefix))
 	}
 	if m.Attribute != false {
-		info = append(info, yaml.MapItem{Key: "attribute", Value: m.Attribute})
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("attribute"))
+		info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.Attribute))
 	}
 	if m.Wrapped != false {
-		info = append(info, yaml.MapItem{Key: "wrapped", Value: m.Wrapped})
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("wrapped"))
+		info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.Wrapped))
 	}
 	if m.VendorExtension != nil {
 		for _, item := range m.VendorExtension {
-			info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()})
+			info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name))
+			info.Content = append(info.Content, item.Value.ToRawInfo())
 		}
 	}
-	// &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:}
 	return info
 }
diff --git a/vendor/github.com/googleapis/gnostic/openapiv2/OpenAPIv2.pb.go b/vendor/github.com/googleapis/gnostic/openapiv2/OpenAPIv2.pb.go
index 55a6cb51604c9..8a5f302f337a6 100644
--- a/vendor/github.com/googleapis/gnostic/openapiv2/OpenAPIv2.pb.go
+++ b/vendor/github.com/googleapis/gnostic/openapiv2/OpenAPIv2.pb.go
@@ -1,77 +1,85 @@
+// Copyright 2020 Google LLC. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// THIS FILE IS AUTOMATICALLY GENERATED.
+
 // Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// 	protoc-gen-go v1.26.0
+// 	protoc        v3.15.5
 // source: openapiv2/OpenAPIv2.proto
 
 package openapi_v2
 
 import (
-	fmt "fmt"
-	proto "github.com/golang/protobuf/proto"
-	any "github.com/golang/protobuf/ptypes/any"
-	math "math"
+	protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+	protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+	anypb "google.golang.org/protobuf/types/known/anypb"
+	reflect "reflect"
+	sync "sync"
 )
 
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
+const (
+	// Verify that this generated code is sufficiently up-to-date.
+	_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+	// Verify that runtime/protoimpl is sufficiently up-to-date.
+	_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
 
 type AdditionalPropertiesItem struct {
-	// Types that are valid to be assigned to Oneof:
+	state protoimpl.MessageState
+	sizeCache protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// Types that are assignable to Oneof:
 	//	*AdditionalPropertiesItem_Schema
 	//	*AdditionalPropertiesItem_Boolean
-	Oneof isAdditionalPropertiesItem_Oneof `protobuf_oneof:"oneof"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized []byte `json:"-"`
-	XXX_sizecache int32 `json:"-"`
+	Oneof isAdditionalPropertiesItem_Oneof `protobuf_oneof:"oneof"`
 }
 
-func (m *AdditionalPropertiesItem) Reset()         { *m = AdditionalPropertiesItem{} }
-func (m *AdditionalPropertiesItem) String() string { return proto.CompactTextString(m) }
-func (*AdditionalPropertiesItem) ProtoMessage()    {}
-func (*AdditionalPropertiesItem) Descriptor() ([]byte, []int) {
-	return fileDescriptor_a43d10d209cd31c2, []int{0}
+func (x *AdditionalPropertiesItem) Reset() {
+	*x = AdditionalPropertiesItem{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[0]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
 }
 
-func (m *AdditionalPropertiesItem) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_AdditionalPropertiesItem.Unmarshal(m, b)
-}
-func (m *AdditionalPropertiesItem) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_AdditionalPropertiesItem.Marshal(b, m, deterministic)
-}
-func (m *AdditionalPropertiesItem) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_AdditionalPropertiesItem.Merge(m, src)
-}
-func (m *AdditionalPropertiesItem) XXX_Size() int {
-	return xxx_messageInfo_AdditionalPropertiesItem.Size(m)
-}
-func (m *AdditionalPropertiesItem) XXX_DiscardUnknown() {
-	xxx_messageInfo_AdditionalPropertiesItem.DiscardUnknown(m)
+func (x *AdditionalPropertiesItem) String() string {
+	return protoimpl.X.MessageStringOf(x)
 }
 
-var xxx_messageInfo_AdditionalPropertiesItem proto.InternalMessageInfo
-
-type isAdditionalPropertiesItem_Oneof interface {
-	isAdditionalPropertiesItem_Oneof()
-}
+func (*AdditionalPropertiesItem) ProtoMessage() {}
 
-type AdditionalPropertiesItem_Schema struct {
-	Schema *Schema `protobuf:"bytes,1,opt,name=schema,proto3,oneof"`
+func (x *AdditionalPropertiesItem) ProtoReflect() protoreflect.Message {
+	mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[0]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
 }
 
-type AdditionalPropertiesItem_Boolean struct {
-	Boolean bool `protobuf:"varint,2,opt,name=boolean,proto3,oneof"`
+// Deprecated: Use AdditionalPropertiesItem.ProtoReflect.Descriptor instead.
+func (*AdditionalPropertiesItem) Descriptor() ([]byte, []int) {
+	return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{0}
 }
 
-func (*AdditionalPropertiesItem_Schema) isAdditionalPropertiesItem_Oneof() {}
-
-func (*AdditionalPropertiesItem_Boolean) isAdditionalPropertiesItem_Oneof() {}
-
 func (m *AdditionalPropertiesItem) GetOneof() isAdditionalPropertiesItem_Oneof {
 	if m != nil {
 		return m.Oneof
@@ -79,202 +87,238 @@ func (m *AdditionalPropertiesItem) GetOneof() isAdditionalPropertiesItem_Oneof {
 	return nil
 }
 
-func (m *AdditionalPropertiesItem) GetSchema() *Schema {
-	if x, ok := m.GetOneof().(*AdditionalPropertiesItem_Schema); ok {
+func (x *AdditionalPropertiesItem) GetSchema() *Schema {
+	if x, ok := x.GetOneof().(*AdditionalPropertiesItem_Schema); ok {
 		return x.Schema
 	}
 	return nil
 }
 
-func (m *AdditionalPropertiesItem) GetBoolean() bool {
-	if x, ok := m.GetOneof().(*AdditionalPropertiesItem_Boolean); ok {
+func (x *AdditionalPropertiesItem) GetBoolean() bool {
+	if x, ok := x.GetOneof().(*AdditionalPropertiesItem_Boolean); ok {
 		return x.Boolean
 	}
 	return false
 }
 
-// XXX_OneofWrappers is for the internal use of the proto package.
-func (*AdditionalPropertiesItem) XXX_OneofWrappers() []interface{} {
-	return []interface{}{
-		(*AdditionalPropertiesItem_Schema)(nil),
-		(*AdditionalPropertiesItem_Boolean)(nil),
-	}
+type isAdditionalPropertiesItem_Oneof interface {
+	isAdditionalPropertiesItem_Oneof()
 }
 
-type Any struct {
-	Value *any.Any `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"`
-	Yaml string `protobuf:"bytes,2,opt,name=yaml,proto3" json:"yaml,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized []byte `json:"-"`
-	XXX_sizecache int32 `json:"-"`
+type AdditionalPropertiesItem_Schema struct {
+	Schema *Schema `protobuf:"bytes,1,opt,name=schema,proto3,oneof"`
 }
 
-func (m *Any) Reset()         { *m = Any{} }
-func (m *Any) String() string { return proto.CompactTextString(m) }
-func (*Any) ProtoMessage()    {}
-func (*Any) Descriptor() ([]byte, []int) {
-	return fileDescriptor_a43d10d209cd31c2, []int{1}
+type AdditionalPropertiesItem_Boolean struct {
+	Boolean bool `protobuf:"varint,2,opt,name=boolean,proto3,oneof"`
 }
 
-func (m *Any) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_Any.Unmarshal(m, b)
-}
-func (m *Any) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_Any.Marshal(b, m, deterministic)
+func (*AdditionalPropertiesItem_Schema) isAdditionalPropertiesItem_Oneof() {}
+
+func (*AdditionalPropertiesItem_Boolean) isAdditionalPropertiesItem_Oneof() {}
+
+type Any struct {
+	state protoimpl.MessageState
+	sizeCache protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	Value *anypb.Any `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"`
+	Yaml string `protobuf:"bytes,2,opt,name=yaml,proto3" json:"yaml,omitempty"`
 }
-func (m *Any) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_Any.Merge(m, src)
+
+func (x *Any) Reset() {
+	*x = Any{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[1]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
 }
-func (m *Any) XXX_Size() int {
-	return xxx_messageInfo_Any.Size(m)
+
+func (x *Any) String() string {
+	return protoimpl.X.MessageStringOf(x)
 }
-func (m *Any) XXX_DiscardUnknown() {
-	xxx_messageInfo_Any.DiscardUnknown(m)
+
+func (*Any) ProtoMessage() {}
+
+func (x *Any) ProtoReflect() protoreflect.Message {
+	mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[1]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
 }
 
-var xxx_messageInfo_Any proto.InternalMessageInfo
+// Deprecated: Use Any.ProtoReflect.Descriptor instead.
+func (*Any) Descriptor() ([]byte, []int) {
+	return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{1}
+}
 
-func (m *Any) GetValue() *any.Any {
-	if m != nil {
-		return m.Value
+func (x *Any) GetValue() *anypb.Any {
+	if x != nil {
+		return x.Value
 	}
 	return nil
 }
 
-func (m *Any) GetYaml() string {
-	if m != nil {
-		return m.Yaml
+func (x *Any) GetYaml() string {
+	if x != nil {
+		return x.Yaml
 	}
 	return ""
 }
 
 type ApiKeySecurity struct {
-	Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"`
-	Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"`
-	In string `protobuf:"bytes,3,opt,name=in,proto3" json:"in,omitempty"`
-	Description string `protobuf:"bytes,4,opt,name=description,proto3" json:"description,omitempty"`
-	VendorExtension []*NamedAny `protobuf:"bytes,5,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized []byte `json:"-"`
-	XXX_sizecache int32 `json:"-"`
-}
-
-func (m *ApiKeySecurity) Reset()         { *m = ApiKeySecurity{} }
-func (m *ApiKeySecurity) String() string { return proto.CompactTextString(m) }
-func (*ApiKeySecurity) ProtoMessage()    {}
-func (*ApiKeySecurity) Descriptor() ([]byte, []int) {
-	return fileDescriptor_a43d10d209cd31c2, []int{2}
-}
+	state protoimpl.MessageState
+	sizeCache protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
 
-func (m *ApiKeySecurity) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_ApiKeySecurity.Unmarshal(m, b)
+	Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"`
+	Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"`
+	In string `protobuf:"bytes,3,opt,name=in,proto3" json:"in,omitempty"`
+	Description string `protobuf:"bytes,4,opt,name=description,proto3" json:"description,omitempty"`
+	VendorExtension []*NamedAny `protobuf:"bytes,5,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"`
 }
-func (m *ApiKeySecurity) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_ApiKeySecurity.Marshal(b, m, deterministic)
-}
-func (m *ApiKeySecurity) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_ApiKeySecurity.Merge(m, src)
+
+func (x *ApiKeySecurity) Reset() {
+	*x = ApiKeySecurity{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[2]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
 }
-func (m *ApiKeySecurity) XXX_Size() int {
-	return xxx_messageInfo_ApiKeySecurity.Size(m)
+
+func (x *ApiKeySecurity) String() string {
+	return protoimpl.X.MessageStringOf(x)
 }
-func (m *ApiKeySecurity) XXX_DiscardUnknown() {
-	xxx_messageInfo_ApiKeySecurity.DiscardUnknown(m)
+
+func (*ApiKeySecurity) ProtoMessage() {}
+
+func (x *ApiKeySecurity) ProtoReflect() protoreflect.Message {
+	mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[2]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
 }
 
-var xxx_messageInfo_ApiKeySecurity proto.InternalMessageInfo
+// Deprecated: Use ApiKeySecurity.ProtoReflect.Descriptor instead.
+func (*ApiKeySecurity) Descriptor() ([]byte, []int) {
+	return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{2}
+}
 
-func (m *ApiKeySecurity) GetType() string {
-	if m != nil {
-		return m.Type
+func (x *ApiKeySecurity) GetType() string {
+	if x != nil {
+		return x.Type
 	}
 	return ""
 }
 
-func (m *ApiKeySecurity) GetName() string {
-	if m != nil {
-		return m.Name
+func (x *ApiKeySecurity) GetName() string {
+	if x != nil {
+		return x.Name
 	}
 	return ""
 }
 
-func (m *ApiKeySecurity) GetIn() string {
-	if m != nil {
-		return m.In
+func (x *ApiKeySecurity) GetIn() string {
+	if x != nil {
+		return x.In
 	}
 	return ""
 }
 
-func (m *ApiKeySecurity) GetDescription() string {
-	if m != nil {
-		return m.Description
+func (x *ApiKeySecurity) GetDescription() string {
+	if x != nil {
+		return x.Description
 	}
 	return ""
 }
 
-func (m *ApiKeySecurity) GetVendorExtension() []*NamedAny {
-	if m != nil {
-		return m.VendorExtension
+func (x *ApiKeySecurity) GetVendorExtension() []*NamedAny {
+	if x != nil {
+		return x.VendorExtension
 	}
 	return nil
 }
 
 type BasicAuthenticationSecurity struct {
-	Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"`
-	Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"`
-	VendorExtension []*NamedAny `protobuf:"bytes,3,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized []byte `json:"-"`
-	XXX_sizecache int32 `json:"-"`
-}
+	state protoimpl.MessageState
+	sizeCache protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
 
-func (m *BasicAuthenticationSecurity) Reset()         { *m = BasicAuthenticationSecurity{} }
-func (m *BasicAuthenticationSecurity) String() string { return proto.CompactTextString(m) }
-func (*BasicAuthenticationSecurity) ProtoMessage()    {}
-func (*BasicAuthenticationSecurity) Descriptor() ([]byte, []int) {
-	return fileDescriptor_a43d10d209cd31c2, []int{3}
+	Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"`
+	Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"`
+	VendorExtension []*NamedAny `protobuf:"bytes,3,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"`
 }
 
-func (m *BasicAuthenticationSecurity) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_BasicAuthenticationSecurity.Unmarshal(m, b)
-}
-func (m *BasicAuthenticationSecurity) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_BasicAuthenticationSecurity.Marshal(b, m, deterministic)
-}
-func (m *BasicAuthenticationSecurity) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_BasicAuthenticationSecurity.Merge(m, src)
+func (x *BasicAuthenticationSecurity) Reset() {
+	*x = BasicAuthenticationSecurity{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[3]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
 }
-func (m *BasicAuthenticationSecurity) XXX_Size() int {
-	return xxx_messageInfo_BasicAuthenticationSecurity.Size(m)
+
+func (x *BasicAuthenticationSecurity) String() string {
+	return protoimpl.X.MessageStringOf(x)
 }
-func (m *BasicAuthenticationSecurity) XXX_DiscardUnknown() {
-	xxx_messageInfo_BasicAuthenticationSecurity.DiscardUnknown(m)
+
+func (*BasicAuthenticationSecurity) ProtoMessage() {}
+
+func (x *BasicAuthenticationSecurity) ProtoReflect() protoreflect.Message {
+	mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[3]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
 }
 
-var xxx_messageInfo_BasicAuthenticationSecurity proto.InternalMessageInfo
+// Deprecated: Use BasicAuthenticationSecurity.ProtoReflect.Descriptor instead.
+func (*BasicAuthenticationSecurity) Descriptor() ([]byte, []int) {
+	return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{3}
+}
 
-func (m *BasicAuthenticationSecurity) GetType() string {
-	if m != nil {
-		return m.Type
+func (x *BasicAuthenticationSecurity) GetType() string {
+	if x != nil {
+		return x.Type
 	}
 	return ""
 }
 
-func (m *BasicAuthenticationSecurity) GetDescription() string {
-	if m != nil {
-		return m.Description
+func (x *BasicAuthenticationSecurity) GetDescription() string {
+	if x != nil {
+		return x.Description
 	}
 	return ""
 }
 
-func (m *BasicAuthenticationSecurity) GetVendorExtension() []*NamedAny {
-	if m != nil {
-		return m.VendorExtension
+func (x *BasicAuthenticationSecurity) GetVendorExtension() []*NamedAny {
+	if x != nil {
+		return x.VendorExtension
 	}
 	return nil
 }
 
 type BodyParameter struct {
+	state protoimpl.MessageState
+	sizeCache protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
 	// A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed.
 	Description string `protobuf:"bytes,1,opt,name=description,proto3" json:"description,omitempty"`
 	// The name of the parameter.
@@ -282,228 +326,260 @@ type BodyParameter struct {
 	// Determines the location of the parameter.
 	In string `protobuf:"bytes,3,opt,name=in,proto3" json:"in,omitempty"`
 	// Determines whether or not this parameter is required or optional.
-	Required bool `protobuf:"varint,4,opt,name=required,proto3" json:"required,omitempty"`
-	Schema *Schema `protobuf:"bytes,5,opt,name=schema,proto3" json:"schema,omitempty"`
-	VendorExtension []*NamedAny `protobuf:"bytes,6,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized []byte `json:"-"`
-	XXX_sizecache int32 `json:"-"`
+	Required bool `protobuf:"varint,4,opt,name=required,proto3" json:"required,omitempty"`
+	Schema *Schema `protobuf:"bytes,5,opt,name=schema,proto3" json:"schema,omitempty"`
+	VendorExtension []*NamedAny `protobuf:"bytes,6,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"`
 }
 
-func (m *BodyParameter) Reset()         { *m = BodyParameter{} }
-func (m *BodyParameter) String() string { return proto.CompactTextString(m) }
-func (*BodyParameter) ProtoMessage()    {}
-func (*BodyParameter) Descriptor() ([]byte, []int) {
-	return fileDescriptor_a43d10d209cd31c2, []int{4}
+func (x *BodyParameter) Reset() {
+	*x = BodyParameter{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[4]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
 }
 
-func (m *BodyParameter) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_BodyParameter.Unmarshal(m, b)
-}
-func (m *BodyParameter) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_BodyParameter.Marshal(b, m, deterministic)
-}
-func (m *BodyParameter) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_BodyParameter.Merge(m, src)
-}
-func (m *BodyParameter) XXX_Size() int {
-	return xxx_messageInfo_BodyParameter.Size(m)
+func (x *BodyParameter) String() string {
+	return protoimpl.X.MessageStringOf(x)
 }
-func (m *BodyParameter) XXX_DiscardUnknown() {
-	xxx_messageInfo_BodyParameter.DiscardUnknown(m)
+
+func (*BodyParameter) ProtoMessage() {}
+
+func (x *BodyParameter) ProtoReflect() protoreflect.Message {
+	mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[4]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
 }
 
-var xxx_messageInfo_BodyParameter proto.InternalMessageInfo
+// Deprecated: Use BodyParameter.ProtoReflect.Descriptor instead.
+func (*BodyParameter) Descriptor() ([]byte, []int) {
+	return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{4}
+}
 
-func (m *BodyParameter) GetDescription() string {
-	if m != nil {
-		return m.Description
+func (x *BodyParameter) GetDescription() string {
+	if x != nil {
+		return x.Description
 	}
 	return ""
 }
 
-func (m *BodyParameter) GetName() string {
-	if m != nil {
-		return m.Name
+func (x *BodyParameter) GetName() string {
+	if x != nil {
+		return x.Name
 	}
 	return ""
 }
 
-func (m *BodyParameter) GetIn() string {
-	if m != nil {
-		return m.In
+func (x *BodyParameter) GetIn() string {
+	if x != nil {
+		return x.In
 	}
 	return ""
 }
 
-func (m *BodyParameter) GetRequired() bool {
-	if m != nil {
-		return m.Required
+func (x *BodyParameter) GetRequired() bool {
+	if x != nil {
+		return x.Required
 	}
 	return false
 }
 
-func (m *BodyParameter) GetSchema() *Schema {
-	if m != nil {
-		return m.Schema
+func (x *BodyParameter) GetSchema() *Schema {
+	if x != nil {
+		return x.Schema
 	}
 	return nil
 }
 
-func (m *BodyParameter) GetVendorExtension() []*NamedAny {
-	if m != nil {
-		return m.VendorExtension
+func (x *BodyParameter) GetVendorExtension() []*NamedAny {
+	if x != nil {
+		return x.VendorExtension
 	}
 	return nil
 }
 
 // Contact information for the owners of the API.
 type Contact struct {
+	state protoimpl.MessageState
+	sizeCache protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
 	// The identifying name of the contact person/organization.
 	Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
 	// The URL pointing to the contact information.
 	Url string `protobuf:"bytes,2,opt,name=url,proto3" json:"url,omitempty"`
 	// The email address of the contact person/organization.
-	Email string `protobuf:"bytes,3,opt,name=email,proto3" json:"email,omitempty"`
-	VendorExtension []*NamedAny `protobuf:"bytes,4,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized []byte `json:"-"`
-	XXX_sizecache int32 `json:"-"`
+	Email string `protobuf:"bytes,3,opt,name=email,proto3" json:"email,omitempty"`
+	VendorExtension []*NamedAny `protobuf:"bytes,4,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"`
 }
 
-func (m *Contact) Reset()         { *m = Contact{} }
-func (m *Contact) String() string { return proto.CompactTextString(m) }
-func (*Contact) ProtoMessage()    {}
-func (*Contact) Descriptor() ([]byte, []int) {
-	return fileDescriptor_a43d10d209cd31c2, []int{5}
+func (x *Contact) Reset() {
+	*x = Contact{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[5]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
 }
 
-func (m *Contact) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_Contact.Unmarshal(m, b)
-}
-func (m *Contact) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_Contact.Marshal(b, m, deterministic)
+func (x *Contact) String() string {
	return protoimpl.X.MessageStringOf(x)
 }
-func (m *Contact) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_Contact.Merge(m, src)
-}
-func (m *Contact) XXX_Size() int {
-	return xxx_messageInfo_Contact.Size(m)
-}
-func (m *Contact) XXX_DiscardUnknown() {
-	xxx_messageInfo_Contact.DiscardUnknown(m)
+
+func (*Contact) ProtoMessage() {}
+
+func (x *Contact) ProtoReflect() protoreflect.Message {
+	mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[5]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
 }
 
-var xxx_messageInfo_Contact proto.InternalMessageInfo
+// Deprecated: Use Contact.ProtoReflect.Descriptor instead.
+func (*Contact) Descriptor() ([]byte, []int) {
+	return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{5}
+}
 
-func (m *Contact) GetName() string {
-	if m != nil {
-		return m.Name
+func (x *Contact) GetName() string {
+	if x != nil {
+		return x.Name
 	}
 	return ""
 }
 
-func (m *Contact) GetUrl() string {
-	if m != nil {
-		return m.Url
+func (x *Contact) GetUrl() string {
+	if x != nil {
+		return x.Url
 	}
 	return ""
 }
 
-func (m *Contact) GetEmail() string {
-	if m != nil {
-		return m.Email
+func (x *Contact) GetEmail() string {
+	if x != nil {
+		return x.Email
 	}
 	return ""
 }
 
-func (m *Contact) GetVendorExtension() []*NamedAny {
-	if m != nil {
-		return m.VendorExtension
+func (x *Contact) GetVendorExtension() []*NamedAny {
+	if x != nil {
+		return x.VendorExtension
 	}
 	return nil
 }
 
 type Default struct {
+	state protoimpl.MessageState
+	sizeCache protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
 	AdditionalProperties []*NamedAny `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties,proto3" json:"additional_properties,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized []byte `json:"-"`
-	XXX_sizecache int32 `json:"-"`
 }
 
-func (m *Default) Reset()         { *m = Default{} }
-func (m *Default) String() string { return proto.CompactTextString(m) }
-func (*Default) ProtoMessage()    {}
-func (*Default) Descriptor() ([]byte, []int) {
-	return fileDescriptor_a43d10d209cd31c2, []int{6}
+func (x *Default) Reset() {
+	*x = Default{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[6]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
 }
 
-func (m *Default) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_Default.Unmarshal(m, b)
-}
-func (m *Default) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_Default.Marshal(b, m, deterministic)
-}
-func (m *Default) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_Default.Merge(m, src)
+func (x *Default) String() string {
+	return protoimpl.X.MessageStringOf(x)
 }
-func (m *Default) XXX_Size() int {
-	return xxx_messageInfo_Default.Size(m)
-}
-func (m *Default) XXX_DiscardUnknown() {
-	xxx_messageInfo_Default.DiscardUnknown(m)
+
+func (*Default) ProtoMessage() {}
+
+func (x *Default) ProtoReflect() protoreflect.Message {
+	mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[6]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
 }
 
-var xxx_messageInfo_Default proto.InternalMessageInfo
+// Deprecated: Use Default.ProtoReflect.Descriptor instead.
+func (*Default) Descriptor() ([]byte, []int) {
+	return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{6}
+}
 
-func (m *Default) GetAdditionalProperties() []*NamedAny {
-	if m != nil {
-		return m.AdditionalProperties
+func (x *Default) GetAdditionalProperties() []*NamedAny {
+	if x != nil {
+		return x.AdditionalProperties
 	}
 	return nil
 }
 
 // One or more JSON objects describing the schemas being consumed and produced by the API.
 type Definitions struct {
+	state protoimpl.MessageState
+	sizeCache protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
 	AdditionalProperties []*NamedSchema `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties,proto3" json:"additional_properties,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized []byte `json:"-"`
-	XXX_sizecache int32 `json:"-"`
 }
 
-func (m *Definitions) Reset()         { *m = Definitions{} }
-func (m *Definitions) String() string { return proto.CompactTextString(m) }
-func (*Definitions) ProtoMessage()    {}
-func (*Definitions) Descriptor() ([]byte, []int) {
-	return fileDescriptor_a43d10d209cd31c2, []int{7}
+func (x *Definitions) Reset() {
+	*x = Definitions{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[7]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
 }
 
-func (m *Definitions) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_Definitions.Unmarshal(m, b)
-}
-func (m *Definitions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_Definitions.Marshal(b, m, deterministic)
-}
-func (m *Definitions) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_Definitions.Merge(m, src)
+func (x *Definitions) String() string {
	return protoimpl.X.MessageStringOf(x)
 }
-func (m *Definitions) XXX_Size() int {
-	return xxx_messageInfo_Definitions.Size(m)
-}
-func (m *Definitions) XXX_DiscardUnknown() {
-	xxx_messageInfo_Definitions.DiscardUnknown(m)
+
+func (*Definitions) ProtoMessage() {}
+
+func (x *Definitions) ProtoReflect() protoreflect.Message {
+	mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[7]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
 }
 
-var xxx_messageInfo_Definitions proto.InternalMessageInfo
+// Deprecated: Use Definitions.ProtoReflect.Descriptor instead.
+func (*Definitions) Descriptor() ([]byte, []int) {
+	return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{7}
+}
 
-func (m *Definitions) GetAdditionalProperties() []*NamedSchema {
-	if m != nil {
-		return m.AdditionalProperties
+func (x *Definitions) GetAdditionalProperties() []*NamedSchema {
+	if x != nil {
+		return x.AdditionalProperties
 	}
 	return nil
 }
 
 type Document struct {
+	state protoimpl.MessageState
+	sizeCache protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
 	// The Swagger version of this document.
 	Swagger string `protobuf:"bytes,1,opt,name=swagger,proto3" json:"swagger,omitempty"`
 	Info *Info `protobuf:"bytes,2,opt,name=info,proto3" json:"info,omitempty"`
@@ -516,366 +592,398 @@ type Document struct {
 	// A list of MIME types accepted by the API.
 	Consumes []string `protobuf:"bytes,6,rep,name=consumes,proto3" json:"consumes,omitempty"`
 	// A list of MIME types the API can produce.
-	Produces []string `protobuf:"bytes,7,rep,name=produces,proto3" json:"produces,omitempty"`
-	Paths *Paths `protobuf:"bytes,8,opt,name=paths,proto3" json:"paths,omitempty"`
-	Definitions *Definitions `protobuf:"bytes,9,opt,name=definitions,proto3" json:"definitions,omitempty"`
-	Parameters *ParameterDefinitions `protobuf:"bytes,10,opt,name=parameters,proto3" json:"parameters,omitempty"`
-	Responses *ResponseDefinitions `protobuf:"bytes,11,opt,name=responses,proto3" json:"responses,omitempty"`
-	Security []*SecurityRequirement `protobuf:"bytes,12,rep,name=security,proto3" json:"security,omitempty"`
-	SecurityDefinitions *SecurityDefinitions `protobuf:"bytes,13,opt,name=security_definitions,json=securityDefinitions,proto3" json:"security_definitions,omitempty"`
-	Tags []*Tag `protobuf:"bytes,14,rep,name=tags,proto3" json:"tags,omitempty"`
-	ExternalDocs *ExternalDocs `protobuf:"bytes,15,opt,name=external_docs,json=externalDocs,proto3" json:"external_docs,omitempty"`
-	VendorExtension []*NamedAny `protobuf:"bytes,16,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized []byte `json:"-"`
-	XXX_sizecache int32 `json:"-"`
-}
-
-func (m *Document) Reset()         { *m = Document{} }
-func (m *Document) String() string { return proto.CompactTextString(m) }
-func (*Document) ProtoMessage()    {}
-func (*Document) Descriptor() ([]byte, []int) {
-	return fileDescriptor_a43d10d209cd31c2, []int{8}
+	Produces []string `protobuf:"bytes,7,rep,name=produces,proto3" json:"produces,omitempty"`
+	Paths *Paths `protobuf:"bytes,8,opt,name=paths,proto3" json:"paths,omitempty"`
+	Definitions *Definitions `protobuf:"bytes,9,opt,name=definitions,proto3" json:"definitions,omitempty"`
+	Parameters *ParameterDefinitions `protobuf:"bytes,10,opt,name=parameters,proto3" json:"parameters,omitempty"`
+	Responses *ResponseDefinitions `protobuf:"bytes,11,opt,name=responses,proto3" json:"responses,omitempty"`
+	Security []*SecurityRequirement `protobuf:"bytes,12,rep,name=security,proto3" json:"security,omitempty"`
+	SecurityDefinitions *SecurityDefinitions `protobuf:"bytes,13,opt,name=security_definitions,json=securityDefinitions,proto3" json:"security_definitions,omitempty"`
+	Tags []*Tag `protobuf:"bytes,14,rep,name=tags,proto3" json:"tags,omitempty"`
+	ExternalDocs *ExternalDocs `protobuf:"bytes,15,opt,name=external_docs,json=externalDocs,proto3" json:"external_docs,omitempty"`
+	VendorExtension []*NamedAny `protobuf:"bytes,16,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"`
 }
 
-func (m *Document) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_Document.Unmarshal(m, b)
-}
-func (m *Document) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_Document.Marshal(b, m, deterministic)
-}
-func (m *Document) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_Document.Merge(m, src)
+func (x *Document) Reset() {
+	*x = Document{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[8]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
 }
-func (m *Document) XXX_Size() int {
-	return xxx_messageInfo_Document.Size(m)
+
+func (x *Document) String() string {
+	return protoimpl.X.MessageStringOf(x)
 }
-func (m *Document) XXX_DiscardUnknown() {
-	xxx_messageInfo_Document.DiscardUnknown(m)
+
+func (*Document) ProtoMessage() {}
+
+func (x *Document) ProtoReflect() protoreflect.Message {
+	mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[8]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
 }
 
-var xxx_messageInfo_Document proto.InternalMessageInfo
+// Deprecated: Use Document.ProtoReflect.Descriptor instead.
+func (*Document) Descriptor() ([]byte, []int) {
+	return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{8}
+}
 
-func (m *Document) GetSwagger() string {
-	if m != nil {
-		return m.Swagger
+func (x *Document) GetSwagger() string {
+	if x != nil {
+		return x.Swagger
 	}
 	return ""
 }
 
-func (m *Document) GetInfo() *Info {
-	if m != nil {
-		return m.Info
+func (x *Document) GetInfo() *Info {
+	if x != nil {
+		return x.Info
 	}
 	return nil
 }
 
-func (m *Document) GetHost() string {
-	if m != nil {
-		return m.Host
+func (x *Document) GetHost() string {
+	if x != nil {
+		return x.Host
 	}
 	return ""
 }
 
-func (m *Document) GetBasePath() string {
-	if m != nil {
-		return m.BasePath
+func (x *Document) GetBasePath() string {
+	if x != nil {
+		return x.BasePath
 	}
 	return ""
 }
 
-func (m *Document) GetSchemes() []string {
-	if m != nil {
-		return m.Schemes
+func (x *Document) GetSchemes() []string {
+	if x != nil {
+		return x.Schemes
 	}
 	return nil
 }
 
-func (m *Document) GetConsumes() []string {
-	if m != nil {
-		return m.Consumes
+func (x *Document) GetConsumes() []string {
+	if x != nil {
+		return x.Consumes
 	}
 	return nil
 }
 
-func (m *Document) GetProduces() []string {
-	if m != nil {
-		return m.Produces
+func (x *Document) GetProduces() []string {
+	if x != nil {
+		return x.Produces
 	}
 	return nil
 }
 
-func (m *Document) GetPaths() *Paths {
-	if m != nil {
-		return m.Paths
+func (x *Document) GetPaths() *Paths {
+	if x != nil {
+		return x.Paths
 	}
 	return nil
 }
 
-func (m *Document) GetDefinitions() *Definitions {
-	if m != nil {
-		return m.Definitions
+func (x *Document) GetDefinitions() *Definitions {
+	if x != nil {
+		return x.Definitions
 	}
 	return nil
 }
 
-func (m *Document) GetParameters() *ParameterDefinitions {
-	if m != nil {
-		return m.Parameters
+func (x *Document) GetParameters() *ParameterDefinitions {
+	if x != nil {
+		return x.Parameters
 	}
 	return nil
 }
 
-func (m *Document) GetResponses() *ResponseDefinitions {
-	if m != nil {
-		return m.Responses
+func (x *Document) GetResponses() *ResponseDefinitions {
+	if x != nil {
+		return x.Responses
 	}
 	return nil
 }
 
-func (m *Document) GetSecurity() []*SecurityRequirement {
-	if m != nil {
-		return m.Security
+func (x *Document) GetSecurity() []*SecurityRequirement {
+	if x != nil {
+		return x.Security
 	}
 	return nil
 }
 
-func (m *Document) GetSecurityDefinitions() *SecurityDefinitions {
-	if m != nil {
-		return m.SecurityDefinitions
+func (x *Document) GetSecurityDefinitions() *SecurityDefinitions {
+	if x != nil {
+		return x.SecurityDefinitions
 	}
 	return nil
 }
 
-func (m *Document) GetTags() []*Tag {
-	if m != nil {
-		return m.Tags
+func (x *Document) GetTags() []*Tag {
+	if x != nil {
+		return x.Tags
 	}
 	return nil
 }
 
-func (m *Document) GetExternalDocs() *ExternalDocs {
-	if m != nil {
-		return m.ExternalDocs
+func (x *Document) GetExternalDocs() *ExternalDocs {
+	if x != nil {
+		return x.ExternalDocs
 	}
 	return nil
 }
 
-func (m *Document) GetVendorExtension() []*NamedAny {
-	if m != nil {
-		return m.VendorExtension
+func (x *Document) GetVendorExtension() []*NamedAny {
+	if x != nil {
+		return x.VendorExtension
 	}
 	return nil
 }
 
 type Examples struct {
+	state protoimpl.MessageState
+	sizeCache protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
 	AdditionalProperties []*NamedAny `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties,proto3" json:"additional_properties,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized []byte `json:"-"`
-	XXX_sizecache int32 `json:"-"`
 }
 
-func (m *Examples) Reset()         { *m = Examples{} }
-func (m *Examples) String() string { return proto.CompactTextString(m) }
-func (*Examples) ProtoMessage()    {}
-func (*Examples) Descriptor() ([]byte, []int) {
-	return fileDescriptor_a43d10d209cd31c2, []int{9}
+func (x *Examples) Reset() {
+	*x = Examples{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[9]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
 }
 
-func (m *Examples) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_Examples.Unmarshal(m, b)
-}
-func (m *Examples) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_Examples.Marshal(b, m, deterministic)
-}
-func (m *Examples) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_Examples.Merge(m, src)
-}
-func (m *Examples) XXX_Size() int {
-	return xxx_messageInfo_Examples.Size(m)
+func (x *Examples) String() string {
	return protoimpl.X.MessageStringOf(x)
 }
-func (m *Examples) XXX_DiscardUnknown() {
-	xxx_messageInfo_Examples.DiscardUnknown(m)
+
+func (*Examples) ProtoMessage() {}
+
+func (x *Examples) ProtoReflect() protoreflect.Message {
+	mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[9]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
 }
 
-var xxx_messageInfo_Examples proto.InternalMessageInfo
+// Deprecated: Use Examples.ProtoReflect.Descriptor instead.
+func (*Examples) Descriptor() ([]byte, []int) {
+	return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{9}
+}
 
-func (m *Examples) GetAdditionalProperties() []*NamedAny {
-	if m != nil {
-		return m.AdditionalProperties
+func (x *Examples) GetAdditionalProperties() []*NamedAny {
+	if x != nil {
+		return x.AdditionalProperties
 	}
 	return nil
 }
 
 // information about external documentation
 type ExternalDocs struct {
-	Description string `protobuf:"bytes,1,opt,name=description,proto3" json:"description,omitempty"`
-	Url string `protobuf:"bytes,2,opt,name=url,proto3" json:"url,omitempty"`
-	VendorExtension []*NamedAny `protobuf:"bytes,3,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized []byte `json:"-"`
-	XXX_sizecache int32 `json:"-"`
-}
+	state protoimpl.MessageState
+	sizeCache protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
 
-func (m *ExternalDocs) Reset()         { *m = ExternalDocs{} }
-func (m *ExternalDocs) String() string { return proto.CompactTextString(m) }
-func (*ExternalDocs) ProtoMessage()    {}
-func (*ExternalDocs) Descriptor() ([]byte, []int) {
-	return fileDescriptor_a43d10d209cd31c2, []int{10}
+	Description string `protobuf:"bytes,1,opt,name=description,proto3" json:"description,omitempty"`
+	Url string `protobuf:"bytes,2,opt,name=url,proto3" json:"url,omitempty"`
+	VendorExtension []*NamedAny `protobuf:"bytes,3,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"`
 }
 
-func (m *ExternalDocs) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_ExternalDocs.Unmarshal(m, b)
-}
-func (m *ExternalDocs) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_ExternalDocs.Marshal(b, m, deterministic)
-}
-func (m *ExternalDocs) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_ExternalDocs.Merge(m, src)
+func (x *ExternalDocs) Reset() {
+	*x = ExternalDocs{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[10]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
 }
-func (m *ExternalDocs) XXX_Size() int {
-	return xxx_messageInfo_ExternalDocs.Size(m)
+
+func (x *ExternalDocs) String() string {
+	return protoimpl.X.MessageStringOf(x)
 }
-func (m *ExternalDocs) XXX_DiscardUnknown() {
-	xxx_messageInfo_ExternalDocs.DiscardUnknown(m)
+
+func (*ExternalDocs) ProtoMessage() {}
+
+func (x *ExternalDocs) ProtoReflect() protoreflect.Message {
+	mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[10]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
 }
 
-var xxx_messageInfo_ExternalDocs proto.InternalMessageInfo
+// Deprecated: Use ExternalDocs.ProtoReflect.Descriptor instead.
+func (*ExternalDocs) Descriptor() ([]byte, []int) {
+	return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{10}
+}
 
-func (m *ExternalDocs) GetDescription() string {
-	if m != nil {
-		return m.Description
+func (x *ExternalDocs) GetDescription() string {
+	if x != nil {
+		return x.Description
 	}
 	return ""
 }
 
-func (m *ExternalDocs) GetUrl() string {
-	if m != nil {
-		return m.Url
+func (x *ExternalDocs) GetUrl() string {
+	if x != nil {
+		return x.Url
 	}
 	return ""
 }
 
-func (m *ExternalDocs) GetVendorExtension() []*NamedAny {
-	if m != nil {
-		return m.VendorExtension
+func (x *ExternalDocs) GetVendorExtension() []*NamedAny {
+	if x != nil {
+		return x.VendorExtension
 	}
 	return nil
 }
 
 // A deterministic version of a JSON Schema object.
 type FileSchema struct {
-	Format string `protobuf:"bytes,1,opt,name=format,proto3" json:"format,omitempty"`
-	Title string `protobuf:"bytes,2,opt,name=title,proto3" json:"title,omitempty"`
-	Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"`
-	Default *Any `protobuf:"bytes,4,opt,name=default,proto3" json:"default,omitempty"`
-	Required []string `protobuf:"bytes,5,rep,name=required,proto3" json:"required,omitempty"`
-	Type string `protobuf:"bytes,6,opt,name=type,proto3" json:"type,omitempty"`
-	ReadOnly bool `protobuf:"varint,7,opt,name=read_only,json=readOnly,proto3" json:"read_only,omitempty"`
-	ExternalDocs *ExternalDocs `protobuf:"bytes,8,opt,name=external_docs,json=externalDocs,proto3" json:"external_docs,omitempty"`
-	Example *Any `protobuf:"bytes,9,opt,name=example,proto3" json:"example,omitempty"`
-	VendorExtension []*NamedAny `protobuf:"bytes,10,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized []byte `json:"-"`
-	XXX_sizecache int32 `json:"-"`
-}
-
-func (m *FileSchema) Reset()         { *m = FileSchema{} }
-func (m *FileSchema) String() string { return proto.CompactTextString(m) }
-func (*FileSchema) ProtoMessage()    {}
-func (*FileSchema) Descriptor() ([]byte, []int) {
-	return fileDescriptor_a43d10d209cd31c2, []int{11}
-}
+	state protoimpl.MessageState
+	sizeCache protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
 
-func (m *FileSchema) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_FileSchema.Unmarshal(m, b)
+	Format string `protobuf:"bytes,1,opt,name=format,proto3" json:"format,omitempty"`
+	Title string `protobuf:"bytes,2,opt,name=title,proto3" json:"title,omitempty"`
+	Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"`
+	Default *Any `protobuf:"bytes,4,opt,name=default,proto3" json:"default,omitempty"`
+	Required []string `protobuf:"bytes,5,rep,name=required,proto3" json:"required,omitempty"`
+	Type string `protobuf:"bytes,6,opt,name=type,proto3" json:"type,omitempty"`
+	ReadOnly bool `protobuf:"varint,7,opt,name=read_only,json=readOnly,proto3" json:"read_only,omitempty"`
+	ExternalDocs *ExternalDocs `protobuf:"bytes,8,opt,name=external_docs,json=externalDocs,proto3" json:"external_docs,omitempty"`
+	Example *Any `protobuf:"bytes,9,opt,name=example,proto3" json:"example,omitempty"`
+	VendorExtension []*NamedAny `protobuf:"bytes,10,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"`
 }
-func (m *FileSchema) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_FileSchema.Marshal(b, m, deterministic)
-}
-func (m *FileSchema) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_FileSchema.Merge(m, src)
+
+func (x *FileSchema) Reset() {
+	*x = FileSchema{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[11]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
 }
-func (m *FileSchema) XXX_Size() int {
-	return xxx_messageInfo_FileSchema.Size(m)
+
+func (x *FileSchema) String() string {
	return protoimpl.X.MessageStringOf(x)
 }
-func (m *FileSchema) XXX_DiscardUnknown() {
-	xxx_messageInfo_FileSchema.DiscardUnknown(m)
+
+func (*FileSchema) ProtoMessage() {}
+
+func (x *FileSchema) ProtoReflect() protoreflect.Message {
+	mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[11]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
 }
 
-var xxx_messageInfo_FileSchema proto.InternalMessageInfo
+// Deprecated: Use FileSchema.ProtoReflect.Descriptor instead.
+func (*FileSchema) Descriptor() ([]byte, []int) {
+	return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{11}
+}
 
-func (m *FileSchema) GetFormat() string {
-	if m != nil {
-		return m.Format
+func (x *FileSchema) GetFormat() string {
+	if x != nil {
+		return x.Format
 	}
 	return ""
 }
 
-func (m *FileSchema) GetTitle() string {
-	if m != nil {
-		return m.Title
+func (x *FileSchema) GetTitle() string {
+	if x != nil {
+		return x.Title
 	}
 	return ""
 }
 
-func (m *FileSchema) GetDescription() string {
-	if m != nil {
-		return m.Description
+func (x *FileSchema) GetDescription() string {
+	if x != nil {
+		return x.Description
 	}
 	return ""
 }
 
-func (m *FileSchema) GetDefault() *Any {
-	if m != nil {
-		return m.Default
+func (x *FileSchema) GetDefault() *Any {
+	if x != nil {
+		return x.Default
 	}
 	return nil
 }
 
-func (m *FileSchema) GetRequired() []string {
-	if m != nil {
-		return m.Required
+func (x *FileSchema) GetRequired() []string {
+	if x != nil {
+		return x.Required
 	}
 	return nil
 }
 
-func (m *FileSchema) GetType() string {
-	if m != nil {
-		return m.Type
+func (x *FileSchema) GetType() string {
+	if x != nil {
+		return x.Type
 	}
 	return ""
 }
 
-func (m *FileSchema) GetReadOnly() bool {
-	if m != nil {
-		return m.ReadOnly
+func (x *FileSchema) GetReadOnly() bool {
+	if x != nil {
+		return x.ReadOnly
 	}
 	return false
 }
 
-func (m *FileSchema) GetExternalDocs() *ExternalDocs {
-	if m != nil {
-		return m.ExternalDocs
+func (x *FileSchema) GetExternalDocs() *ExternalDocs {
+	if x != nil {
+		return x.ExternalDocs
 	}
 	return nil
 }
 
-func (m *FileSchema) GetExample() *Any {
-	if m != nil {
-		return m.Example
+func (x *FileSchema) GetExample() *Any {
+	if x != nil {
+		return x.Example
 	}
 	return nil
 }
 
-func (m *FileSchema) GetVendorExtension() []*NamedAny {
-	if m != nil {
-		return m.VendorExtension
+func (x *FileSchema) GetVendorExtension() []*NamedAny {
+	if x != nil {
+		return x.VendorExtension
 	}
 	return nil
 }
 
 type FormDataParameterSubSchema struct {
+	state protoimpl.MessageState
+	sizeCache protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
 	// Determines whether or not this parameter is required or optional.
 	Required bool `protobuf:"varint,1,opt,name=required,proto3" json:"required,omitempty"`
 	// Determines the location of the parameter.
@@ -885,400 +993,416 @@ type FormDataParameterSubSchema struct {
 	// The name of the parameter.
 	Name string `protobuf:"bytes,4,opt,name=name,proto3" json:"name,omitempty"`
 	// allows sending a parameter by name only or with an empty value.
- AllowEmptyValue bool `protobuf:"varint,5,opt,name=allow_empty_value,json=allowEmptyValue,proto3" json:"allow_empty_value,omitempty"` - Type string `protobuf:"bytes,6,opt,name=type,proto3" json:"type,omitempty"` - Format string `protobuf:"bytes,7,opt,name=format,proto3" json:"format,omitempty"` - Items *PrimitivesItems `protobuf:"bytes,8,opt,name=items,proto3" json:"items,omitempty"` - CollectionFormat string `protobuf:"bytes,9,opt,name=collection_format,json=collectionFormat,proto3" json:"collection_format,omitempty"` - Default *Any `protobuf:"bytes,10,opt,name=default,proto3" json:"default,omitempty"` - Maximum float64 `protobuf:"fixed64,11,opt,name=maximum,proto3" json:"maximum,omitempty"` - ExclusiveMaximum bool `protobuf:"varint,12,opt,name=exclusive_maximum,json=exclusiveMaximum,proto3" json:"exclusive_maximum,omitempty"` - Minimum float64 `protobuf:"fixed64,13,opt,name=minimum,proto3" json:"minimum,omitempty"` - ExclusiveMinimum bool `protobuf:"varint,14,opt,name=exclusive_minimum,json=exclusiveMinimum,proto3" json:"exclusive_minimum,omitempty"` - MaxLength int64 `protobuf:"varint,15,opt,name=max_length,json=maxLength,proto3" json:"max_length,omitempty"` - MinLength int64 `protobuf:"varint,16,opt,name=min_length,json=minLength,proto3" json:"min_length,omitempty"` - Pattern string `protobuf:"bytes,17,opt,name=pattern,proto3" json:"pattern,omitempty"` - MaxItems int64 `protobuf:"varint,18,opt,name=max_items,json=maxItems,proto3" json:"max_items,omitempty"` - MinItems int64 `protobuf:"varint,19,opt,name=min_items,json=minItems,proto3" json:"min_items,omitempty"` - UniqueItems bool `protobuf:"varint,20,opt,name=unique_items,json=uniqueItems,proto3" json:"unique_items,omitempty"` - Enum []*Any `protobuf:"bytes,21,rep,name=enum,proto3" json:"enum,omitempty"` - MultipleOf float64 `protobuf:"fixed64,22,opt,name=multiple_of,json=multipleOf,proto3" json:"multiple_of,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,23,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *FormDataParameterSubSchema) Reset() { *m = FormDataParameterSubSchema{} } -func (m *FormDataParameterSubSchema) String() string { return proto.CompactTextString(m) } -func (*FormDataParameterSubSchema) ProtoMessage() {} + AllowEmptyValue bool `protobuf:"varint,5,opt,name=allow_empty_value,json=allowEmptyValue,proto3" json:"allow_empty_value,omitempty"` + Type string `protobuf:"bytes,6,opt,name=type,proto3" json:"type,omitempty"` + Format string `protobuf:"bytes,7,opt,name=format,proto3" json:"format,omitempty"` + Items *PrimitivesItems `protobuf:"bytes,8,opt,name=items,proto3" json:"items,omitempty"` + CollectionFormat string `protobuf:"bytes,9,opt,name=collection_format,json=collectionFormat,proto3" json:"collection_format,omitempty"` + Default *Any `protobuf:"bytes,10,opt,name=default,proto3" json:"default,omitempty"` + Maximum float64 `protobuf:"fixed64,11,opt,name=maximum,proto3" json:"maximum,omitempty"` + ExclusiveMaximum bool `protobuf:"varint,12,opt,name=exclusive_maximum,json=exclusiveMaximum,proto3" json:"exclusive_maximum,omitempty"` + Minimum float64 `protobuf:"fixed64,13,opt,name=minimum,proto3" json:"minimum,omitempty"` + ExclusiveMinimum bool `protobuf:"varint,14,opt,name=exclusive_minimum,json=exclusiveMinimum,proto3" json:"exclusive_minimum,omitempty"` + MaxLength int64 `protobuf:"varint,15,opt,name=max_length,json=maxLength,proto3" 
json:"max_length,omitempty"` + MinLength int64 `protobuf:"varint,16,opt,name=min_length,json=minLength,proto3" json:"min_length,omitempty"` + Pattern string `protobuf:"bytes,17,opt,name=pattern,proto3" json:"pattern,omitempty"` + MaxItems int64 `protobuf:"varint,18,opt,name=max_items,json=maxItems,proto3" json:"max_items,omitempty"` + MinItems int64 `protobuf:"varint,19,opt,name=min_items,json=minItems,proto3" json:"min_items,omitempty"` + UniqueItems bool `protobuf:"varint,20,opt,name=unique_items,json=uniqueItems,proto3" json:"unique_items,omitempty"` + Enum []*Any `protobuf:"bytes,21,rep,name=enum,proto3" json:"enum,omitempty"` + MultipleOf float64 `protobuf:"fixed64,22,opt,name=multiple_of,json=multipleOf,proto3" json:"multiple_of,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,23,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` +} + +func (x *FormDataParameterSubSchema) Reset() { + *x = FormDataParameterSubSchema{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FormDataParameterSubSchema) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FormDataParameterSubSchema) ProtoMessage() {} + +func (x *FormDataParameterSubSchema) ProtoReflect() protoreflect.Message { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[12] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FormDataParameterSubSchema.ProtoReflect.Descriptor instead. func (*FormDataParameterSubSchema) Descriptor() ([]byte, []int) { - return fileDescriptor_a43d10d209cd31c2, []int{12} -} - -func (m *FormDataParameterSubSchema) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_FormDataParameterSubSchema.Unmarshal(m, b) -} -func (m *FormDataParameterSubSchema) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_FormDataParameterSubSchema.Marshal(b, m, deterministic) -} -func (m *FormDataParameterSubSchema) XXX_Merge(src proto.Message) { - xxx_messageInfo_FormDataParameterSubSchema.Merge(m, src) -} -func (m *FormDataParameterSubSchema) XXX_Size() int { - return xxx_messageInfo_FormDataParameterSubSchema.Size(m) -} -func (m *FormDataParameterSubSchema) XXX_DiscardUnknown() { - xxx_messageInfo_FormDataParameterSubSchema.DiscardUnknown(m) + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{12} } -var xxx_messageInfo_FormDataParameterSubSchema proto.InternalMessageInfo - -func (m *FormDataParameterSubSchema) GetRequired() bool { - if m != nil { - return m.Required +func (x *FormDataParameterSubSchema) GetRequired() bool { + if x != nil { + return x.Required } return false } -func (m *FormDataParameterSubSchema) GetIn() string { - if m != nil { - return m.In +func (x *FormDataParameterSubSchema) GetIn() string { + if x != nil { + return x.In } return "" } -func (m *FormDataParameterSubSchema) GetDescription() string { - if m != nil { - return m.Description +func (x *FormDataParameterSubSchema) GetDescription() string { + if x != nil { + return x.Description } return "" } -func (m *FormDataParameterSubSchema) GetName() string { - if m != nil { - return m.Name +func (x *FormDataParameterSubSchema) GetName() string { + if x != nil { + return x.Name } return "" } -func (m *FormDataParameterSubSchema) 
GetAllowEmptyValue() bool { - if m != nil { - return m.AllowEmptyValue +func (x *FormDataParameterSubSchema) GetAllowEmptyValue() bool { + if x != nil { + return x.AllowEmptyValue } return false } -func (m *FormDataParameterSubSchema) GetType() string { - if m != nil { - return m.Type +func (x *FormDataParameterSubSchema) GetType() string { + if x != nil { + return x.Type } return "" } -func (m *FormDataParameterSubSchema) GetFormat() string { - if m != nil { - return m.Format +func (x *FormDataParameterSubSchema) GetFormat() string { + if x != nil { + return x.Format } return "" } -func (m *FormDataParameterSubSchema) GetItems() *PrimitivesItems { - if m != nil { - return m.Items +func (x *FormDataParameterSubSchema) GetItems() *PrimitivesItems { + if x != nil { + return x.Items } return nil } -func (m *FormDataParameterSubSchema) GetCollectionFormat() string { - if m != nil { - return m.CollectionFormat +func (x *FormDataParameterSubSchema) GetCollectionFormat() string { + if x != nil { + return x.CollectionFormat } return "" } -func (m *FormDataParameterSubSchema) GetDefault() *Any { - if m != nil { - return m.Default +func (x *FormDataParameterSubSchema) GetDefault() *Any { + if x != nil { + return x.Default } return nil } -func (m *FormDataParameterSubSchema) GetMaximum() float64 { - if m != nil { - return m.Maximum +func (x *FormDataParameterSubSchema) GetMaximum() float64 { + if x != nil { + return x.Maximum } return 0 } -func (m *FormDataParameterSubSchema) GetExclusiveMaximum() bool { - if m != nil { - return m.ExclusiveMaximum +func (x *FormDataParameterSubSchema) GetExclusiveMaximum() bool { + if x != nil { + return x.ExclusiveMaximum } return false } -func (m *FormDataParameterSubSchema) GetMinimum() float64 { - if m != nil { - return m.Minimum +func (x *FormDataParameterSubSchema) GetMinimum() float64 { + if x != nil { + return x.Minimum } return 0 } -func (m *FormDataParameterSubSchema) GetExclusiveMinimum() bool { - if m != nil { - return m.ExclusiveMinimum +func (x *FormDataParameterSubSchema) GetExclusiveMinimum() bool { + if x != nil { + return x.ExclusiveMinimum } return false } -func (m *FormDataParameterSubSchema) GetMaxLength() int64 { - if m != nil { - return m.MaxLength +func (x *FormDataParameterSubSchema) GetMaxLength() int64 { + if x != nil { + return x.MaxLength } return 0 } -func (m *FormDataParameterSubSchema) GetMinLength() int64 { - if m != nil { - return m.MinLength +func (x *FormDataParameterSubSchema) GetMinLength() int64 { + if x != nil { + return x.MinLength } return 0 } -func (m *FormDataParameterSubSchema) GetPattern() string { - if m != nil { - return m.Pattern +func (x *FormDataParameterSubSchema) GetPattern() string { + if x != nil { + return x.Pattern } return "" } -func (m *FormDataParameterSubSchema) GetMaxItems() int64 { - if m != nil { - return m.MaxItems +func (x *FormDataParameterSubSchema) GetMaxItems() int64 { + if x != nil { + return x.MaxItems } return 0 } -func (m *FormDataParameterSubSchema) GetMinItems() int64 { - if m != nil { - return m.MinItems +func (x *FormDataParameterSubSchema) GetMinItems() int64 { + if x != nil { + return x.MinItems } return 0 } -func (m *FormDataParameterSubSchema) GetUniqueItems() bool { - if m != nil { - return m.UniqueItems +func (x *FormDataParameterSubSchema) GetUniqueItems() bool { + if x != nil { + return x.UniqueItems } return false } -func (m *FormDataParameterSubSchema) GetEnum() []*Any { - if m != nil { - return m.Enum +func (x *FormDataParameterSubSchema) GetEnum() []*Any { + if x != nil { + 
return x.Enum } return nil } -func (m *FormDataParameterSubSchema) GetMultipleOf() float64 { - if m != nil { - return m.MultipleOf +func (x *FormDataParameterSubSchema) GetMultipleOf() float64 { + if x != nil { + return x.MultipleOf } return 0 } -func (m *FormDataParameterSubSchema) GetVendorExtension() []*NamedAny { - if m != nil { - return m.VendorExtension +func (x *FormDataParameterSubSchema) GetVendorExtension() []*NamedAny { + if x != nil { + return x.VendorExtension } return nil } type Header struct { - Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` - Format string `protobuf:"bytes,2,opt,name=format,proto3" json:"format,omitempty"` - Items *PrimitivesItems `protobuf:"bytes,3,opt,name=items,proto3" json:"items,omitempty"` - CollectionFormat string `protobuf:"bytes,4,opt,name=collection_format,json=collectionFormat,proto3" json:"collection_format,omitempty"` - Default *Any `protobuf:"bytes,5,opt,name=default,proto3" json:"default,omitempty"` - Maximum float64 `protobuf:"fixed64,6,opt,name=maximum,proto3" json:"maximum,omitempty"` - ExclusiveMaximum bool `protobuf:"varint,7,opt,name=exclusive_maximum,json=exclusiveMaximum,proto3" json:"exclusive_maximum,omitempty"` - Minimum float64 `protobuf:"fixed64,8,opt,name=minimum,proto3" json:"minimum,omitempty"` - ExclusiveMinimum bool `protobuf:"varint,9,opt,name=exclusive_minimum,json=exclusiveMinimum,proto3" json:"exclusive_minimum,omitempty"` - MaxLength int64 `protobuf:"varint,10,opt,name=max_length,json=maxLength,proto3" json:"max_length,omitempty"` - MinLength int64 `protobuf:"varint,11,opt,name=min_length,json=minLength,proto3" json:"min_length,omitempty"` - Pattern string `protobuf:"bytes,12,opt,name=pattern,proto3" json:"pattern,omitempty"` - MaxItems int64 `protobuf:"varint,13,opt,name=max_items,json=maxItems,proto3" json:"max_items,omitempty"` - MinItems int64 `protobuf:"varint,14,opt,name=min_items,json=minItems,proto3" json:"min_items,omitempty"` - UniqueItems bool `protobuf:"varint,15,opt,name=unique_items,json=uniqueItems,proto3" json:"unique_items,omitempty"` - Enum []*Any `protobuf:"bytes,16,rep,name=enum,proto3" json:"enum,omitempty"` - MultipleOf float64 `protobuf:"fixed64,17,opt,name=multiple_of,json=multipleOf,proto3" json:"multiple_of,omitempty"` - Description string `protobuf:"bytes,18,opt,name=description,proto3" json:"description,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,19,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Header) Reset() { *m = Header{} } -func (m *Header) String() string { return proto.CompactTextString(m) } -func (*Header) ProtoMessage() {} + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` + Format string `protobuf:"bytes,2,opt,name=format,proto3" json:"format,omitempty"` + Items *PrimitivesItems `protobuf:"bytes,3,opt,name=items,proto3" json:"items,omitempty"` + CollectionFormat string `protobuf:"bytes,4,opt,name=collection_format,json=collectionFormat,proto3" json:"collection_format,omitempty"` + Default *Any `protobuf:"bytes,5,opt,name=default,proto3" json:"default,omitempty"` + Maximum float64 `protobuf:"fixed64,6,opt,name=maximum,proto3" json:"maximum,omitempty"` + ExclusiveMaximum bool 
`protobuf:"varint,7,opt,name=exclusive_maximum,json=exclusiveMaximum,proto3" json:"exclusive_maximum,omitempty"` + Minimum float64 `protobuf:"fixed64,8,opt,name=minimum,proto3" json:"minimum,omitempty"` + ExclusiveMinimum bool `protobuf:"varint,9,opt,name=exclusive_minimum,json=exclusiveMinimum,proto3" json:"exclusive_minimum,omitempty"` + MaxLength int64 `protobuf:"varint,10,opt,name=max_length,json=maxLength,proto3" json:"max_length,omitempty"` + MinLength int64 `protobuf:"varint,11,opt,name=min_length,json=minLength,proto3" json:"min_length,omitempty"` + Pattern string `protobuf:"bytes,12,opt,name=pattern,proto3" json:"pattern,omitempty"` + MaxItems int64 `protobuf:"varint,13,opt,name=max_items,json=maxItems,proto3" json:"max_items,omitempty"` + MinItems int64 `protobuf:"varint,14,opt,name=min_items,json=minItems,proto3" json:"min_items,omitempty"` + UniqueItems bool `protobuf:"varint,15,opt,name=unique_items,json=uniqueItems,proto3" json:"unique_items,omitempty"` + Enum []*Any `protobuf:"bytes,16,rep,name=enum,proto3" json:"enum,omitempty"` + MultipleOf float64 `protobuf:"fixed64,17,opt,name=multiple_of,json=multipleOf,proto3" json:"multiple_of,omitempty"` + Description string `protobuf:"bytes,18,opt,name=description,proto3" json:"description,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,19,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` +} + +func (x *Header) Reset() { + *x = Header{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Header) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Header) ProtoMessage() {} + +func (x *Header) ProtoReflect() protoreflect.Message { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[13] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Header.ProtoReflect.Descriptor instead. 
func (*Header) Descriptor() ([]byte, []int) { - return fileDescriptor_a43d10d209cd31c2, []int{13} -} - -func (m *Header) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Header.Unmarshal(m, b) -} -func (m *Header) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Header.Marshal(b, m, deterministic) -} -func (m *Header) XXX_Merge(src proto.Message) { - xxx_messageInfo_Header.Merge(m, src) -} -func (m *Header) XXX_Size() int { - return xxx_messageInfo_Header.Size(m) -} -func (m *Header) XXX_DiscardUnknown() { - xxx_messageInfo_Header.DiscardUnknown(m) + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{13} } -var xxx_messageInfo_Header proto.InternalMessageInfo - -func (m *Header) GetType() string { - if m != nil { - return m.Type +func (x *Header) GetType() string { + if x != nil { + return x.Type } return "" } -func (m *Header) GetFormat() string { - if m != nil { - return m.Format +func (x *Header) GetFormat() string { + if x != nil { + return x.Format } return "" } -func (m *Header) GetItems() *PrimitivesItems { - if m != nil { - return m.Items +func (x *Header) GetItems() *PrimitivesItems { + if x != nil { + return x.Items } return nil } -func (m *Header) GetCollectionFormat() string { - if m != nil { - return m.CollectionFormat +func (x *Header) GetCollectionFormat() string { + if x != nil { + return x.CollectionFormat } return "" } -func (m *Header) GetDefault() *Any { - if m != nil { - return m.Default +func (x *Header) GetDefault() *Any { + if x != nil { + return x.Default } return nil } -func (m *Header) GetMaximum() float64 { - if m != nil { - return m.Maximum +func (x *Header) GetMaximum() float64 { + if x != nil { + return x.Maximum } return 0 } -func (m *Header) GetExclusiveMaximum() bool { - if m != nil { - return m.ExclusiveMaximum +func (x *Header) GetExclusiveMaximum() bool { + if x != nil { + return x.ExclusiveMaximum } return false } -func (m *Header) GetMinimum() float64 { - if m != nil { - return m.Minimum +func (x *Header) GetMinimum() float64 { + if x != nil { + return x.Minimum } return 0 } -func (m *Header) GetExclusiveMinimum() bool { - if m != nil { - return m.ExclusiveMinimum +func (x *Header) GetExclusiveMinimum() bool { + if x != nil { + return x.ExclusiveMinimum } return false } -func (m *Header) GetMaxLength() int64 { - if m != nil { - return m.MaxLength +func (x *Header) GetMaxLength() int64 { + if x != nil { + return x.MaxLength } return 0 } -func (m *Header) GetMinLength() int64 { - if m != nil { - return m.MinLength +func (x *Header) GetMinLength() int64 { + if x != nil { + return x.MinLength } return 0 } -func (m *Header) GetPattern() string { - if m != nil { - return m.Pattern +func (x *Header) GetPattern() string { + if x != nil { + return x.Pattern } return "" } -func (m *Header) GetMaxItems() int64 { - if m != nil { - return m.MaxItems +func (x *Header) GetMaxItems() int64 { + if x != nil { + return x.MaxItems } return 0 } -func (m *Header) GetMinItems() int64 { - if m != nil { - return m.MinItems +func (x *Header) GetMinItems() int64 { + if x != nil { + return x.MinItems } return 0 } -func (m *Header) GetUniqueItems() bool { - if m != nil { - return m.UniqueItems +func (x *Header) GetUniqueItems() bool { + if x != nil { + return x.UniqueItems } return false } -func (m *Header) GetEnum() []*Any { - if m != nil { - return m.Enum +func (x *Header) GetEnum() []*Any { + if x != nil { + return x.Enum } return nil } -func (m *Header) GetMultipleOf() float64 { - if m != nil { - return m.MultipleOf +func (x 
*Header) GetMultipleOf() float64 { + if x != nil { + return x.MultipleOf } return 0 } -func (m *Header) GetDescription() string { - if m != nil { - return m.Description +func (x *Header) GetDescription() string { + if x != nil { + return x.Description } return "" } -func (m *Header) GetVendorExtension() []*NamedAny { - if m != nil { - return m.VendorExtension +func (x *Header) GetVendorExtension() []*NamedAny { + if x != nil { + return x.VendorExtension } return nil } type HeaderParameterSubSchema struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // Determines whether or not this parameter is required or optional. Required bool `protobuf:"varint,1,opt,name=required,proto3" json:"required,omitempty"` // Determines the location of the parameter. @@ -1286,250 +1410,266 @@ type HeaderParameterSubSchema struct { // A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed. Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"` // The name of the parameter. - Name string `protobuf:"bytes,4,opt,name=name,proto3" json:"name,omitempty"` - Type string `protobuf:"bytes,5,opt,name=type,proto3" json:"type,omitempty"` - Format string `protobuf:"bytes,6,opt,name=format,proto3" json:"format,omitempty"` - Items *PrimitivesItems `protobuf:"bytes,7,opt,name=items,proto3" json:"items,omitempty"` - CollectionFormat string `protobuf:"bytes,8,opt,name=collection_format,json=collectionFormat,proto3" json:"collection_format,omitempty"` - Default *Any `protobuf:"bytes,9,opt,name=default,proto3" json:"default,omitempty"` - Maximum float64 `protobuf:"fixed64,10,opt,name=maximum,proto3" json:"maximum,omitempty"` - ExclusiveMaximum bool `protobuf:"varint,11,opt,name=exclusive_maximum,json=exclusiveMaximum,proto3" json:"exclusive_maximum,omitempty"` - Minimum float64 `protobuf:"fixed64,12,opt,name=minimum,proto3" json:"minimum,omitempty"` - ExclusiveMinimum bool `protobuf:"varint,13,opt,name=exclusive_minimum,json=exclusiveMinimum,proto3" json:"exclusive_minimum,omitempty"` - MaxLength int64 `protobuf:"varint,14,opt,name=max_length,json=maxLength,proto3" json:"max_length,omitempty"` - MinLength int64 `protobuf:"varint,15,opt,name=min_length,json=minLength,proto3" json:"min_length,omitempty"` - Pattern string `protobuf:"bytes,16,opt,name=pattern,proto3" json:"pattern,omitempty"` - MaxItems int64 `protobuf:"varint,17,opt,name=max_items,json=maxItems,proto3" json:"max_items,omitempty"` - MinItems int64 `protobuf:"varint,18,opt,name=min_items,json=minItems,proto3" json:"min_items,omitempty"` - UniqueItems bool `protobuf:"varint,19,opt,name=unique_items,json=uniqueItems,proto3" json:"unique_items,omitempty"` - Enum []*Any `protobuf:"bytes,20,rep,name=enum,proto3" json:"enum,omitempty"` - MultipleOf float64 `protobuf:"fixed64,21,opt,name=multiple_of,json=multipleOf,proto3" json:"multiple_of,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,22,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *HeaderParameterSubSchema) Reset() { *m = HeaderParameterSubSchema{} } -func (m *HeaderParameterSubSchema) String() string { return proto.CompactTextString(m) } -func (*HeaderParameterSubSchema) ProtoMessage() {} + Name string `protobuf:"bytes,4,opt,name=name,proto3" json:"name,omitempty"` + Type string 
`protobuf:"bytes,5,opt,name=type,proto3" json:"type,omitempty"` + Format string `protobuf:"bytes,6,opt,name=format,proto3" json:"format,omitempty"` + Items *PrimitivesItems `protobuf:"bytes,7,opt,name=items,proto3" json:"items,omitempty"` + CollectionFormat string `protobuf:"bytes,8,opt,name=collection_format,json=collectionFormat,proto3" json:"collection_format,omitempty"` + Default *Any `protobuf:"bytes,9,opt,name=default,proto3" json:"default,omitempty"` + Maximum float64 `protobuf:"fixed64,10,opt,name=maximum,proto3" json:"maximum,omitempty"` + ExclusiveMaximum bool `protobuf:"varint,11,opt,name=exclusive_maximum,json=exclusiveMaximum,proto3" json:"exclusive_maximum,omitempty"` + Minimum float64 `protobuf:"fixed64,12,opt,name=minimum,proto3" json:"minimum,omitempty"` + ExclusiveMinimum bool `protobuf:"varint,13,opt,name=exclusive_minimum,json=exclusiveMinimum,proto3" json:"exclusive_minimum,omitempty"` + MaxLength int64 `protobuf:"varint,14,opt,name=max_length,json=maxLength,proto3" json:"max_length,omitempty"` + MinLength int64 `protobuf:"varint,15,opt,name=min_length,json=minLength,proto3" json:"min_length,omitempty"` + Pattern string `protobuf:"bytes,16,opt,name=pattern,proto3" json:"pattern,omitempty"` + MaxItems int64 `protobuf:"varint,17,opt,name=max_items,json=maxItems,proto3" json:"max_items,omitempty"` + MinItems int64 `protobuf:"varint,18,opt,name=min_items,json=minItems,proto3" json:"min_items,omitempty"` + UniqueItems bool `protobuf:"varint,19,opt,name=unique_items,json=uniqueItems,proto3" json:"unique_items,omitempty"` + Enum []*Any `protobuf:"bytes,20,rep,name=enum,proto3" json:"enum,omitempty"` + MultipleOf float64 `protobuf:"fixed64,21,opt,name=multiple_of,json=multipleOf,proto3" json:"multiple_of,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,22,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` +} + +func (x *HeaderParameterSubSchema) Reset() { + *x = HeaderParameterSubSchema{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *HeaderParameterSubSchema) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HeaderParameterSubSchema) ProtoMessage() {} + +func (x *HeaderParameterSubSchema) ProtoReflect() protoreflect.Message { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[14] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HeaderParameterSubSchema.ProtoReflect.Descriptor instead. 
func (*HeaderParameterSubSchema) Descriptor() ([]byte, []int) { - return fileDescriptor_a43d10d209cd31c2, []int{14} -} - -func (m *HeaderParameterSubSchema) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_HeaderParameterSubSchema.Unmarshal(m, b) -} -func (m *HeaderParameterSubSchema) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_HeaderParameterSubSchema.Marshal(b, m, deterministic) -} -func (m *HeaderParameterSubSchema) XXX_Merge(src proto.Message) { - xxx_messageInfo_HeaderParameterSubSchema.Merge(m, src) -} -func (m *HeaderParameterSubSchema) XXX_Size() int { - return xxx_messageInfo_HeaderParameterSubSchema.Size(m) + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{14} } -func (m *HeaderParameterSubSchema) XXX_DiscardUnknown() { - xxx_messageInfo_HeaderParameterSubSchema.DiscardUnknown(m) -} - -var xxx_messageInfo_HeaderParameterSubSchema proto.InternalMessageInfo -func (m *HeaderParameterSubSchema) GetRequired() bool { - if m != nil { - return m.Required +func (x *HeaderParameterSubSchema) GetRequired() bool { + if x != nil { + return x.Required } return false } -func (m *HeaderParameterSubSchema) GetIn() string { - if m != nil { - return m.In +func (x *HeaderParameterSubSchema) GetIn() string { + if x != nil { + return x.In } return "" } -func (m *HeaderParameterSubSchema) GetDescription() string { - if m != nil { - return m.Description +func (x *HeaderParameterSubSchema) GetDescription() string { + if x != nil { + return x.Description } return "" } -func (m *HeaderParameterSubSchema) GetName() string { - if m != nil { - return m.Name +func (x *HeaderParameterSubSchema) GetName() string { + if x != nil { + return x.Name } return "" } -func (m *HeaderParameterSubSchema) GetType() string { - if m != nil { - return m.Type +func (x *HeaderParameterSubSchema) GetType() string { + if x != nil { + return x.Type } return "" } -func (m *HeaderParameterSubSchema) GetFormat() string { - if m != nil { - return m.Format +func (x *HeaderParameterSubSchema) GetFormat() string { + if x != nil { + return x.Format } return "" } -func (m *HeaderParameterSubSchema) GetItems() *PrimitivesItems { - if m != nil { - return m.Items +func (x *HeaderParameterSubSchema) GetItems() *PrimitivesItems { + if x != nil { + return x.Items } return nil } -func (m *HeaderParameterSubSchema) GetCollectionFormat() string { - if m != nil { - return m.CollectionFormat +func (x *HeaderParameterSubSchema) GetCollectionFormat() string { + if x != nil { + return x.CollectionFormat } return "" } -func (m *HeaderParameterSubSchema) GetDefault() *Any { - if m != nil { - return m.Default +func (x *HeaderParameterSubSchema) GetDefault() *Any { + if x != nil { + return x.Default } return nil } -func (m *HeaderParameterSubSchema) GetMaximum() float64 { - if m != nil { - return m.Maximum +func (x *HeaderParameterSubSchema) GetMaximum() float64 { + if x != nil { + return x.Maximum } return 0 } -func (m *HeaderParameterSubSchema) GetExclusiveMaximum() bool { - if m != nil { - return m.ExclusiveMaximum +func (x *HeaderParameterSubSchema) GetExclusiveMaximum() bool { + if x != nil { + return x.ExclusiveMaximum } return false } -func (m *HeaderParameterSubSchema) GetMinimum() float64 { - if m != nil { - return m.Minimum +func (x *HeaderParameterSubSchema) GetMinimum() float64 { + if x != nil { + return x.Minimum } return 0 } -func (m *HeaderParameterSubSchema) GetExclusiveMinimum() bool { - if m != nil { - return m.ExclusiveMinimum +func (x *HeaderParameterSubSchema) 
GetExclusiveMinimum() bool { + if x != nil { + return x.ExclusiveMinimum } return false } -func (m *HeaderParameterSubSchema) GetMaxLength() int64 { - if m != nil { - return m.MaxLength +func (x *HeaderParameterSubSchema) GetMaxLength() int64 { + if x != nil { + return x.MaxLength } return 0 } -func (m *HeaderParameterSubSchema) GetMinLength() int64 { - if m != nil { - return m.MinLength +func (x *HeaderParameterSubSchema) GetMinLength() int64 { + if x != nil { + return x.MinLength } return 0 } -func (m *HeaderParameterSubSchema) GetPattern() string { - if m != nil { - return m.Pattern +func (x *HeaderParameterSubSchema) GetPattern() string { + if x != nil { + return x.Pattern } return "" } -func (m *HeaderParameterSubSchema) GetMaxItems() int64 { - if m != nil { - return m.MaxItems +func (x *HeaderParameterSubSchema) GetMaxItems() int64 { + if x != nil { + return x.MaxItems } return 0 } -func (m *HeaderParameterSubSchema) GetMinItems() int64 { - if m != nil { - return m.MinItems +func (x *HeaderParameterSubSchema) GetMinItems() int64 { + if x != nil { + return x.MinItems } return 0 } -func (m *HeaderParameterSubSchema) GetUniqueItems() bool { - if m != nil { - return m.UniqueItems +func (x *HeaderParameterSubSchema) GetUniqueItems() bool { + if x != nil { + return x.UniqueItems } return false } -func (m *HeaderParameterSubSchema) GetEnum() []*Any { - if m != nil { - return m.Enum +func (x *HeaderParameterSubSchema) GetEnum() []*Any { + if x != nil { + return x.Enum } return nil } -func (m *HeaderParameterSubSchema) GetMultipleOf() float64 { - if m != nil { - return m.MultipleOf +func (x *HeaderParameterSubSchema) GetMultipleOf() float64 { + if x != nil { + return x.MultipleOf } return 0 } -func (m *HeaderParameterSubSchema) GetVendorExtension() []*NamedAny { - if m != nil { - return m.VendorExtension +func (x *HeaderParameterSubSchema) GetVendorExtension() []*NamedAny { + if x != nil { + return x.VendorExtension } return nil } type Headers struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + AdditionalProperties []*NamedHeader `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties,proto3" json:"additional_properties,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` } -func (m *Headers) Reset() { *m = Headers{} } -func (m *Headers) String() string { return proto.CompactTextString(m) } -func (*Headers) ProtoMessage() {} -func (*Headers) Descriptor() ([]byte, []int) { - return fileDescriptor_a43d10d209cd31c2, []int{15} +func (x *Headers) Reset() { + *x = Headers{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[15] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *Headers) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Headers.Unmarshal(m, b) -} -func (m *Headers) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Headers.Marshal(b, m, deterministic) +func (x *Headers) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *Headers) XXX_Merge(src proto.Message) { - xxx_messageInfo_Headers.Merge(m, src) -} -func (m *Headers) XXX_Size() int { - return xxx_messageInfo_Headers.Size(m) -} -func (m *Headers) XXX_DiscardUnknown() { - xxx_messageInfo_Headers.DiscardUnknown(m) + +func (*Headers) ProtoMessage() {} + +func (x *Headers) ProtoReflect() protoreflect.Message { + mi := 
&file_openapiv2_OpenAPIv2_proto_msgTypes[15] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_Headers proto.InternalMessageInfo +// Deprecated: Use Headers.ProtoReflect.Descriptor instead. +func (*Headers) Descriptor() ([]byte, []int) { + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{15} +} -func (m *Headers) GetAdditionalProperties() []*NamedHeader { - if m != nil { - return m.AdditionalProperties +func (x *Headers) GetAdditionalProperties() []*NamedHeader { + if x != nil { + return x.AdditionalProperties } return nil } // General information about the API. type Info struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // A unique and precise title of the API. Title string `protobuf:"bytes,1,opt,name=title,proto3" json:"title,omitempty"` // A semantic version number of the API. @@ -1537,797 +1677,885 @@ type Info struct { // A longer description of the API. Should be different from the title. GitHub Flavored Markdown is allowed. Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"` // The terms of service for the API. - TermsOfService string `protobuf:"bytes,4,opt,name=terms_of_service,json=termsOfService,proto3" json:"terms_of_service,omitempty"` - Contact *Contact `protobuf:"bytes,5,opt,name=contact,proto3" json:"contact,omitempty"` - License *License `protobuf:"bytes,6,opt,name=license,proto3" json:"license,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,7,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Info) Reset() { *m = Info{} } -func (m *Info) String() string { return proto.CompactTextString(m) } -func (*Info) ProtoMessage() {} -func (*Info) Descriptor() ([]byte, []int) { - return fileDescriptor_a43d10d209cd31c2, []int{16} + TermsOfService string `protobuf:"bytes,4,opt,name=terms_of_service,json=termsOfService,proto3" json:"terms_of_service,omitempty"` + Contact *Contact `protobuf:"bytes,5,opt,name=contact,proto3" json:"contact,omitempty"` + License *License `protobuf:"bytes,6,opt,name=license,proto3" json:"license,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,7,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` } -func (m *Info) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Info.Unmarshal(m, b) -} -func (m *Info) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Info.Marshal(b, m, deterministic) -} -func (m *Info) XXX_Merge(src proto.Message) { - xxx_messageInfo_Info.Merge(m, src) +func (x *Info) Reset() { + *x = Info{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[16] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *Info) XXX_Size() int { - return xxx_messageInfo_Info.Size(m) + +func (x *Info) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *Info) XXX_DiscardUnknown() { - xxx_messageInfo_Info.DiscardUnknown(m) + +func (*Info) ProtoMessage() {} + +func (x *Info) ProtoReflect() protoreflect.Message { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[16] + if protoimpl.UnsafeEnabled && x != nil { + ms := 
protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_Info proto.InternalMessageInfo +// Deprecated: Use Info.ProtoReflect.Descriptor instead. +func (*Info) Descriptor() ([]byte, []int) { + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{16} +} -func (m *Info) GetTitle() string { - if m != nil { - return m.Title +func (x *Info) GetTitle() string { + if x != nil { + return x.Title } return "" } -func (m *Info) GetVersion() string { - if m != nil { - return m.Version +func (x *Info) GetVersion() string { + if x != nil { + return x.Version } return "" } -func (m *Info) GetDescription() string { - if m != nil { - return m.Description +func (x *Info) GetDescription() string { + if x != nil { + return x.Description } return "" } -func (m *Info) GetTermsOfService() string { - if m != nil { - return m.TermsOfService +func (x *Info) GetTermsOfService() string { + if x != nil { + return x.TermsOfService } return "" } -func (m *Info) GetContact() *Contact { - if m != nil { - return m.Contact +func (x *Info) GetContact() *Contact { + if x != nil { + return x.Contact } return nil } -func (m *Info) GetLicense() *License { - if m != nil { - return m.License +func (x *Info) GetLicense() *License { + if x != nil { + return x.License } return nil } -func (m *Info) GetVendorExtension() []*NamedAny { - if m != nil { - return m.VendorExtension +func (x *Info) GetVendorExtension() []*NamedAny { + if x != nil { + return x.VendorExtension } return nil } type ItemsItem struct { - Schema []*Schema `protobuf:"bytes,1,rep,name=schema,proto3" json:"schema,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -func (m *ItemsItem) Reset() { *m = ItemsItem{} } -func (m *ItemsItem) String() string { return proto.CompactTextString(m) } -func (*ItemsItem) ProtoMessage() {} -func (*ItemsItem) Descriptor() ([]byte, []int) { - return fileDescriptor_a43d10d209cd31c2, []int{17} + Schema []*Schema `protobuf:"bytes,1,rep,name=schema,proto3" json:"schema,omitempty"` } -func (m *ItemsItem) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ItemsItem.Unmarshal(m, b) -} -func (m *ItemsItem) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ItemsItem.Marshal(b, m, deterministic) -} -func (m *ItemsItem) XXX_Merge(src proto.Message) { - xxx_messageInfo_ItemsItem.Merge(m, src) +func (x *ItemsItem) Reset() { + *x = ItemsItem{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[17] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *ItemsItem) XXX_Size() int { - return xxx_messageInfo_ItemsItem.Size(m) + +func (x *ItemsItem) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *ItemsItem) XXX_DiscardUnknown() { - xxx_messageInfo_ItemsItem.DiscardUnknown(m) + +func (*ItemsItem) ProtoMessage() {} + +func (x *ItemsItem) ProtoReflect() protoreflect.Message { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[17] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_ItemsItem proto.InternalMessageInfo +// Deprecated: Use 
ItemsItem.ProtoReflect.Descriptor instead. +func (*ItemsItem) Descriptor() ([]byte, []int) { + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{17} +} -func (m *ItemsItem) GetSchema() []*Schema { - if m != nil { - return m.Schema +func (x *ItemsItem) GetSchema() []*Schema { + if x != nil { + return x.Schema } return nil } type JsonReference struct { - XRef string `protobuf:"bytes,1,opt,name=_ref,json=Ref,proto3" json:"_ref,omitempty"` - Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -func (m *JsonReference) Reset() { *m = JsonReference{} } -func (m *JsonReference) String() string { return proto.CompactTextString(m) } -func (*JsonReference) ProtoMessage() {} -func (*JsonReference) Descriptor() ([]byte, []int) { - return fileDescriptor_a43d10d209cd31c2, []int{18} + XRef string `protobuf:"bytes,1,opt,name=_ref,json=Ref,proto3" json:"_ref,omitempty"` + Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"` } -func (m *JsonReference) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_JsonReference.Unmarshal(m, b) -} -func (m *JsonReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_JsonReference.Marshal(b, m, deterministic) -} -func (m *JsonReference) XXX_Merge(src proto.Message) { - xxx_messageInfo_JsonReference.Merge(m, src) +func (x *JsonReference) Reset() { + *x = JsonReference{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[18] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *JsonReference) XXX_Size() int { - return xxx_messageInfo_JsonReference.Size(m) + +func (x *JsonReference) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *JsonReference) XXX_DiscardUnknown() { - xxx_messageInfo_JsonReference.DiscardUnknown(m) + +func (*JsonReference) ProtoMessage() {} + +func (x *JsonReference) ProtoReflect() protoreflect.Message { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[18] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_JsonReference proto.InternalMessageInfo +// Deprecated: Use JsonReference.ProtoReflect.Descriptor instead. +func (*JsonReference) Descriptor() ([]byte, []int) { + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{18} +} -func (m *JsonReference) GetXRef() string { - if m != nil { - return m.XRef +func (x *JsonReference) GetXRef() string { + if x != nil { + return x.XRef } return "" } -func (m *JsonReference) GetDescription() string { - if m != nil { - return m.Description +func (x *JsonReference) GetDescription() string { + if x != nil { + return x.Description } return "" } type License struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // The name of the license type. It's encouraged to use an OSI compatible license. Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` // The URL pointing to the license. 
- Url string `protobuf:"bytes,2,opt,name=url,proto3" json:"url,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,3,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Url string `protobuf:"bytes,2,opt,name=url,proto3" json:"url,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,3,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` } -func (m *License) Reset() { *m = License{} } -func (m *License) String() string { return proto.CompactTextString(m) } -func (*License) ProtoMessage() {} -func (*License) Descriptor() ([]byte, []int) { - return fileDescriptor_a43d10d209cd31c2, []int{19} +func (x *License) Reset() { + *x = License{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[19] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *License) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_License.Unmarshal(m, b) -} -func (m *License) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_License.Marshal(b, m, deterministic) +func (x *License) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *License) XXX_Merge(src proto.Message) { - xxx_messageInfo_License.Merge(m, src) -} -func (m *License) XXX_Size() int { - return xxx_messageInfo_License.Size(m) -} -func (m *License) XXX_DiscardUnknown() { - xxx_messageInfo_License.DiscardUnknown(m) + +func (*License) ProtoMessage() {} + +func (x *License) ProtoReflect() protoreflect.Message { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[19] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_License proto.InternalMessageInfo +// Deprecated: Use License.ProtoReflect.Descriptor instead. +func (*License) Descriptor() ([]byte, []int) { + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{19} +} -func (m *License) GetName() string { - if m != nil { - return m.Name +func (x *License) GetName() string { + if x != nil { + return x.Name } return "" } -func (m *License) GetUrl() string { - if m != nil { - return m.Url +func (x *License) GetUrl() string { + if x != nil { + return x.Url } return "" } -func (m *License) GetVendorExtension() []*NamedAny { - if m != nil { - return m.VendorExtension +func (x *License) GetVendorExtension() []*NamedAny { + if x != nil { + return x.VendorExtension } return nil } // Automatically-generated message used to represent maps of Any as ordered (name,value) pairs. 
type NamedAny struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // Map key Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` // Mapped value - Value *Any `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Value *Any `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` } -func (m *NamedAny) Reset() { *m = NamedAny{} } -func (m *NamedAny) String() string { return proto.CompactTextString(m) } -func (*NamedAny) ProtoMessage() {} -func (*NamedAny) Descriptor() ([]byte, []int) { - return fileDescriptor_a43d10d209cd31c2, []int{20} +func (x *NamedAny) Reset() { + *x = NamedAny{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[20] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *NamedAny) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_NamedAny.Unmarshal(m, b) -} -func (m *NamedAny) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_NamedAny.Marshal(b, m, deterministic) -} -func (m *NamedAny) XXX_Merge(src proto.Message) { - xxx_messageInfo_NamedAny.Merge(m, src) -} -func (m *NamedAny) XXX_Size() int { - return xxx_messageInfo_NamedAny.Size(m) +func (x *NamedAny) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *NamedAny) XXX_DiscardUnknown() { - xxx_messageInfo_NamedAny.DiscardUnknown(m) + +func (*NamedAny) ProtoMessage() {} + +func (x *NamedAny) ProtoReflect() protoreflect.Message { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[20] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_NamedAny proto.InternalMessageInfo +// Deprecated: Use NamedAny.ProtoReflect.Descriptor instead. +func (*NamedAny) Descriptor() ([]byte, []int) { + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{20} +} -func (m *NamedAny) GetName() string { - if m != nil { - return m.Name +func (x *NamedAny) GetName() string { + if x != nil { + return x.Name } return "" } -func (m *NamedAny) GetValue() *Any { - if m != nil { - return m.Value +func (x *NamedAny) GetValue() *Any { + if x != nil { + return x.Value } return nil } // Automatically-generated message used to represent maps of Header as ordered (name,value) pairs. 
type NamedHeader struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // Map key Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` // Mapped value - Value *Header `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Value *Header `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` } -func (m *NamedHeader) Reset() { *m = NamedHeader{} } -func (m *NamedHeader) String() string { return proto.CompactTextString(m) } -func (*NamedHeader) ProtoMessage() {} -func (*NamedHeader) Descriptor() ([]byte, []int) { - return fileDescriptor_a43d10d209cd31c2, []int{21} +func (x *NamedHeader) Reset() { + *x = NamedHeader{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[21] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *NamedHeader) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_NamedHeader.Unmarshal(m, b) -} -func (m *NamedHeader) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_NamedHeader.Marshal(b, m, deterministic) -} -func (m *NamedHeader) XXX_Merge(src proto.Message) { - xxx_messageInfo_NamedHeader.Merge(m, src) +func (x *NamedHeader) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *NamedHeader) XXX_Size() int { - return xxx_messageInfo_NamedHeader.Size(m) -} -func (m *NamedHeader) XXX_DiscardUnknown() { - xxx_messageInfo_NamedHeader.DiscardUnknown(m) + +func (*NamedHeader) ProtoMessage() {} + +func (x *NamedHeader) ProtoReflect() protoreflect.Message { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[21] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_NamedHeader proto.InternalMessageInfo +// Deprecated: Use NamedHeader.ProtoReflect.Descriptor instead. +func (*NamedHeader) Descriptor() ([]byte, []int) { + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{21} +} -func (m *NamedHeader) GetName() string { - if m != nil { - return m.Name +func (x *NamedHeader) GetName() string { + if x != nil { + return x.Name } return "" } -func (m *NamedHeader) GetValue() *Header { - if m != nil { - return m.Value +func (x *NamedHeader) GetValue() *Header { + if x != nil { + return x.Value } return nil } // Automatically-generated message used to represent maps of Parameter as ordered (name,value) pairs. 
type NamedParameter struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // Map key Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` // Mapped value - Value *Parameter `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Value *Parameter `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` } -func (m *NamedParameter) Reset() { *m = NamedParameter{} } -func (m *NamedParameter) String() string { return proto.CompactTextString(m) } -func (*NamedParameter) ProtoMessage() {} -func (*NamedParameter) Descriptor() ([]byte, []int) { - return fileDescriptor_a43d10d209cd31c2, []int{22} +func (x *NamedParameter) Reset() { + *x = NamedParameter{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[22] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *NamedParameter) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_NamedParameter.Unmarshal(m, b) -} -func (m *NamedParameter) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_NamedParameter.Marshal(b, m, deterministic) +func (x *NamedParameter) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *NamedParameter) XXX_Merge(src proto.Message) { - xxx_messageInfo_NamedParameter.Merge(m, src) -} -func (m *NamedParameter) XXX_Size() int { - return xxx_messageInfo_NamedParameter.Size(m) -} -func (m *NamedParameter) XXX_DiscardUnknown() { - xxx_messageInfo_NamedParameter.DiscardUnknown(m) + +func (*NamedParameter) ProtoMessage() {} + +func (x *NamedParameter) ProtoReflect() protoreflect.Message { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[22] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_NamedParameter proto.InternalMessageInfo +// Deprecated: Use NamedParameter.ProtoReflect.Descriptor instead. +func (*NamedParameter) Descriptor() ([]byte, []int) { + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{22} +} -func (m *NamedParameter) GetName() string { - if m != nil { - return m.Name +func (x *NamedParameter) GetName() string { + if x != nil { + return x.Name } return "" } -func (m *NamedParameter) GetValue() *Parameter { - if m != nil { - return m.Value +func (x *NamedParameter) GetValue() *Parameter { + if x != nil { + return x.Value } return nil } // Automatically-generated message used to represent maps of PathItem as ordered (name,value) pairs. 
type NamedPathItem struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // Map key Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` // Mapped value - Value *PathItem `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Value *PathItem `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` } -func (m *NamedPathItem) Reset() { *m = NamedPathItem{} } -func (m *NamedPathItem) String() string { return proto.CompactTextString(m) } -func (*NamedPathItem) ProtoMessage() {} -func (*NamedPathItem) Descriptor() ([]byte, []int) { - return fileDescriptor_a43d10d209cd31c2, []int{23} +func (x *NamedPathItem) Reset() { + *x = NamedPathItem{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[23] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *NamedPathItem) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_NamedPathItem.Unmarshal(m, b) +func (x *NamedPathItem) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *NamedPathItem) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_NamedPathItem.Marshal(b, m, deterministic) -} -func (m *NamedPathItem) XXX_Merge(src proto.Message) { - xxx_messageInfo_NamedPathItem.Merge(m, src) -} -func (m *NamedPathItem) XXX_Size() int { - return xxx_messageInfo_NamedPathItem.Size(m) -} -func (m *NamedPathItem) XXX_DiscardUnknown() { - xxx_messageInfo_NamedPathItem.DiscardUnknown(m) + +func (*NamedPathItem) ProtoMessage() {} + +func (x *NamedPathItem) ProtoReflect() protoreflect.Message { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[23] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_NamedPathItem proto.InternalMessageInfo +// Deprecated: Use NamedPathItem.ProtoReflect.Descriptor instead. +func (*NamedPathItem) Descriptor() ([]byte, []int) { + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{23} +} -func (m *NamedPathItem) GetName() string { - if m != nil { - return m.Name +func (x *NamedPathItem) GetName() string { + if x != nil { + return x.Name } return "" } -func (m *NamedPathItem) GetValue() *PathItem { - if m != nil { - return m.Value +func (x *NamedPathItem) GetValue() *PathItem { + if x != nil { + return x.Value } return nil } // Automatically-generated message used to represent maps of Response as ordered (name,value) pairs. 
type NamedResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // Map key Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` // Mapped value - Value *Response `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Value *Response `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` } -func (m *NamedResponse) Reset() { *m = NamedResponse{} } -func (m *NamedResponse) String() string { return proto.CompactTextString(m) } -func (*NamedResponse) ProtoMessage() {} -func (*NamedResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_a43d10d209cd31c2, []int{24} +func (x *NamedResponse) Reset() { + *x = NamedResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[24] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *NamedResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_NamedResponse.Unmarshal(m, b) -} -func (m *NamedResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_NamedResponse.Marshal(b, m, deterministic) -} -func (m *NamedResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_NamedResponse.Merge(m, src) -} -func (m *NamedResponse) XXX_Size() int { - return xxx_messageInfo_NamedResponse.Size(m) +func (x *NamedResponse) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *NamedResponse) XXX_DiscardUnknown() { - xxx_messageInfo_NamedResponse.DiscardUnknown(m) + +func (*NamedResponse) ProtoMessage() {} + +func (x *NamedResponse) ProtoReflect() protoreflect.Message { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[24] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_NamedResponse proto.InternalMessageInfo +// Deprecated: Use NamedResponse.ProtoReflect.Descriptor instead. +func (*NamedResponse) Descriptor() ([]byte, []int) { + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{24} +} -func (m *NamedResponse) GetName() string { - if m != nil { - return m.Name +func (x *NamedResponse) GetName() string { + if x != nil { + return x.Name } return "" } -func (m *NamedResponse) GetValue() *Response { - if m != nil { - return m.Value +func (x *NamedResponse) GetValue() *Response { + if x != nil { + return x.Value } return nil } // Automatically-generated message used to represent maps of ResponseValue as ordered (name,value) pairs. 
type NamedResponseValue struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // Map key Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` // Mapped value - Value *ResponseValue `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Value *ResponseValue `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` } -func (m *NamedResponseValue) Reset() { *m = NamedResponseValue{} } -func (m *NamedResponseValue) String() string { return proto.CompactTextString(m) } -func (*NamedResponseValue) ProtoMessage() {} -func (*NamedResponseValue) Descriptor() ([]byte, []int) { - return fileDescriptor_a43d10d209cd31c2, []int{25} +func (x *NamedResponseValue) Reset() { + *x = NamedResponseValue{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[25] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *NamedResponseValue) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_NamedResponseValue.Unmarshal(m, b) -} -func (m *NamedResponseValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_NamedResponseValue.Marshal(b, m, deterministic) -} -func (m *NamedResponseValue) XXX_Merge(src proto.Message) { - xxx_messageInfo_NamedResponseValue.Merge(m, src) +func (x *NamedResponseValue) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *NamedResponseValue) XXX_Size() int { - return xxx_messageInfo_NamedResponseValue.Size(m) -} -func (m *NamedResponseValue) XXX_DiscardUnknown() { - xxx_messageInfo_NamedResponseValue.DiscardUnknown(m) + +func (*NamedResponseValue) ProtoMessage() {} + +func (x *NamedResponseValue) ProtoReflect() protoreflect.Message { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[25] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_NamedResponseValue proto.InternalMessageInfo +// Deprecated: Use NamedResponseValue.ProtoReflect.Descriptor instead. +func (*NamedResponseValue) Descriptor() ([]byte, []int) { + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{25} +} -func (m *NamedResponseValue) GetName() string { - if m != nil { - return m.Name +func (x *NamedResponseValue) GetName() string { + if x != nil { + return x.Name } return "" } -func (m *NamedResponseValue) GetValue() *ResponseValue { - if m != nil { - return m.Value +func (x *NamedResponseValue) GetValue() *ResponseValue { + if x != nil { + return x.Value } return nil } // Automatically-generated message used to represent maps of Schema as ordered (name,value) pairs. 
type NamedSchema struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // Map key Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` // Mapped value - Value *Schema `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Value *Schema `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` } -func (m *NamedSchema) Reset() { *m = NamedSchema{} } -func (m *NamedSchema) String() string { return proto.CompactTextString(m) } -func (*NamedSchema) ProtoMessage() {} -func (*NamedSchema) Descriptor() ([]byte, []int) { - return fileDescriptor_a43d10d209cd31c2, []int{26} +func (x *NamedSchema) Reset() { + *x = NamedSchema{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[26] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *NamedSchema) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_NamedSchema.Unmarshal(m, b) -} -func (m *NamedSchema) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_NamedSchema.Marshal(b, m, deterministic) +func (x *NamedSchema) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *NamedSchema) XXX_Merge(src proto.Message) { - xxx_messageInfo_NamedSchema.Merge(m, src) -} -func (m *NamedSchema) XXX_Size() int { - return xxx_messageInfo_NamedSchema.Size(m) -} -func (m *NamedSchema) XXX_DiscardUnknown() { - xxx_messageInfo_NamedSchema.DiscardUnknown(m) + +func (*NamedSchema) ProtoMessage() {} + +func (x *NamedSchema) ProtoReflect() protoreflect.Message { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[26] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_NamedSchema proto.InternalMessageInfo +// Deprecated: Use NamedSchema.ProtoReflect.Descriptor instead. +func (*NamedSchema) Descriptor() ([]byte, []int) { + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{26} +} -func (m *NamedSchema) GetName() string { - if m != nil { - return m.Name +func (x *NamedSchema) GetName() string { + if x != nil { + return x.Name } return "" } -func (m *NamedSchema) GetValue() *Schema { - if m != nil { - return m.Value +func (x *NamedSchema) GetValue() *Schema { + if x != nil { + return x.Value } return nil } // Automatically-generated message used to represent maps of SecurityDefinitionsItem as ordered (name,value) pairs. 
type NamedSecurityDefinitionsItem struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // Map key Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` // Mapped value - Value *SecurityDefinitionsItem `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Value *SecurityDefinitionsItem `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` } -func (m *NamedSecurityDefinitionsItem) Reset() { *m = NamedSecurityDefinitionsItem{} } -func (m *NamedSecurityDefinitionsItem) String() string { return proto.CompactTextString(m) } -func (*NamedSecurityDefinitionsItem) ProtoMessage() {} -func (*NamedSecurityDefinitionsItem) Descriptor() ([]byte, []int) { - return fileDescriptor_a43d10d209cd31c2, []int{27} +func (x *NamedSecurityDefinitionsItem) Reset() { + *x = NamedSecurityDefinitionsItem{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[27] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *NamedSecurityDefinitionsItem) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_NamedSecurityDefinitionsItem.Unmarshal(m, b) +func (x *NamedSecurityDefinitionsItem) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *NamedSecurityDefinitionsItem) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_NamedSecurityDefinitionsItem.Marshal(b, m, deterministic) -} -func (m *NamedSecurityDefinitionsItem) XXX_Merge(src proto.Message) { - xxx_messageInfo_NamedSecurityDefinitionsItem.Merge(m, src) -} -func (m *NamedSecurityDefinitionsItem) XXX_Size() int { - return xxx_messageInfo_NamedSecurityDefinitionsItem.Size(m) -} -func (m *NamedSecurityDefinitionsItem) XXX_DiscardUnknown() { - xxx_messageInfo_NamedSecurityDefinitionsItem.DiscardUnknown(m) + +func (*NamedSecurityDefinitionsItem) ProtoMessage() {} + +func (x *NamedSecurityDefinitionsItem) ProtoReflect() protoreflect.Message { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[27] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_NamedSecurityDefinitionsItem proto.InternalMessageInfo +// Deprecated: Use NamedSecurityDefinitionsItem.ProtoReflect.Descriptor instead. +func (*NamedSecurityDefinitionsItem) Descriptor() ([]byte, []int) { + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{27} +} -func (m *NamedSecurityDefinitionsItem) GetName() string { - if m != nil { - return m.Name +func (x *NamedSecurityDefinitionsItem) GetName() string { + if x != nil { + return x.Name } return "" } -func (m *NamedSecurityDefinitionsItem) GetValue() *SecurityDefinitionsItem { - if m != nil { - return m.Value +func (x *NamedSecurityDefinitionsItem) GetValue() *SecurityDefinitionsItem { + if x != nil { + return x.Value } return nil } // Automatically-generated message used to represent maps of string as ordered (name,value) pairs. 
type NamedString struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // Map key Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` // Mapped value - Value string `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Value string `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` } -func (m *NamedString) Reset() { *m = NamedString{} } -func (m *NamedString) String() string { return proto.CompactTextString(m) } -func (*NamedString) ProtoMessage() {} -func (*NamedString) Descriptor() ([]byte, []int) { - return fileDescriptor_a43d10d209cd31c2, []int{28} +func (x *NamedString) Reset() { + *x = NamedString{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[28] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *NamedString) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_NamedString.Unmarshal(m, b) -} -func (m *NamedString) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_NamedString.Marshal(b, m, deterministic) -} -func (m *NamedString) XXX_Merge(src proto.Message) { - xxx_messageInfo_NamedString.Merge(m, src) -} -func (m *NamedString) XXX_Size() int { - return xxx_messageInfo_NamedString.Size(m) +func (x *NamedString) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *NamedString) XXX_DiscardUnknown() { - xxx_messageInfo_NamedString.DiscardUnknown(m) + +func (*NamedString) ProtoMessage() {} + +func (x *NamedString) ProtoReflect() protoreflect.Message { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[28] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_NamedString proto.InternalMessageInfo +// Deprecated: Use NamedString.ProtoReflect.Descriptor instead. +func (*NamedString) Descriptor() ([]byte, []int) { + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{28} +} -func (m *NamedString) GetName() string { - if m != nil { - return m.Name +func (x *NamedString) GetName() string { + if x != nil { + return x.Name } return "" } -func (m *NamedString) GetValue() string { - if m != nil { - return m.Value +func (x *NamedString) GetValue() string { + if x != nil { + return x.Value } return "" } // Automatically-generated message used to represent maps of StringArray as ordered (name,value) pairs. 
type NamedStringArray struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // Map key Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` // Mapped value - Value *StringArray `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Value *StringArray `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` } -func (m *NamedStringArray) Reset() { *m = NamedStringArray{} } -func (m *NamedStringArray) String() string { return proto.CompactTextString(m) } -func (*NamedStringArray) ProtoMessage() {} -func (*NamedStringArray) Descriptor() ([]byte, []int) { - return fileDescriptor_a43d10d209cd31c2, []int{29} +func (x *NamedStringArray) Reset() { + *x = NamedStringArray{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[29] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *NamedStringArray) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_NamedStringArray.Unmarshal(m, b) -} -func (m *NamedStringArray) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_NamedStringArray.Marshal(b, m, deterministic) -} -func (m *NamedStringArray) XXX_Merge(src proto.Message) { - xxx_messageInfo_NamedStringArray.Merge(m, src) +func (x *NamedStringArray) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *NamedStringArray) XXX_Size() int { - return xxx_messageInfo_NamedStringArray.Size(m) -} -func (m *NamedStringArray) XXX_DiscardUnknown() { - xxx_messageInfo_NamedStringArray.DiscardUnknown(m) + +func (*NamedStringArray) ProtoMessage() {} + +func (x *NamedStringArray) ProtoReflect() protoreflect.Message { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[29] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_NamedStringArray proto.InternalMessageInfo +// Deprecated: Use NamedStringArray.ProtoReflect.Descriptor instead. 
+func (*NamedStringArray) Descriptor() ([]byte, []int) { + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{29} +} -func (m *NamedStringArray) GetName() string { - if m != nil { - return m.Name +func (x *NamedStringArray) GetName() string { + if x != nil { + return x.Name } return "" } -func (m *NamedStringArray) GetValue() *StringArray { - if m != nil { - return m.Value +func (x *NamedStringArray) GetValue() *StringArray { + if x != nil { + return x.Value } return nil } type NonBodyParameter struct { - // Types that are valid to be assigned to Oneof: + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to Oneof: // *NonBodyParameter_HeaderParameterSubSchema // *NonBodyParameter_FormDataParameterSubSchema // *NonBodyParameter_QueryParameterSubSchema // *NonBodyParameter_PathParameterSubSchema - Oneof isNonBodyParameter_Oneof `protobuf_oneof:"oneof"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Oneof isNonBodyParameter_Oneof `protobuf_oneof:"oneof"` } -func (m *NonBodyParameter) Reset() { *m = NonBodyParameter{} } -func (m *NonBodyParameter) String() string { return proto.CompactTextString(m) } -func (*NonBodyParameter) ProtoMessage() {} -func (*NonBodyParameter) Descriptor() ([]byte, []int) { - return fileDescriptor_a43d10d209cd31c2, []int{30} +func (x *NonBodyParameter) Reset() { + *x = NonBodyParameter{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[30] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *NonBodyParameter) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_NonBodyParameter.Unmarshal(m, b) -} -func (m *NonBodyParameter) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_NonBodyParameter.Marshal(b, m, deterministic) -} -func (m *NonBodyParameter) XXX_Merge(src proto.Message) { - xxx_messageInfo_NonBodyParameter.Merge(m, src) -} -func (m *NonBodyParameter) XXX_Size() int { - return xxx_messageInfo_NonBodyParameter.Size(m) -} -func (m *NonBodyParameter) XXX_DiscardUnknown() { - xxx_messageInfo_NonBodyParameter.DiscardUnknown(m) +func (x *NonBodyParameter) String() string { + return protoimpl.X.MessageStringOf(x) } -var xxx_messageInfo_NonBodyParameter proto.InternalMessageInfo +func (*NonBodyParameter) ProtoMessage() {} -type isNonBodyParameter_Oneof interface { - isNonBodyParameter_Oneof() +func (x *NonBodyParameter) ProtoReflect() protoreflect.Message { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[30] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -type NonBodyParameter_HeaderParameterSubSchema struct { - HeaderParameterSubSchema *HeaderParameterSubSchema `protobuf:"bytes,1,opt,name=header_parameter_sub_schema,json=headerParameterSubSchema,proto3,oneof"` -} - -type NonBodyParameter_FormDataParameterSubSchema struct { - FormDataParameterSubSchema *FormDataParameterSubSchema `protobuf:"bytes,2,opt,name=form_data_parameter_sub_schema,json=formDataParameterSubSchema,proto3,oneof"` -} - -type NonBodyParameter_QueryParameterSubSchema struct { - QueryParameterSubSchema *QueryParameterSubSchema `protobuf:"bytes,3,opt,name=query_parameter_sub_schema,json=queryParameterSubSchema,proto3,oneof"` -} - -type NonBodyParameter_PathParameterSubSchema 
struct { - PathParameterSubSchema *PathParameterSubSchema `protobuf:"bytes,4,opt,name=path_parameter_sub_schema,json=pathParameterSubSchema,proto3,oneof"` +// Deprecated: Use NonBodyParameter.ProtoReflect.Descriptor instead. +func (*NonBodyParameter) Descriptor() ([]byte, []int) { + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{30} } -func (*NonBodyParameter_HeaderParameterSubSchema) isNonBodyParameter_Oneof() {} - -func (*NonBodyParameter_FormDataParameterSubSchema) isNonBodyParameter_Oneof() {} - -func (*NonBodyParameter_QueryParameterSubSchema) isNonBodyParameter_Oneof() {} - -func (*NonBodyParameter_PathParameterSubSchema) isNonBodyParameter_Oneof() {} - func (m *NonBodyParameter) GetOneof() isNonBodyParameter_Oneof { if m != nil { return m.Oneof @@ -2335,408 +2563,470 @@ func (m *NonBodyParameter) GetOneof() isNonBodyParameter_Oneof { return nil } -func (m *NonBodyParameter) GetHeaderParameterSubSchema() *HeaderParameterSubSchema { - if x, ok := m.GetOneof().(*NonBodyParameter_HeaderParameterSubSchema); ok { +func (x *NonBodyParameter) GetHeaderParameterSubSchema() *HeaderParameterSubSchema { + if x, ok := x.GetOneof().(*NonBodyParameter_HeaderParameterSubSchema); ok { return x.HeaderParameterSubSchema } return nil } -func (m *NonBodyParameter) GetFormDataParameterSubSchema() *FormDataParameterSubSchema { - if x, ok := m.GetOneof().(*NonBodyParameter_FormDataParameterSubSchema); ok { +func (x *NonBodyParameter) GetFormDataParameterSubSchema() *FormDataParameterSubSchema { + if x, ok := x.GetOneof().(*NonBodyParameter_FormDataParameterSubSchema); ok { return x.FormDataParameterSubSchema } return nil } -func (m *NonBodyParameter) GetQueryParameterSubSchema() *QueryParameterSubSchema { - if x, ok := m.GetOneof().(*NonBodyParameter_QueryParameterSubSchema); ok { +func (x *NonBodyParameter) GetQueryParameterSubSchema() *QueryParameterSubSchema { + if x, ok := x.GetOneof().(*NonBodyParameter_QueryParameterSubSchema); ok { return x.QueryParameterSubSchema } return nil } -func (m *NonBodyParameter) GetPathParameterSubSchema() *PathParameterSubSchema { - if x, ok := m.GetOneof().(*NonBodyParameter_PathParameterSubSchema); ok { +func (x *NonBodyParameter) GetPathParameterSubSchema() *PathParameterSubSchema { + if x, ok := x.GetOneof().(*NonBodyParameter_PathParameterSubSchema); ok { return x.PathParameterSubSchema } return nil } -// XXX_OneofWrappers is for the internal use of the proto package. 
-func (*NonBodyParameter) XXX_OneofWrappers() []interface{} { - return []interface{}{ - (*NonBodyParameter_HeaderParameterSubSchema)(nil), - (*NonBodyParameter_FormDataParameterSubSchema)(nil), - (*NonBodyParameter_QueryParameterSubSchema)(nil), - (*NonBodyParameter_PathParameterSubSchema)(nil), - } +type isNonBodyParameter_Oneof interface { + isNonBodyParameter_Oneof() } -type Oauth2AccessCodeSecurity struct { - Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` - Flow string `protobuf:"bytes,2,opt,name=flow,proto3" json:"flow,omitempty"` - Scopes *Oauth2Scopes `protobuf:"bytes,3,opt,name=scopes,proto3" json:"scopes,omitempty"` - AuthorizationUrl string `protobuf:"bytes,4,opt,name=authorization_url,json=authorizationUrl,proto3" json:"authorization_url,omitempty"` - TokenUrl string `protobuf:"bytes,5,opt,name=token_url,json=tokenUrl,proto3" json:"token_url,omitempty"` - Description string `protobuf:"bytes,6,opt,name=description,proto3" json:"description,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,7,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Oauth2AccessCodeSecurity) Reset() { *m = Oauth2AccessCodeSecurity{} } -func (m *Oauth2AccessCodeSecurity) String() string { return proto.CompactTextString(m) } -func (*Oauth2AccessCodeSecurity) ProtoMessage() {} -func (*Oauth2AccessCodeSecurity) Descriptor() ([]byte, []int) { - return fileDescriptor_a43d10d209cd31c2, []int{31} +type NonBodyParameter_HeaderParameterSubSchema struct { + HeaderParameterSubSchema *HeaderParameterSubSchema `protobuf:"bytes,1,opt,name=header_parameter_sub_schema,json=headerParameterSubSchema,proto3,oneof"` } -func (m *Oauth2AccessCodeSecurity) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Oauth2AccessCodeSecurity.Unmarshal(m, b) +type NonBodyParameter_FormDataParameterSubSchema struct { + FormDataParameterSubSchema *FormDataParameterSubSchema `protobuf:"bytes,2,opt,name=form_data_parameter_sub_schema,json=formDataParameterSubSchema,proto3,oneof"` +} + +type NonBodyParameter_QueryParameterSubSchema struct { + QueryParameterSubSchema *QueryParameterSubSchema `protobuf:"bytes,3,opt,name=query_parameter_sub_schema,json=queryParameterSubSchema,proto3,oneof"` +} + +type NonBodyParameter_PathParameterSubSchema struct { + PathParameterSubSchema *PathParameterSubSchema `protobuf:"bytes,4,opt,name=path_parameter_sub_schema,json=pathParameterSubSchema,proto3,oneof"` } -func (m *Oauth2AccessCodeSecurity) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Oauth2AccessCodeSecurity.Marshal(b, m, deterministic) + +func (*NonBodyParameter_HeaderParameterSubSchema) isNonBodyParameter_Oneof() {} + +func (*NonBodyParameter_FormDataParameterSubSchema) isNonBodyParameter_Oneof() {} + +func (*NonBodyParameter_QueryParameterSubSchema) isNonBodyParameter_Oneof() {} + +func (*NonBodyParameter_PathParameterSubSchema) isNonBodyParameter_Oneof() {} + +type Oauth2AccessCodeSecurity struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` + Flow string `protobuf:"bytes,2,opt,name=flow,proto3" json:"flow,omitempty"` + Scopes *Oauth2Scopes `protobuf:"bytes,3,opt,name=scopes,proto3" json:"scopes,omitempty"` + AuthorizationUrl string 
`protobuf:"bytes,4,opt,name=authorization_url,json=authorizationUrl,proto3" json:"authorization_url,omitempty"` + TokenUrl string `protobuf:"bytes,5,opt,name=token_url,json=tokenUrl,proto3" json:"token_url,omitempty"` + Description string `protobuf:"bytes,6,opt,name=description,proto3" json:"description,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,7,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` } -func (m *Oauth2AccessCodeSecurity) XXX_Merge(src proto.Message) { - xxx_messageInfo_Oauth2AccessCodeSecurity.Merge(m, src) + +func (x *Oauth2AccessCodeSecurity) Reset() { + *x = Oauth2AccessCodeSecurity{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[31] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *Oauth2AccessCodeSecurity) XXX_Size() int { - return xxx_messageInfo_Oauth2AccessCodeSecurity.Size(m) + +func (x *Oauth2AccessCodeSecurity) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *Oauth2AccessCodeSecurity) XXX_DiscardUnknown() { - xxx_messageInfo_Oauth2AccessCodeSecurity.DiscardUnknown(m) + +func (*Oauth2AccessCodeSecurity) ProtoMessage() {} + +func (x *Oauth2AccessCodeSecurity) ProtoReflect() protoreflect.Message { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[31] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_Oauth2AccessCodeSecurity proto.InternalMessageInfo +// Deprecated: Use Oauth2AccessCodeSecurity.ProtoReflect.Descriptor instead. +func (*Oauth2AccessCodeSecurity) Descriptor() ([]byte, []int) { + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{31} +} -func (m *Oauth2AccessCodeSecurity) GetType() string { - if m != nil { - return m.Type +func (x *Oauth2AccessCodeSecurity) GetType() string { + if x != nil { + return x.Type } return "" } -func (m *Oauth2AccessCodeSecurity) GetFlow() string { - if m != nil { - return m.Flow +func (x *Oauth2AccessCodeSecurity) GetFlow() string { + if x != nil { + return x.Flow } return "" } -func (m *Oauth2AccessCodeSecurity) GetScopes() *Oauth2Scopes { - if m != nil { - return m.Scopes +func (x *Oauth2AccessCodeSecurity) GetScopes() *Oauth2Scopes { + if x != nil { + return x.Scopes } return nil } -func (m *Oauth2AccessCodeSecurity) GetAuthorizationUrl() string { - if m != nil { - return m.AuthorizationUrl +func (x *Oauth2AccessCodeSecurity) GetAuthorizationUrl() string { + if x != nil { + return x.AuthorizationUrl } return "" } -func (m *Oauth2AccessCodeSecurity) GetTokenUrl() string { - if m != nil { - return m.TokenUrl +func (x *Oauth2AccessCodeSecurity) GetTokenUrl() string { + if x != nil { + return x.TokenUrl } return "" } -func (m *Oauth2AccessCodeSecurity) GetDescription() string { - if m != nil { - return m.Description +func (x *Oauth2AccessCodeSecurity) GetDescription() string { + if x != nil { + return x.Description } return "" } -func (m *Oauth2AccessCodeSecurity) GetVendorExtension() []*NamedAny { - if m != nil { - return m.VendorExtension +func (x *Oauth2AccessCodeSecurity) GetVendorExtension() []*NamedAny { + if x != nil { + return x.VendorExtension } return nil } type Oauth2ApplicationSecurity struct { - Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` - Flow string `protobuf:"bytes,2,opt,name=flow,proto3" json:"flow,omitempty"` - Scopes *Oauth2Scopes 
`protobuf:"bytes,3,opt,name=scopes,proto3" json:"scopes,omitempty"` - TokenUrl string `protobuf:"bytes,4,opt,name=token_url,json=tokenUrl,proto3" json:"token_url,omitempty"` - Description string `protobuf:"bytes,5,opt,name=description,proto3" json:"description,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,6,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Oauth2ApplicationSecurity) Reset() { *m = Oauth2ApplicationSecurity{} } -func (m *Oauth2ApplicationSecurity) String() string { return proto.CompactTextString(m) } -func (*Oauth2ApplicationSecurity) ProtoMessage() {} -func (*Oauth2ApplicationSecurity) Descriptor() ([]byte, []int) { - return fileDescriptor_a43d10d209cd31c2, []int{32} -} + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -func (m *Oauth2ApplicationSecurity) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Oauth2ApplicationSecurity.Unmarshal(m, b) + Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` + Flow string `protobuf:"bytes,2,opt,name=flow,proto3" json:"flow,omitempty"` + Scopes *Oauth2Scopes `protobuf:"bytes,3,opt,name=scopes,proto3" json:"scopes,omitempty"` + TokenUrl string `protobuf:"bytes,4,opt,name=token_url,json=tokenUrl,proto3" json:"token_url,omitempty"` + Description string `protobuf:"bytes,5,opt,name=description,proto3" json:"description,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,6,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` } -func (m *Oauth2ApplicationSecurity) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Oauth2ApplicationSecurity.Marshal(b, m, deterministic) -} -func (m *Oauth2ApplicationSecurity) XXX_Merge(src proto.Message) { - xxx_messageInfo_Oauth2ApplicationSecurity.Merge(m, src) + +func (x *Oauth2ApplicationSecurity) Reset() { + *x = Oauth2ApplicationSecurity{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[32] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *Oauth2ApplicationSecurity) XXX_Size() int { - return xxx_messageInfo_Oauth2ApplicationSecurity.Size(m) + +func (x *Oauth2ApplicationSecurity) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *Oauth2ApplicationSecurity) XXX_DiscardUnknown() { - xxx_messageInfo_Oauth2ApplicationSecurity.DiscardUnknown(m) + +func (*Oauth2ApplicationSecurity) ProtoMessage() {} + +func (x *Oauth2ApplicationSecurity) ProtoReflect() protoreflect.Message { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[32] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_Oauth2ApplicationSecurity proto.InternalMessageInfo +// Deprecated: Use Oauth2ApplicationSecurity.ProtoReflect.Descriptor instead. 
+func (*Oauth2ApplicationSecurity) Descriptor() ([]byte, []int) { + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{32} +} -func (m *Oauth2ApplicationSecurity) GetType() string { - if m != nil { - return m.Type +func (x *Oauth2ApplicationSecurity) GetType() string { + if x != nil { + return x.Type } return "" } -func (m *Oauth2ApplicationSecurity) GetFlow() string { - if m != nil { - return m.Flow +func (x *Oauth2ApplicationSecurity) GetFlow() string { + if x != nil { + return x.Flow } return "" } -func (m *Oauth2ApplicationSecurity) GetScopes() *Oauth2Scopes { - if m != nil { - return m.Scopes +func (x *Oauth2ApplicationSecurity) GetScopes() *Oauth2Scopes { + if x != nil { + return x.Scopes } return nil } -func (m *Oauth2ApplicationSecurity) GetTokenUrl() string { - if m != nil { - return m.TokenUrl +func (x *Oauth2ApplicationSecurity) GetTokenUrl() string { + if x != nil { + return x.TokenUrl } return "" } -func (m *Oauth2ApplicationSecurity) GetDescription() string { - if m != nil { - return m.Description +func (x *Oauth2ApplicationSecurity) GetDescription() string { + if x != nil { + return x.Description } return "" } -func (m *Oauth2ApplicationSecurity) GetVendorExtension() []*NamedAny { - if m != nil { - return m.VendorExtension +func (x *Oauth2ApplicationSecurity) GetVendorExtension() []*NamedAny { + if x != nil { + return x.VendorExtension } return nil } type Oauth2ImplicitSecurity struct { - Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` - Flow string `protobuf:"bytes,2,opt,name=flow,proto3" json:"flow,omitempty"` - Scopes *Oauth2Scopes `protobuf:"bytes,3,opt,name=scopes,proto3" json:"scopes,omitempty"` - AuthorizationUrl string `protobuf:"bytes,4,opt,name=authorization_url,json=authorizationUrl,proto3" json:"authorization_url,omitempty"` - Description string `protobuf:"bytes,5,opt,name=description,proto3" json:"description,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,6,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Oauth2ImplicitSecurity) Reset() { *m = Oauth2ImplicitSecurity{} } -func (m *Oauth2ImplicitSecurity) String() string { return proto.CompactTextString(m) } -func (*Oauth2ImplicitSecurity) ProtoMessage() {} -func (*Oauth2ImplicitSecurity) Descriptor() ([]byte, []int) { - return fileDescriptor_a43d10d209cd31c2, []int{33} -} + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -func (m *Oauth2ImplicitSecurity) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Oauth2ImplicitSecurity.Unmarshal(m, b) -} -func (m *Oauth2ImplicitSecurity) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Oauth2ImplicitSecurity.Marshal(b, m, deterministic) + Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` + Flow string `protobuf:"bytes,2,opt,name=flow,proto3" json:"flow,omitempty"` + Scopes *Oauth2Scopes `protobuf:"bytes,3,opt,name=scopes,proto3" json:"scopes,omitempty"` + AuthorizationUrl string `protobuf:"bytes,4,opt,name=authorization_url,json=authorizationUrl,proto3" json:"authorization_url,omitempty"` + Description string `protobuf:"bytes,5,opt,name=description,proto3" json:"description,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,6,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` } -func (m 
*Oauth2ImplicitSecurity) XXX_Merge(src proto.Message) { - xxx_messageInfo_Oauth2ImplicitSecurity.Merge(m, src) + +func (x *Oauth2ImplicitSecurity) Reset() { + *x = Oauth2ImplicitSecurity{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[33] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *Oauth2ImplicitSecurity) XXX_Size() int { - return xxx_messageInfo_Oauth2ImplicitSecurity.Size(m) + +func (x *Oauth2ImplicitSecurity) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *Oauth2ImplicitSecurity) XXX_DiscardUnknown() { - xxx_messageInfo_Oauth2ImplicitSecurity.DiscardUnknown(m) + +func (*Oauth2ImplicitSecurity) ProtoMessage() {} + +func (x *Oauth2ImplicitSecurity) ProtoReflect() protoreflect.Message { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[33] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_Oauth2ImplicitSecurity proto.InternalMessageInfo +// Deprecated: Use Oauth2ImplicitSecurity.ProtoReflect.Descriptor instead. +func (*Oauth2ImplicitSecurity) Descriptor() ([]byte, []int) { + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{33} +} -func (m *Oauth2ImplicitSecurity) GetType() string { - if m != nil { - return m.Type +func (x *Oauth2ImplicitSecurity) GetType() string { + if x != nil { + return x.Type } return "" } -func (m *Oauth2ImplicitSecurity) GetFlow() string { - if m != nil { - return m.Flow +func (x *Oauth2ImplicitSecurity) GetFlow() string { + if x != nil { + return x.Flow } return "" } -func (m *Oauth2ImplicitSecurity) GetScopes() *Oauth2Scopes { - if m != nil { - return m.Scopes +func (x *Oauth2ImplicitSecurity) GetScopes() *Oauth2Scopes { + if x != nil { + return x.Scopes } return nil } -func (m *Oauth2ImplicitSecurity) GetAuthorizationUrl() string { - if m != nil { - return m.AuthorizationUrl +func (x *Oauth2ImplicitSecurity) GetAuthorizationUrl() string { + if x != nil { + return x.AuthorizationUrl } return "" } -func (m *Oauth2ImplicitSecurity) GetDescription() string { - if m != nil { - return m.Description +func (x *Oauth2ImplicitSecurity) GetDescription() string { + if x != nil { + return x.Description } return "" } -func (m *Oauth2ImplicitSecurity) GetVendorExtension() []*NamedAny { - if m != nil { - return m.VendorExtension +func (x *Oauth2ImplicitSecurity) GetVendorExtension() []*NamedAny { + if x != nil { + return x.VendorExtension } return nil } type Oauth2PasswordSecurity struct { - Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` - Flow string `protobuf:"bytes,2,opt,name=flow,proto3" json:"flow,omitempty"` - Scopes *Oauth2Scopes `protobuf:"bytes,3,opt,name=scopes,proto3" json:"scopes,omitempty"` - TokenUrl string `protobuf:"bytes,4,opt,name=token_url,json=tokenUrl,proto3" json:"token_url,omitempty"` - Description string `protobuf:"bytes,5,opt,name=description,proto3" json:"description,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,6,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Oauth2PasswordSecurity) Reset() { *m = Oauth2PasswordSecurity{} } -func (m *Oauth2PasswordSecurity) String() string { return proto.CompactTextString(m) } -func (*Oauth2PasswordSecurity) 
ProtoMessage() {} -func (*Oauth2PasswordSecurity) Descriptor() ([]byte, []int) { - return fileDescriptor_a43d10d209cd31c2, []int{34} -} + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -func (m *Oauth2PasswordSecurity) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Oauth2PasswordSecurity.Unmarshal(m, b) + Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` + Flow string `protobuf:"bytes,2,opt,name=flow,proto3" json:"flow,omitempty"` + Scopes *Oauth2Scopes `protobuf:"bytes,3,opt,name=scopes,proto3" json:"scopes,omitempty"` + TokenUrl string `protobuf:"bytes,4,opt,name=token_url,json=tokenUrl,proto3" json:"token_url,omitempty"` + Description string `protobuf:"bytes,5,opt,name=description,proto3" json:"description,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,6,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` } -func (m *Oauth2PasswordSecurity) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Oauth2PasswordSecurity.Marshal(b, m, deterministic) -} -func (m *Oauth2PasswordSecurity) XXX_Merge(src proto.Message) { - xxx_messageInfo_Oauth2PasswordSecurity.Merge(m, src) + +func (x *Oauth2PasswordSecurity) Reset() { + *x = Oauth2PasswordSecurity{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[34] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *Oauth2PasswordSecurity) XXX_Size() int { - return xxx_messageInfo_Oauth2PasswordSecurity.Size(m) + +func (x *Oauth2PasswordSecurity) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *Oauth2PasswordSecurity) XXX_DiscardUnknown() { - xxx_messageInfo_Oauth2PasswordSecurity.DiscardUnknown(m) + +func (*Oauth2PasswordSecurity) ProtoMessage() {} + +func (x *Oauth2PasswordSecurity) ProtoReflect() protoreflect.Message { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[34] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_Oauth2PasswordSecurity proto.InternalMessageInfo +// Deprecated: Use Oauth2PasswordSecurity.ProtoReflect.Descriptor instead. 
+func (*Oauth2PasswordSecurity) Descriptor() ([]byte, []int) { + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{34} +} -func (m *Oauth2PasswordSecurity) GetType() string { - if m != nil { - return m.Type +func (x *Oauth2PasswordSecurity) GetType() string { + if x != nil { + return x.Type } return "" } -func (m *Oauth2PasswordSecurity) GetFlow() string { - if m != nil { - return m.Flow +func (x *Oauth2PasswordSecurity) GetFlow() string { + if x != nil { + return x.Flow } return "" } -func (m *Oauth2PasswordSecurity) GetScopes() *Oauth2Scopes { - if m != nil { - return m.Scopes +func (x *Oauth2PasswordSecurity) GetScopes() *Oauth2Scopes { + if x != nil { + return x.Scopes } return nil } -func (m *Oauth2PasswordSecurity) GetTokenUrl() string { - if m != nil { - return m.TokenUrl +func (x *Oauth2PasswordSecurity) GetTokenUrl() string { + if x != nil { + return x.TokenUrl } return "" } -func (m *Oauth2PasswordSecurity) GetDescription() string { - if m != nil { - return m.Description +func (x *Oauth2PasswordSecurity) GetDescription() string { + if x != nil { + return x.Description } return "" } -func (m *Oauth2PasswordSecurity) GetVendorExtension() []*NamedAny { - if m != nil { - return m.VendorExtension +func (x *Oauth2PasswordSecurity) GetVendorExtension() []*NamedAny { + if x != nil { + return x.VendorExtension } return nil } type Oauth2Scopes struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + AdditionalProperties []*NamedString `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties,proto3" json:"additional_properties,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` } -func (m *Oauth2Scopes) Reset() { *m = Oauth2Scopes{} } -func (m *Oauth2Scopes) String() string { return proto.CompactTextString(m) } -func (*Oauth2Scopes) ProtoMessage() {} -func (*Oauth2Scopes) Descriptor() ([]byte, []int) { - return fileDescriptor_a43d10d209cd31c2, []int{35} +func (x *Oauth2Scopes) Reset() { + *x = Oauth2Scopes{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[35] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *Oauth2Scopes) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Oauth2Scopes.Unmarshal(m, b) -} -func (m *Oauth2Scopes) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Oauth2Scopes.Marshal(b, m, deterministic) -} -func (m *Oauth2Scopes) XXX_Merge(src proto.Message) { - xxx_messageInfo_Oauth2Scopes.Merge(m, src) +func (x *Oauth2Scopes) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *Oauth2Scopes) XXX_Size() int { - return xxx_messageInfo_Oauth2Scopes.Size(m) -} -func (m *Oauth2Scopes) XXX_DiscardUnknown() { - xxx_messageInfo_Oauth2Scopes.DiscardUnknown(m) + +func (*Oauth2Scopes) ProtoMessage() {} + +func (x *Oauth2Scopes) ProtoReflect() protoreflect.Message { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[35] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_Oauth2Scopes proto.InternalMessageInfo +// Deprecated: Use Oauth2Scopes.ProtoReflect.Descriptor instead. 
+func (*Oauth2Scopes) Descriptor() ([]byte, []int) { + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{35} +} -func (m *Oauth2Scopes) GetAdditionalProperties() []*NamedString { - if m != nil { - return m.AdditionalProperties +func (x *Oauth2Scopes) GetAdditionalProperties() []*NamedString { + if x != nil { + return x.AdditionalProperties } return nil } type Operation struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + Tags []string `protobuf:"bytes,1,rep,name=tags,proto3" json:"tags,omitempty"` // A brief summary of the operation. Summary string `protobuf:"bytes,2,opt,name=summary,proto3" json:"summary,omitempty"` @@ -2753,182 +3043,178 @@ type Operation struct { Parameters []*ParametersItem `protobuf:"bytes,8,rep,name=parameters,proto3" json:"parameters,omitempty"` Responses *Responses `protobuf:"bytes,9,opt,name=responses,proto3" json:"responses,omitempty"` // The transfer protocol of the API. - Schemes []string `protobuf:"bytes,10,rep,name=schemes,proto3" json:"schemes,omitempty"` - Deprecated bool `protobuf:"varint,11,opt,name=deprecated,proto3" json:"deprecated,omitempty"` - Security []*SecurityRequirement `protobuf:"bytes,12,rep,name=security,proto3" json:"security,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,13,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Operation) Reset() { *m = Operation{} } -func (m *Operation) String() string { return proto.CompactTextString(m) } -func (*Operation) ProtoMessage() {} -func (*Operation) Descriptor() ([]byte, []int) { - return fileDescriptor_a43d10d209cd31c2, []int{36} + Schemes []string `protobuf:"bytes,10,rep,name=schemes,proto3" json:"schemes,omitempty"` + Deprecated bool `protobuf:"varint,11,opt,name=deprecated,proto3" json:"deprecated,omitempty"` + Security []*SecurityRequirement `protobuf:"bytes,12,rep,name=security,proto3" json:"security,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,13,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` } -func (m *Operation) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Operation.Unmarshal(m, b) -} -func (m *Operation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Operation.Marshal(b, m, deterministic) -} -func (m *Operation) XXX_Merge(src proto.Message) { - xxx_messageInfo_Operation.Merge(m, src) +func (x *Operation) Reset() { + *x = Operation{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[36] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *Operation) XXX_Size() int { - return xxx_messageInfo_Operation.Size(m) + +func (x *Operation) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *Operation) XXX_DiscardUnknown() { - xxx_messageInfo_Operation.DiscardUnknown(m) + +func (*Operation) ProtoMessage() {} + +func (x *Operation) ProtoReflect() protoreflect.Message { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[36] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_Operation proto.InternalMessageInfo +// Deprecated: Use Operation.ProtoReflect.Descriptor instead. 
+func (*Operation) Descriptor() ([]byte, []int) { + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{36} +} -func (m *Operation) GetTags() []string { - if m != nil { - return m.Tags +func (x *Operation) GetTags() []string { + if x != nil { + return x.Tags } return nil } -func (m *Operation) GetSummary() string { - if m != nil { - return m.Summary +func (x *Operation) GetSummary() string { + if x != nil { + return x.Summary } return "" } -func (m *Operation) GetDescription() string { - if m != nil { - return m.Description +func (x *Operation) GetDescription() string { + if x != nil { + return x.Description } return "" } -func (m *Operation) GetExternalDocs() *ExternalDocs { - if m != nil { - return m.ExternalDocs +func (x *Operation) GetExternalDocs() *ExternalDocs { + if x != nil { + return x.ExternalDocs } return nil } -func (m *Operation) GetOperationId() string { - if m != nil { - return m.OperationId +func (x *Operation) GetOperationId() string { + if x != nil { + return x.OperationId } return "" } -func (m *Operation) GetProduces() []string { - if m != nil { - return m.Produces +func (x *Operation) GetProduces() []string { + if x != nil { + return x.Produces } return nil } -func (m *Operation) GetConsumes() []string { - if m != nil { - return m.Consumes +func (x *Operation) GetConsumes() []string { + if x != nil { + return x.Consumes } return nil } -func (m *Operation) GetParameters() []*ParametersItem { - if m != nil { - return m.Parameters +func (x *Operation) GetParameters() []*ParametersItem { + if x != nil { + return x.Parameters } return nil } -func (m *Operation) GetResponses() *Responses { - if m != nil { - return m.Responses +func (x *Operation) GetResponses() *Responses { + if x != nil { + return x.Responses } return nil } -func (m *Operation) GetSchemes() []string { - if m != nil { - return m.Schemes +func (x *Operation) GetSchemes() []string { + if x != nil { + return x.Schemes } return nil } -func (m *Operation) GetDeprecated() bool { - if m != nil { - return m.Deprecated +func (x *Operation) GetDeprecated() bool { + if x != nil { + return x.Deprecated } return false } -func (m *Operation) GetSecurity() []*SecurityRequirement { - if m != nil { - return m.Security +func (x *Operation) GetSecurity() []*SecurityRequirement { + if x != nil { + return x.Security } return nil } -func (m *Operation) GetVendorExtension() []*NamedAny { - if m != nil { - return m.VendorExtension +func (x *Operation) GetVendorExtension() []*NamedAny { + if x != nil { + return x.VendorExtension } return nil } type Parameter struct { - // Types that are valid to be assigned to Oneof: + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to Oneof: // *Parameter_BodyParameter // *Parameter_NonBodyParameter - Oneof isParameter_Oneof `protobuf_oneof:"oneof"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Oneof isParameter_Oneof `protobuf_oneof:"oneof"` } -func (m *Parameter) Reset() { *m = Parameter{} } -func (m *Parameter) String() string { return proto.CompactTextString(m) } -func (*Parameter) ProtoMessage() {} -func (*Parameter) Descriptor() ([]byte, []int) { - return fileDescriptor_a43d10d209cd31c2, []int{37} +func (x *Parameter) Reset() { + *x = Parameter{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[37] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m 
*Parameter) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Parameter.Unmarshal(m, b) -} -func (m *Parameter) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Parameter.Marshal(b, m, deterministic) -} -func (m *Parameter) XXX_Merge(src proto.Message) { - xxx_messageInfo_Parameter.Merge(m, src) -} -func (m *Parameter) XXX_Size() int { - return xxx_messageInfo_Parameter.Size(m) -} -func (m *Parameter) XXX_DiscardUnknown() { - xxx_messageInfo_Parameter.DiscardUnknown(m) +func (x *Parameter) String() string { + return protoimpl.X.MessageStringOf(x) } -var xxx_messageInfo_Parameter proto.InternalMessageInfo - -type isParameter_Oneof interface { - isParameter_Oneof() -} +func (*Parameter) ProtoMessage() {} -type Parameter_BodyParameter struct { - BodyParameter *BodyParameter `protobuf:"bytes,1,opt,name=body_parameter,json=bodyParameter,proto3,oneof"` +func (x *Parameter) ProtoReflect() protoreflect.Message { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[37] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -type Parameter_NonBodyParameter struct { - NonBodyParameter *NonBodyParameter `protobuf:"bytes,2,opt,name=non_body_parameter,json=nonBodyParameter,proto3,oneof"` +// Deprecated: Use Parameter.ProtoReflect.Descriptor instead. +func (*Parameter) Descriptor() ([]byte, []int) { + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{37} } -func (*Parameter_BodyParameter) isParameter_Oneof() {} - -func (*Parameter_NonBodyParameter) isParameter_Oneof() {} - func (m *Parameter) GetOneof() isParameter_Oneof { if m != nil { return m.Oneof @@ -2936,119 +3222,127 @@ func (m *Parameter) GetOneof() isParameter_Oneof { return nil } -func (m *Parameter) GetBodyParameter() *BodyParameter { - if x, ok := m.GetOneof().(*Parameter_BodyParameter); ok { +func (x *Parameter) GetBodyParameter() *BodyParameter { + if x, ok := x.GetOneof().(*Parameter_BodyParameter); ok { return x.BodyParameter } return nil } -func (m *Parameter) GetNonBodyParameter() *NonBodyParameter { - if x, ok := m.GetOneof().(*Parameter_NonBodyParameter); ok { +func (x *Parameter) GetNonBodyParameter() *NonBodyParameter { + if x, ok := x.GetOneof().(*Parameter_NonBodyParameter); ok { return x.NonBodyParameter } return nil } -// XXX_OneofWrappers is for the internal use of the proto package. 
-func (*Parameter) XXX_OneofWrappers() []interface{} { - return []interface{}{ - (*Parameter_BodyParameter)(nil), - (*Parameter_NonBodyParameter)(nil), - } +type isParameter_Oneof interface { + isParameter_Oneof() +} + +type Parameter_BodyParameter struct { + BodyParameter *BodyParameter `protobuf:"bytes,1,opt,name=body_parameter,json=bodyParameter,proto3,oneof"` +} + +type Parameter_NonBodyParameter struct { + NonBodyParameter *NonBodyParameter `protobuf:"bytes,2,opt,name=non_body_parameter,json=nonBodyParameter,proto3,oneof"` } +func (*Parameter_BodyParameter) isParameter_Oneof() {} + +func (*Parameter_NonBodyParameter) isParameter_Oneof() {} + // One or more JSON representations for parameters type ParameterDefinitions struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + AdditionalProperties []*NamedParameter `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties,proto3" json:"additional_properties,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` } -func (m *ParameterDefinitions) Reset() { *m = ParameterDefinitions{} } -func (m *ParameterDefinitions) String() string { return proto.CompactTextString(m) } -func (*ParameterDefinitions) ProtoMessage() {} -func (*ParameterDefinitions) Descriptor() ([]byte, []int) { - return fileDescriptor_a43d10d209cd31c2, []int{38} +func (x *ParameterDefinitions) Reset() { + *x = ParameterDefinitions{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[38] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *ParameterDefinitions) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ParameterDefinitions.Unmarshal(m, b) +func (x *ParameterDefinitions) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *ParameterDefinitions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ParameterDefinitions.Marshal(b, m, deterministic) -} -func (m *ParameterDefinitions) XXX_Merge(src proto.Message) { - xxx_messageInfo_ParameterDefinitions.Merge(m, src) -} -func (m *ParameterDefinitions) XXX_Size() int { - return xxx_messageInfo_ParameterDefinitions.Size(m) -} -func (m *ParameterDefinitions) XXX_DiscardUnknown() { - xxx_messageInfo_ParameterDefinitions.DiscardUnknown(m) + +func (*ParameterDefinitions) ProtoMessage() {} + +func (x *ParameterDefinitions) ProtoReflect() protoreflect.Message { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[38] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_ParameterDefinitions proto.InternalMessageInfo +// Deprecated: Use ParameterDefinitions.ProtoReflect.Descriptor instead. 
+func (*ParameterDefinitions) Descriptor() ([]byte, []int) { + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{38} +} -func (m *ParameterDefinitions) GetAdditionalProperties() []*NamedParameter { - if m != nil { - return m.AdditionalProperties +func (x *ParameterDefinitions) GetAdditionalProperties() []*NamedParameter { + if x != nil { + return x.AdditionalProperties } return nil } type ParametersItem struct { - // Types that are valid to be assigned to Oneof: + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to Oneof: // *ParametersItem_Parameter // *ParametersItem_JsonReference - Oneof isParametersItem_Oneof `protobuf_oneof:"oneof"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Oneof isParametersItem_Oneof `protobuf_oneof:"oneof"` } -func (m *ParametersItem) Reset() { *m = ParametersItem{} } -func (m *ParametersItem) String() string { return proto.CompactTextString(m) } -func (*ParametersItem) ProtoMessage() {} -func (*ParametersItem) Descriptor() ([]byte, []int) { - return fileDescriptor_a43d10d209cd31c2, []int{39} +func (x *ParametersItem) Reset() { + *x = ParametersItem{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[39] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *ParametersItem) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ParametersItem.Unmarshal(m, b) -} -func (m *ParametersItem) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ParametersItem.Marshal(b, m, deterministic) -} -func (m *ParametersItem) XXX_Merge(src proto.Message) { - xxx_messageInfo_ParametersItem.Merge(m, src) -} -func (m *ParametersItem) XXX_Size() int { - return xxx_messageInfo_ParametersItem.Size(m) -} -func (m *ParametersItem) XXX_DiscardUnknown() { - xxx_messageInfo_ParametersItem.DiscardUnknown(m) +func (x *ParametersItem) String() string { + return protoimpl.X.MessageStringOf(x) } -var xxx_messageInfo_ParametersItem proto.InternalMessageInfo - -type isParametersItem_Oneof interface { - isParametersItem_Oneof() -} +func (*ParametersItem) ProtoMessage() {} -type ParametersItem_Parameter struct { - Parameter *Parameter `protobuf:"bytes,1,opt,name=parameter,proto3,oneof"` +func (x *ParametersItem) ProtoReflect() protoreflect.Message { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[39] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -type ParametersItem_JsonReference struct { - JsonReference *JsonReference `protobuf:"bytes,2,opt,name=json_reference,json=jsonReference,proto3,oneof"` +// Deprecated: Use ParametersItem.ProtoReflect.Descriptor instead. 
+func (*ParametersItem) Descriptor() ([]byte, []int) { + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{39} } -func (*ParametersItem_Parameter) isParametersItem_Oneof() {} - -func (*ParametersItem_JsonReference) isParametersItem_Oneof() {} - func (m *ParametersItem) GetOneof() isParametersItem_Oneof { if m != nil { return m.Oneof @@ -3056,29 +3350,41 @@ func (m *ParametersItem) GetOneof() isParametersItem_Oneof { return nil } -func (m *ParametersItem) GetParameter() *Parameter { - if x, ok := m.GetOneof().(*ParametersItem_Parameter); ok { +func (x *ParametersItem) GetParameter() *Parameter { + if x, ok := x.GetOneof().(*ParametersItem_Parameter); ok { return x.Parameter } return nil } -func (m *ParametersItem) GetJsonReference() *JsonReference { - if x, ok := m.GetOneof().(*ParametersItem_JsonReference); ok { +func (x *ParametersItem) GetJsonReference() *JsonReference { + if x, ok := x.GetOneof().(*ParametersItem_JsonReference); ok { return x.JsonReference } return nil } -// XXX_OneofWrappers is for the internal use of the proto package. -func (*ParametersItem) XXX_OneofWrappers() []interface{} { - return []interface{}{ - (*ParametersItem_Parameter)(nil), - (*ParametersItem_JsonReference)(nil), - } +type isParametersItem_Oneof interface { + isParametersItem_Oneof() +} + +type ParametersItem_Parameter struct { + Parameter *Parameter `protobuf:"bytes,1,opt,name=parameter,proto3,oneof"` +} + +type ParametersItem_JsonReference struct { + JsonReference *JsonReference `protobuf:"bytes,2,opt,name=json_reference,json=jsonReference,proto3,oneof"` } +func (*ParametersItem_Parameter) isParametersItem_Oneof() {} + +func (*ParametersItem_JsonReference) isParametersItem_Oneof() {} + type PathItem struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + XRef string `protobuf:"bytes,1,opt,name=_ref,json=Ref,proto3" json:"_ref,omitempty"` Get *Operation `protobuf:"bytes,2,opt,name=get,proto3" json:"get,omitempty"` Put *Operation `protobuf:"bytes,3,opt,name=put,proto3" json:"put,omitempty"` @@ -3088,109 +3394,117 @@ type PathItem struct { Head *Operation `protobuf:"bytes,7,opt,name=head,proto3" json:"head,omitempty"` Patch *Operation `protobuf:"bytes,8,opt,name=patch,proto3" json:"patch,omitempty"` // The parameters needed to send a valid API call. 
- Parameters []*ParametersItem `protobuf:"bytes,9,rep,name=parameters,proto3" json:"parameters,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,10,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Parameters []*ParametersItem `protobuf:"bytes,9,rep,name=parameters,proto3" json:"parameters,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,10,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` } -func (m *PathItem) Reset() { *m = PathItem{} } -func (m *PathItem) String() string { return proto.CompactTextString(m) } -func (*PathItem) ProtoMessage() {} -func (*PathItem) Descriptor() ([]byte, []int) { - return fileDescriptor_a43d10d209cd31c2, []int{40} +func (x *PathItem) Reset() { + *x = PathItem{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[40] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *PathItem) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_PathItem.Unmarshal(m, b) -} -func (m *PathItem) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_PathItem.Marshal(b, m, deterministic) -} -func (m *PathItem) XXX_Merge(src proto.Message) { - xxx_messageInfo_PathItem.Merge(m, src) +func (x *PathItem) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *PathItem) XXX_Size() int { - return xxx_messageInfo_PathItem.Size(m) -} -func (m *PathItem) XXX_DiscardUnknown() { - xxx_messageInfo_PathItem.DiscardUnknown(m) + +func (*PathItem) ProtoMessage() {} + +func (x *PathItem) ProtoReflect() protoreflect.Message { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[40] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_PathItem proto.InternalMessageInfo +// Deprecated: Use PathItem.ProtoReflect.Descriptor instead. 
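Annotation: the PathItem accessors that follow keep the generated-getter contract seen elsewhere in this file: each getter checks its receiver before dereferencing, so access is safe to chain on nil messages. A hypothetical usage sketch (describePath is not in the source):

    func describePath(item *PathItem) (string, *Operation) {
            // Safe even when item == nil: generated getters return the
            // field's zero value instead of panicking on a nil receiver.
            return item.GetXRef(), item.GetGet()
    }
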
+func (*PathItem) Descriptor() ([]byte, []int) { + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{40} +} -func (m *PathItem) GetXRef() string { - if m != nil { - return m.XRef +func (x *PathItem) GetXRef() string { + if x != nil { + return x.XRef } return "" } -func (m *PathItem) GetGet() *Operation { - if m != nil { - return m.Get +func (x *PathItem) GetGet() *Operation { + if x != nil { + return x.Get } return nil } -func (m *PathItem) GetPut() *Operation { - if m != nil { - return m.Put +func (x *PathItem) GetPut() *Operation { + if x != nil { + return x.Put } return nil } -func (m *PathItem) GetPost() *Operation { - if m != nil { - return m.Post +func (x *PathItem) GetPost() *Operation { + if x != nil { + return x.Post } return nil } -func (m *PathItem) GetDelete() *Operation { - if m != nil { - return m.Delete +func (x *PathItem) GetDelete() *Operation { + if x != nil { + return x.Delete } return nil } -func (m *PathItem) GetOptions() *Operation { - if m != nil { - return m.Options +func (x *PathItem) GetOptions() *Operation { + if x != nil { + return x.Options } return nil } -func (m *PathItem) GetHead() *Operation { - if m != nil { - return m.Head +func (x *PathItem) GetHead() *Operation { + if x != nil { + return x.Head } return nil } -func (m *PathItem) GetPatch() *Operation { - if m != nil { - return m.Patch +func (x *PathItem) GetPatch() *Operation { + if x != nil { + return x.Patch } return nil } -func (m *PathItem) GetParameters() []*ParametersItem { - if m != nil { - return m.Parameters +func (x *PathItem) GetParameters() []*ParametersItem { + if x != nil { + return x.Parameters } return nil } -func (m *PathItem) GetVendorExtension() []*NamedAny { - if m != nil { - return m.VendorExtension +func (x *PathItem) GetVendorExtension() []*NamedAny { + if x != nil { + return x.VendorExtension } return nil } type PathParameterSubSchema struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // Determines whether or not this parameter is required or optional. Required bool `protobuf:"varint,1,opt,name=required,proto3" json:"required,omitempty"` // Determines the location of the parameter. @@ -3198,472 +3512,504 @@ type PathParameterSubSchema struct { // A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed. Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"` // The name of the parameter. 
- Name string `protobuf:"bytes,4,opt,name=name,proto3" json:"name,omitempty"` - Type string `protobuf:"bytes,5,opt,name=type,proto3" json:"type,omitempty"` - Format string `protobuf:"bytes,6,opt,name=format,proto3" json:"format,omitempty"` - Items *PrimitivesItems `protobuf:"bytes,7,opt,name=items,proto3" json:"items,omitempty"` - CollectionFormat string `protobuf:"bytes,8,opt,name=collection_format,json=collectionFormat,proto3" json:"collection_format,omitempty"` - Default *Any `protobuf:"bytes,9,opt,name=default,proto3" json:"default,omitempty"` - Maximum float64 `protobuf:"fixed64,10,opt,name=maximum,proto3" json:"maximum,omitempty"` - ExclusiveMaximum bool `protobuf:"varint,11,opt,name=exclusive_maximum,json=exclusiveMaximum,proto3" json:"exclusive_maximum,omitempty"` - Minimum float64 `protobuf:"fixed64,12,opt,name=minimum,proto3" json:"minimum,omitempty"` - ExclusiveMinimum bool `protobuf:"varint,13,opt,name=exclusive_minimum,json=exclusiveMinimum,proto3" json:"exclusive_minimum,omitempty"` - MaxLength int64 `protobuf:"varint,14,opt,name=max_length,json=maxLength,proto3" json:"max_length,omitempty"` - MinLength int64 `protobuf:"varint,15,opt,name=min_length,json=minLength,proto3" json:"min_length,omitempty"` - Pattern string `protobuf:"bytes,16,opt,name=pattern,proto3" json:"pattern,omitempty"` - MaxItems int64 `protobuf:"varint,17,opt,name=max_items,json=maxItems,proto3" json:"max_items,omitempty"` - MinItems int64 `protobuf:"varint,18,opt,name=min_items,json=minItems,proto3" json:"min_items,omitempty"` - UniqueItems bool `protobuf:"varint,19,opt,name=unique_items,json=uniqueItems,proto3" json:"unique_items,omitempty"` - Enum []*Any `protobuf:"bytes,20,rep,name=enum,proto3" json:"enum,omitempty"` - MultipleOf float64 `protobuf:"fixed64,21,opt,name=multiple_of,json=multipleOf,proto3" json:"multiple_of,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,22,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *PathParameterSubSchema) Reset() { *m = PathParameterSubSchema{} } -func (m *PathParameterSubSchema) String() string { return proto.CompactTextString(m) } -func (*PathParameterSubSchema) ProtoMessage() {} + Name string `protobuf:"bytes,4,opt,name=name,proto3" json:"name,omitempty"` + Type string `protobuf:"bytes,5,opt,name=type,proto3" json:"type,omitempty"` + Format string `protobuf:"bytes,6,opt,name=format,proto3" json:"format,omitempty"` + Items *PrimitivesItems `protobuf:"bytes,7,opt,name=items,proto3" json:"items,omitempty"` + CollectionFormat string `protobuf:"bytes,8,opt,name=collection_format,json=collectionFormat,proto3" json:"collection_format,omitempty"` + Default *Any `protobuf:"bytes,9,opt,name=default,proto3" json:"default,omitempty"` + Maximum float64 `protobuf:"fixed64,10,opt,name=maximum,proto3" json:"maximum,omitempty"` + ExclusiveMaximum bool `protobuf:"varint,11,opt,name=exclusive_maximum,json=exclusiveMaximum,proto3" json:"exclusive_maximum,omitempty"` + Minimum float64 `protobuf:"fixed64,12,opt,name=minimum,proto3" json:"minimum,omitempty"` + ExclusiveMinimum bool `protobuf:"varint,13,opt,name=exclusive_minimum,json=exclusiveMinimum,proto3" json:"exclusive_minimum,omitempty"` + MaxLength int64 `protobuf:"varint,14,opt,name=max_length,json=maxLength,proto3" json:"max_length,omitempty"` + MinLength int64 `protobuf:"varint,15,opt,name=min_length,json=minLength,proto3" 
json:"min_length,omitempty"` + Pattern string `protobuf:"bytes,16,opt,name=pattern,proto3" json:"pattern,omitempty"` + MaxItems int64 `protobuf:"varint,17,opt,name=max_items,json=maxItems,proto3" json:"max_items,omitempty"` + MinItems int64 `protobuf:"varint,18,opt,name=min_items,json=minItems,proto3" json:"min_items,omitempty"` + UniqueItems bool `protobuf:"varint,19,opt,name=unique_items,json=uniqueItems,proto3" json:"unique_items,omitempty"` + Enum []*Any `protobuf:"bytes,20,rep,name=enum,proto3" json:"enum,omitempty"` + MultipleOf float64 `protobuf:"fixed64,21,opt,name=multiple_of,json=multipleOf,proto3" json:"multiple_of,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,22,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` +} + +func (x *PathParameterSubSchema) Reset() { + *x = PathParameterSubSchema{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[41] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PathParameterSubSchema) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PathParameterSubSchema) ProtoMessage() {} + +func (x *PathParameterSubSchema) ProtoReflect() protoreflect.Message { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[41] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PathParameterSubSchema.ProtoReflect.Descriptor instead. func (*PathParameterSubSchema) Descriptor() ([]byte, []int) { - return fileDescriptor_a43d10d209cd31c2, []int{41} -} - -func (m *PathParameterSubSchema) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_PathParameterSubSchema.Unmarshal(m, b) -} -func (m *PathParameterSubSchema) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_PathParameterSubSchema.Marshal(b, m, deterministic) -} -func (m *PathParameterSubSchema) XXX_Merge(src proto.Message) { - xxx_messageInfo_PathParameterSubSchema.Merge(m, src) -} -func (m *PathParameterSubSchema) XXX_Size() int { - return xxx_messageInfo_PathParameterSubSchema.Size(m) -} -func (m *PathParameterSubSchema) XXX_DiscardUnknown() { - xxx_messageInfo_PathParameterSubSchema.DiscardUnknown(m) + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{41} } -var xxx_messageInfo_PathParameterSubSchema proto.InternalMessageInfo - -func (m *PathParameterSubSchema) GetRequired() bool { - if m != nil { - return m.Required +func (x *PathParameterSubSchema) GetRequired() bool { + if x != nil { + return x.Required } return false } -func (m *PathParameterSubSchema) GetIn() string { - if m != nil { - return m.In +func (x *PathParameterSubSchema) GetIn() string { + if x != nil { + return x.In } return "" } -func (m *PathParameterSubSchema) GetDescription() string { - if m != nil { - return m.Description +func (x *PathParameterSubSchema) GetDescription() string { + if x != nil { + return x.Description } return "" } -func (m *PathParameterSubSchema) GetName() string { - if m != nil { - return m.Name +func (x *PathParameterSubSchema) GetName() string { + if x != nil { + return x.Name } return "" } -func (m *PathParameterSubSchema) GetType() string { - if m != nil { - return m.Type +func (x *PathParameterSubSchema) GetType() string { + if x != nil { + return x.Type } return "" } -func (m *PathParameterSubSchema) GetFormat() string { - if m != nil { - return 
m.Format +func (x *PathParameterSubSchema) GetFormat() string { + if x != nil { + return x.Format } return "" } -func (m *PathParameterSubSchema) GetItems() *PrimitivesItems { - if m != nil { - return m.Items +func (x *PathParameterSubSchema) GetItems() *PrimitivesItems { + if x != nil { + return x.Items } return nil } -func (m *PathParameterSubSchema) GetCollectionFormat() string { - if m != nil { - return m.CollectionFormat +func (x *PathParameterSubSchema) GetCollectionFormat() string { + if x != nil { + return x.CollectionFormat } return "" } -func (m *PathParameterSubSchema) GetDefault() *Any { - if m != nil { - return m.Default +func (x *PathParameterSubSchema) GetDefault() *Any { + if x != nil { + return x.Default } return nil } -func (m *PathParameterSubSchema) GetMaximum() float64 { - if m != nil { - return m.Maximum +func (x *PathParameterSubSchema) GetMaximum() float64 { + if x != nil { + return x.Maximum } return 0 } -func (m *PathParameterSubSchema) GetExclusiveMaximum() bool { - if m != nil { - return m.ExclusiveMaximum +func (x *PathParameterSubSchema) GetExclusiveMaximum() bool { + if x != nil { + return x.ExclusiveMaximum } return false } -func (m *PathParameterSubSchema) GetMinimum() float64 { - if m != nil { - return m.Minimum +func (x *PathParameterSubSchema) GetMinimum() float64 { + if x != nil { + return x.Minimum } return 0 } -func (m *PathParameterSubSchema) GetExclusiveMinimum() bool { - if m != nil { - return m.ExclusiveMinimum +func (x *PathParameterSubSchema) GetExclusiveMinimum() bool { + if x != nil { + return x.ExclusiveMinimum } return false } -func (m *PathParameterSubSchema) GetMaxLength() int64 { - if m != nil { - return m.MaxLength +func (x *PathParameterSubSchema) GetMaxLength() int64 { + if x != nil { + return x.MaxLength } return 0 } -func (m *PathParameterSubSchema) GetMinLength() int64 { - if m != nil { - return m.MinLength +func (x *PathParameterSubSchema) GetMinLength() int64 { + if x != nil { + return x.MinLength } return 0 } -func (m *PathParameterSubSchema) GetPattern() string { - if m != nil { - return m.Pattern +func (x *PathParameterSubSchema) GetPattern() string { + if x != nil { + return x.Pattern } return "" } -func (m *PathParameterSubSchema) GetMaxItems() int64 { - if m != nil { - return m.MaxItems +func (x *PathParameterSubSchema) GetMaxItems() int64 { + if x != nil { + return x.MaxItems } return 0 } -func (m *PathParameterSubSchema) GetMinItems() int64 { - if m != nil { - return m.MinItems +func (x *PathParameterSubSchema) GetMinItems() int64 { + if x != nil { + return x.MinItems } return 0 } -func (m *PathParameterSubSchema) GetUniqueItems() bool { - if m != nil { - return m.UniqueItems +func (x *PathParameterSubSchema) GetUniqueItems() bool { + if x != nil { + return x.UniqueItems } return false } -func (m *PathParameterSubSchema) GetEnum() []*Any { - if m != nil { - return m.Enum +func (x *PathParameterSubSchema) GetEnum() []*Any { + if x != nil { + return x.Enum } return nil } -func (m *PathParameterSubSchema) GetMultipleOf() float64 { - if m != nil { - return m.MultipleOf +func (x *PathParameterSubSchema) GetMultipleOf() float64 { + if x != nil { + return x.MultipleOf } return 0 } -func (m *PathParameterSubSchema) GetVendorExtension() []*NamedAny { - if m != nil { - return m.VendorExtension +func (x *PathParameterSubSchema) GetVendorExtension() []*NamedAny { + if x != nil { + return x.VendorExtension } return nil } // Relative paths to the individual endpoints. They must be relative to the 'basePath'. 
type Paths struct { - VendorExtension []*NamedAny `protobuf:"bytes,1,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` - Path []*NamedPathItem `protobuf:"bytes,2,rep,name=path,proto3" json:"path,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -func (m *Paths) Reset() { *m = Paths{} } -func (m *Paths) String() string { return proto.CompactTextString(m) } -func (*Paths) ProtoMessage() {} -func (*Paths) Descriptor() ([]byte, []int) { - return fileDescriptor_a43d10d209cd31c2, []int{42} + VendorExtension []*NamedAny `protobuf:"bytes,1,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` + Path []*NamedPathItem `protobuf:"bytes,2,rep,name=path,proto3" json:"path,omitempty"` } -func (m *Paths) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Paths.Unmarshal(m, b) -} -func (m *Paths) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Paths.Marshal(b, m, deterministic) -} -func (m *Paths) XXX_Merge(src proto.Message) { - xxx_messageInfo_Paths.Merge(m, src) +func (x *Paths) Reset() { + *x = Paths{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[42] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *Paths) XXX_Size() int { - return xxx_messageInfo_Paths.Size(m) + +func (x *Paths) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *Paths) XXX_DiscardUnknown() { - xxx_messageInfo_Paths.DiscardUnknown(m) + +func (*Paths) ProtoMessage() {} + +func (x *Paths) ProtoReflect() protoreflect.Message { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[42] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_Paths proto.InternalMessageInfo +// Deprecated: Use Paths.ProtoReflect.Descriptor instead. 
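Annotation: Paths models its entries as a repeated NamedPathItem rather than a Go map, which keeps document order stable across marshalling; lookup is therefore a linear scan. A sketch under that assumption (findPath is hypothetical, and NamedPathItem's Name/Value accessors are assumed from the naming convention used by the other Named* messages in this file):

    func findPath(p *Paths, name string) *PathItem {
            for _, np := range p.GetPath() { // nil-safe even when p == nil
                    if np.GetName() == name {
                            return np.GetValue()
                    }
            }
            return nil
    }
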
+func (*Paths) Descriptor() ([]byte, []int) { + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{42} +} -func (m *Paths) GetVendorExtension() []*NamedAny { - if m != nil { - return m.VendorExtension +func (x *Paths) GetVendorExtension() []*NamedAny { + if x != nil { + return x.VendorExtension } return nil } -func (m *Paths) GetPath() []*NamedPathItem { - if m != nil { - return m.Path +func (x *Paths) GetPath() []*NamedPathItem { + if x != nil { + return x.Path } return nil } type PrimitivesItems struct { - Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` - Format string `protobuf:"bytes,2,opt,name=format,proto3" json:"format,omitempty"` - Items *PrimitivesItems `protobuf:"bytes,3,opt,name=items,proto3" json:"items,omitempty"` - CollectionFormat string `protobuf:"bytes,4,opt,name=collection_format,json=collectionFormat,proto3" json:"collection_format,omitempty"` - Default *Any `protobuf:"bytes,5,opt,name=default,proto3" json:"default,omitempty"` - Maximum float64 `protobuf:"fixed64,6,opt,name=maximum,proto3" json:"maximum,omitempty"` - ExclusiveMaximum bool `protobuf:"varint,7,opt,name=exclusive_maximum,json=exclusiveMaximum,proto3" json:"exclusive_maximum,omitempty"` - Minimum float64 `protobuf:"fixed64,8,opt,name=minimum,proto3" json:"minimum,omitempty"` - ExclusiveMinimum bool `protobuf:"varint,9,opt,name=exclusive_minimum,json=exclusiveMinimum,proto3" json:"exclusive_minimum,omitempty"` - MaxLength int64 `protobuf:"varint,10,opt,name=max_length,json=maxLength,proto3" json:"max_length,omitempty"` - MinLength int64 `protobuf:"varint,11,opt,name=min_length,json=minLength,proto3" json:"min_length,omitempty"` - Pattern string `protobuf:"bytes,12,opt,name=pattern,proto3" json:"pattern,omitempty"` - MaxItems int64 `protobuf:"varint,13,opt,name=max_items,json=maxItems,proto3" json:"max_items,omitempty"` - MinItems int64 `protobuf:"varint,14,opt,name=min_items,json=minItems,proto3" json:"min_items,omitempty"` - UniqueItems bool `protobuf:"varint,15,opt,name=unique_items,json=uniqueItems,proto3" json:"unique_items,omitempty"` - Enum []*Any `protobuf:"bytes,16,rep,name=enum,proto3" json:"enum,omitempty"` - MultipleOf float64 `protobuf:"fixed64,17,opt,name=multiple_of,json=multipleOf,proto3" json:"multiple_of,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,18,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *PrimitivesItems) Reset() { *m = PrimitivesItems{} } -func (m *PrimitivesItems) String() string { return proto.CompactTextString(m) } -func (*PrimitivesItems) ProtoMessage() {} + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` + Format string `protobuf:"bytes,2,opt,name=format,proto3" json:"format,omitempty"` + Items *PrimitivesItems `protobuf:"bytes,3,opt,name=items,proto3" json:"items,omitempty"` + CollectionFormat string `protobuf:"bytes,4,opt,name=collection_format,json=collectionFormat,proto3" json:"collection_format,omitempty"` + Default *Any `protobuf:"bytes,5,opt,name=default,proto3" json:"default,omitempty"` + Maximum float64 `protobuf:"fixed64,6,opt,name=maximum,proto3" json:"maximum,omitempty"` + ExclusiveMaximum bool `protobuf:"varint,7,opt,name=exclusive_maximum,json=exclusiveMaximum,proto3" json:"exclusive_maximum,omitempty"` + Minimum 
float64 `protobuf:"fixed64,8,opt,name=minimum,proto3" json:"minimum,omitempty"` + ExclusiveMinimum bool `protobuf:"varint,9,opt,name=exclusive_minimum,json=exclusiveMinimum,proto3" json:"exclusive_minimum,omitempty"` + MaxLength int64 `protobuf:"varint,10,opt,name=max_length,json=maxLength,proto3" json:"max_length,omitempty"` + MinLength int64 `protobuf:"varint,11,opt,name=min_length,json=minLength,proto3" json:"min_length,omitempty"` + Pattern string `protobuf:"bytes,12,opt,name=pattern,proto3" json:"pattern,omitempty"` + MaxItems int64 `protobuf:"varint,13,opt,name=max_items,json=maxItems,proto3" json:"max_items,omitempty"` + MinItems int64 `protobuf:"varint,14,opt,name=min_items,json=minItems,proto3" json:"min_items,omitempty"` + UniqueItems bool `protobuf:"varint,15,opt,name=unique_items,json=uniqueItems,proto3" json:"unique_items,omitempty"` + Enum []*Any `protobuf:"bytes,16,rep,name=enum,proto3" json:"enum,omitempty"` + MultipleOf float64 `protobuf:"fixed64,17,opt,name=multiple_of,json=multipleOf,proto3" json:"multiple_of,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,18,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` +} + +func (x *PrimitivesItems) Reset() { + *x = PrimitivesItems{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[43] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PrimitivesItems) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PrimitivesItems) ProtoMessage() {} + +func (x *PrimitivesItems) ProtoReflect() protoreflect.Message { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[43] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PrimitivesItems.ProtoReflect.Descriptor instead. 
func (*PrimitivesItems) Descriptor() ([]byte, []int) { - return fileDescriptor_a43d10d209cd31c2, []int{43} -} - -func (m *PrimitivesItems) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_PrimitivesItems.Unmarshal(m, b) -} -func (m *PrimitivesItems) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_PrimitivesItems.Marshal(b, m, deterministic) -} -func (m *PrimitivesItems) XXX_Merge(src proto.Message) { - xxx_messageInfo_PrimitivesItems.Merge(m, src) -} -func (m *PrimitivesItems) XXX_Size() int { - return xxx_messageInfo_PrimitivesItems.Size(m) + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{43} } -func (m *PrimitivesItems) XXX_DiscardUnknown() { - xxx_messageInfo_PrimitivesItems.DiscardUnknown(m) -} - -var xxx_messageInfo_PrimitivesItems proto.InternalMessageInfo -func (m *PrimitivesItems) GetType() string { - if m != nil { - return m.Type +func (x *PrimitivesItems) GetType() string { + if x != nil { + return x.Type } return "" } -func (m *PrimitivesItems) GetFormat() string { - if m != nil { - return m.Format +func (x *PrimitivesItems) GetFormat() string { + if x != nil { + return x.Format } return "" } -func (m *PrimitivesItems) GetItems() *PrimitivesItems { - if m != nil { - return m.Items +func (x *PrimitivesItems) GetItems() *PrimitivesItems { + if x != nil { + return x.Items } return nil } -func (m *PrimitivesItems) GetCollectionFormat() string { - if m != nil { - return m.CollectionFormat +func (x *PrimitivesItems) GetCollectionFormat() string { + if x != nil { + return x.CollectionFormat } return "" } -func (m *PrimitivesItems) GetDefault() *Any { - if m != nil { - return m.Default +func (x *PrimitivesItems) GetDefault() *Any { + if x != nil { + return x.Default } return nil } -func (m *PrimitivesItems) GetMaximum() float64 { - if m != nil { - return m.Maximum +func (x *PrimitivesItems) GetMaximum() float64 { + if x != nil { + return x.Maximum } return 0 } -func (m *PrimitivesItems) GetExclusiveMaximum() bool { - if m != nil { - return m.ExclusiveMaximum +func (x *PrimitivesItems) GetExclusiveMaximum() bool { + if x != nil { + return x.ExclusiveMaximum } return false } -func (m *PrimitivesItems) GetMinimum() float64 { - if m != nil { - return m.Minimum +func (x *PrimitivesItems) GetMinimum() float64 { + if x != nil { + return x.Minimum } return 0 } -func (m *PrimitivesItems) GetExclusiveMinimum() bool { - if m != nil { - return m.ExclusiveMinimum +func (x *PrimitivesItems) GetExclusiveMinimum() bool { + if x != nil { + return x.ExclusiveMinimum } return false } -func (m *PrimitivesItems) GetMaxLength() int64 { - if m != nil { - return m.MaxLength +func (x *PrimitivesItems) GetMaxLength() int64 { + if x != nil { + return x.MaxLength } return 0 } -func (m *PrimitivesItems) GetMinLength() int64 { - if m != nil { - return m.MinLength +func (x *PrimitivesItems) GetMinLength() int64 { + if x != nil { + return x.MinLength } return 0 } -func (m *PrimitivesItems) GetPattern() string { - if m != nil { - return m.Pattern +func (x *PrimitivesItems) GetPattern() string { + if x != nil { + return x.Pattern } return "" } -func (m *PrimitivesItems) GetMaxItems() int64 { - if m != nil { - return m.MaxItems +func (x *PrimitivesItems) GetMaxItems() int64 { + if x != nil { + return x.MaxItems } return 0 } -func (m *PrimitivesItems) GetMinItems() int64 { - if m != nil { - return m.MinItems +func (x *PrimitivesItems) GetMinItems() int64 { + if x != nil { + return x.MinItems } return 0 } -func (m *PrimitivesItems) GetUniqueItems() bool { - if m != 
nil { - return m.UniqueItems +func (x *PrimitivesItems) GetUniqueItems() bool { + if x != nil { + return x.UniqueItems } return false } -func (m *PrimitivesItems) GetEnum() []*Any { - if m != nil { - return m.Enum +func (x *PrimitivesItems) GetEnum() []*Any { + if x != nil { + return x.Enum } return nil } -func (m *PrimitivesItems) GetMultipleOf() float64 { - if m != nil { - return m.MultipleOf +func (x *PrimitivesItems) GetMultipleOf() float64 { + if x != nil { + return x.MultipleOf } return 0 } -func (m *PrimitivesItems) GetVendorExtension() []*NamedAny { - if m != nil { - return m.VendorExtension +func (x *PrimitivesItems) GetVendorExtension() []*NamedAny { + if x != nil { + return x.VendorExtension } return nil } type Properties struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + AdditionalProperties []*NamedSchema `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties,proto3" json:"additional_properties,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` } -func (m *Properties) Reset() { *m = Properties{} } -func (m *Properties) String() string { return proto.CompactTextString(m) } -func (*Properties) ProtoMessage() {} -func (*Properties) Descriptor() ([]byte, []int) { - return fileDescriptor_a43d10d209cd31c2, []int{44} +func (x *Properties) Reset() { + *x = Properties{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[44] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *Properties) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Properties.Unmarshal(m, b) -} -func (m *Properties) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Properties.Marshal(b, m, deterministic) -} -func (m *Properties) XXX_Merge(src proto.Message) { - xxx_messageInfo_Properties.Merge(m, src) -} -func (m *Properties) XXX_Size() int { - return xxx_messageInfo_Properties.Size(m) +func (x *Properties) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *Properties) XXX_DiscardUnknown() { - xxx_messageInfo_Properties.DiscardUnknown(m) + +func (*Properties) ProtoMessage() {} + +func (x *Properties) ProtoReflect() protoreflect.Message { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[44] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_Properties proto.InternalMessageInfo +// Deprecated: Use Properties.ProtoReflect.Descriptor instead. +func (*Properties) Descriptor() ([]byte, []int) { + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{44} +} -func (m *Properties) GetAdditionalProperties() []*NamedSchema { - if m != nil { - return m.AdditionalProperties +func (x *Properties) GetAdditionalProperties() []*NamedSchema { + if x != nil { + return x.AdditionalProperties } return nil } type QueryParameterSubSchema struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // Determines whether or not this parameter is required or optional. Required bool `protobuf:"varint,1,opt,name=required,proto3" json:"required,omitempty"` // Determines the location of the parameter. @@ -3673,378 +4019,390 @@ type QueryParameterSubSchema struct { // The name of the parameter. 
Name string `protobuf:"bytes,4,opt,name=name,proto3" json:"name,omitempty"` // allows sending a parameter by name only or with an empty value. - AllowEmptyValue bool `protobuf:"varint,5,opt,name=allow_empty_value,json=allowEmptyValue,proto3" json:"allow_empty_value,omitempty"` - Type string `protobuf:"bytes,6,opt,name=type,proto3" json:"type,omitempty"` - Format string `protobuf:"bytes,7,opt,name=format,proto3" json:"format,omitempty"` - Items *PrimitivesItems `protobuf:"bytes,8,opt,name=items,proto3" json:"items,omitempty"` - CollectionFormat string `protobuf:"bytes,9,opt,name=collection_format,json=collectionFormat,proto3" json:"collection_format,omitempty"` - Default *Any `protobuf:"bytes,10,opt,name=default,proto3" json:"default,omitempty"` - Maximum float64 `protobuf:"fixed64,11,opt,name=maximum,proto3" json:"maximum,omitempty"` - ExclusiveMaximum bool `protobuf:"varint,12,opt,name=exclusive_maximum,json=exclusiveMaximum,proto3" json:"exclusive_maximum,omitempty"` - Minimum float64 `protobuf:"fixed64,13,opt,name=minimum,proto3" json:"minimum,omitempty"` - ExclusiveMinimum bool `protobuf:"varint,14,opt,name=exclusive_minimum,json=exclusiveMinimum,proto3" json:"exclusive_minimum,omitempty"` - MaxLength int64 `protobuf:"varint,15,opt,name=max_length,json=maxLength,proto3" json:"max_length,omitempty"` - MinLength int64 `protobuf:"varint,16,opt,name=min_length,json=minLength,proto3" json:"min_length,omitempty"` - Pattern string `protobuf:"bytes,17,opt,name=pattern,proto3" json:"pattern,omitempty"` - MaxItems int64 `protobuf:"varint,18,opt,name=max_items,json=maxItems,proto3" json:"max_items,omitempty"` - MinItems int64 `protobuf:"varint,19,opt,name=min_items,json=minItems,proto3" json:"min_items,omitempty"` - UniqueItems bool `protobuf:"varint,20,opt,name=unique_items,json=uniqueItems,proto3" json:"unique_items,omitempty"` - Enum []*Any `protobuf:"bytes,21,rep,name=enum,proto3" json:"enum,omitempty"` - MultipleOf float64 `protobuf:"fixed64,22,opt,name=multiple_of,json=multipleOf,proto3" json:"multiple_of,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,23,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *QueryParameterSubSchema) Reset() { *m = QueryParameterSubSchema{} } -func (m *QueryParameterSubSchema) String() string { return proto.CompactTextString(m) } -func (*QueryParameterSubSchema) ProtoMessage() {} + AllowEmptyValue bool `protobuf:"varint,5,opt,name=allow_empty_value,json=allowEmptyValue,proto3" json:"allow_empty_value,omitempty"` + Type string `protobuf:"bytes,6,opt,name=type,proto3" json:"type,omitempty"` + Format string `protobuf:"bytes,7,opt,name=format,proto3" json:"format,omitempty"` + Items *PrimitivesItems `protobuf:"bytes,8,opt,name=items,proto3" json:"items,omitempty"` + CollectionFormat string `protobuf:"bytes,9,opt,name=collection_format,json=collectionFormat,proto3" json:"collection_format,omitempty"` + Default *Any `protobuf:"bytes,10,opt,name=default,proto3" json:"default,omitempty"` + Maximum float64 `protobuf:"fixed64,11,opt,name=maximum,proto3" json:"maximum,omitempty"` + ExclusiveMaximum bool `protobuf:"varint,12,opt,name=exclusive_maximum,json=exclusiveMaximum,proto3" json:"exclusive_maximum,omitempty"` + Minimum float64 `protobuf:"fixed64,13,opt,name=minimum,proto3" json:"minimum,omitempty"` + ExclusiveMinimum bool 
`protobuf:"varint,14,opt,name=exclusive_minimum,json=exclusiveMinimum,proto3" json:"exclusive_minimum,omitempty"` + MaxLength int64 `protobuf:"varint,15,opt,name=max_length,json=maxLength,proto3" json:"max_length,omitempty"` + MinLength int64 `protobuf:"varint,16,opt,name=min_length,json=minLength,proto3" json:"min_length,omitempty"` + Pattern string `protobuf:"bytes,17,opt,name=pattern,proto3" json:"pattern,omitempty"` + MaxItems int64 `protobuf:"varint,18,opt,name=max_items,json=maxItems,proto3" json:"max_items,omitempty"` + MinItems int64 `protobuf:"varint,19,opt,name=min_items,json=minItems,proto3" json:"min_items,omitempty"` + UniqueItems bool `protobuf:"varint,20,opt,name=unique_items,json=uniqueItems,proto3" json:"unique_items,omitempty"` + Enum []*Any `protobuf:"bytes,21,rep,name=enum,proto3" json:"enum,omitempty"` + MultipleOf float64 `protobuf:"fixed64,22,opt,name=multiple_of,json=multipleOf,proto3" json:"multiple_of,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,23,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` +} + +func (x *QueryParameterSubSchema) Reset() { + *x = QueryParameterSubSchema{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[45] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *QueryParameterSubSchema) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*QueryParameterSubSchema) ProtoMessage() {} + +func (x *QueryParameterSubSchema) ProtoReflect() protoreflect.Message { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[45] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use QueryParameterSubSchema.ProtoReflect.Descriptor instead. 
func (*QueryParameterSubSchema) Descriptor() ([]byte, []int) { - return fileDescriptor_a43d10d209cd31c2, []int{45} + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{45} } -func (m *QueryParameterSubSchema) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_QueryParameterSubSchema.Unmarshal(m, b) -} -func (m *QueryParameterSubSchema) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_QueryParameterSubSchema.Marshal(b, m, deterministic) -} -func (m *QueryParameterSubSchema) XXX_Merge(src proto.Message) { - xxx_messageInfo_QueryParameterSubSchema.Merge(m, src) -} -func (m *QueryParameterSubSchema) XXX_Size() int { - return xxx_messageInfo_QueryParameterSubSchema.Size(m) -} -func (m *QueryParameterSubSchema) XXX_DiscardUnknown() { - xxx_messageInfo_QueryParameterSubSchema.DiscardUnknown(m) -} - -var xxx_messageInfo_QueryParameterSubSchema proto.InternalMessageInfo - -func (m *QueryParameterSubSchema) GetRequired() bool { - if m != nil { - return m.Required +func (x *QueryParameterSubSchema) GetRequired() bool { + if x != nil { + return x.Required } return false } -func (m *QueryParameterSubSchema) GetIn() string { - if m != nil { - return m.In +func (x *QueryParameterSubSchema) GetIn() string { + if x != nil { + return x.In } return "" } -func (m *QueryParameterSubSchema) GetDescription() string { - if m != nil { - return m.Description +func (x *QueryParameterSubSchema) GetDescription() string { + if x != nil { + return x.Description } return "" } -func (m *QueryParameterSubSchema) GetName() string { - if m != nil { - return m.Name +func (x *QueryParameterSubSchema) GetName() string { + if x != nil { + return x.Name } return "" } -func (m *QueryParameterSubSchema) GetAllowEmptyValue() bool { - if m != nil { - return m.AllowEmptyValue +func (x *QueryParameterSubSchema) GetAllowEmptyValue() bool { + if x != nil { + return x.AllowEmptyValue } return false } -func (m *QueryParameterSubSchema) GetType() string { - if m != nil { - return m.Type +func (x *QueryParameterSubSchema) GetType() string { + if x != nil { + return x.Type } return "" } -func (m *QueryParameterSubSchema) GetFormat() string { - if m != nil { - return m.Format +func (x *QueryParameterSubSchema) GetFormat() string { + if x != nil { + return x.Format } return "" } -func (m *QueryParameterSubSchema) GetItems() *PrimitivesItems { - if m != nil { - return m.Items +func (x *QueryParameterSubSchema) GetItems() *PrimitivesItems { + if x != nil { + return x.Items } return nil } -func (m *QueryParameterSubSchema) GetCollectionFormat() string { - if m != nil { - return m.CollectionFormat +func (x *QueryParameterSubSchema) GetCollectionFormat() string { + if x != nil { + return x.CollectionFormat } return "" } -func (m *QueryParameterSubSchema) GetDefault() *Any { - if m != nil { - return m.Default +func (x *QueryParameterSubSchema) GetDefault() *Any { + if x != nil { + return x.Default } return nil } -func (m *QueryParameterSubSchema) GetMaximum() float64 { - if m != nil { - return m.Maximum +func (x *QueryParameterSubSchema) GetMaximum() float64 { + if x != nil { + return x.Maximum } return 0 } -func (m *QueryParameterSubSchema) GetExclusiveMaximum() bool { - if m != nil { - return m.ExclusiveMaximum +func (x *QueryParameterSubSchema) GetExclusiveMaximum() bool { + if x != nil { + return x.ExclusiveMaximum } return false } -func (m *QueryParameterSubSchema) GetMinimum() float64 { - if m != nil { - return m.Minimum +func (x *QueryParameterSubSchema) GetMinimum() float64 { + if x != nil { + 
return x.Minimum } return 0 } -func (m *QueryParameterSubSchema) GetExclusiveMinimum() bool { - if m != nil { - return m.ExclusiveMinimum +func (x *QueryParameterSubSchema) GetExclusiveMinimum() bool { + if x != nil { + return x.ExclusiveMinimum } return false } -func (m *QueryParameterSubSchema) GetMaxLength() int64 { - if m != nil { - return m.MaxLength +func (x *QueryParameterSubSchema) GetMaxLength() int64 { + if x != nil { + return x.MaxLength } return 0 } -func (m *QueryParameterSubSchema) GetMinLength() int64 { - if m != nil { - return m.MinLength +func (x *QueryParameterSubSchema) GetMinLength() int64 { + if x != nil { + return x.MinLength } return 0 } -func (m *QueryParameterSubSchema) GetPattern() string { - if m != nil { - return m.Pattern +func (x *QueryParameterSubSchema) GetPattern() string { + if x != nil { + return x.Pattern } return "" } -func (m *QueryParameterSubSchema) GetMaxItems() int64 { - if m != nil { - return m.MaxItems +func (x *QueryParameterSubSchema) GetMaxItems() int64 { + if x != nil { + return x.MaxItems } return 0 } -func (m *QueryParameterSubSchema) GetMinItems() int64 { - if m != nil { - return m.MinItems +func (x *QueryParameterSubSchema) GetMinItems() int64 { + if x != nil { + return x.MinItems } return 0 } -func (m *QueryParameterSubSchema) GetUniqueItems() bool { - if m != nil { - return m.UniqueItems +func (x *QueryParameterSubSchema) GetUniqueItems() bool { + if x != nil { + return x.UniqueItems } return false } -func (m *QueryParameterSubSchema) GetEnum() []*Any { - if m != nil { - return m.Enum +func (x *QueryParameterSubSchema) GetEnum() []*Any { + if x != nil { + return x.Enum } return nil } -func (m *QueryParameterSubSchema) GetMultipleOf() float64 { - if m != nil { - return m.MultipleOf +func (x *QueryParameterSubSchema) GetMultipleOf() float64 { + if x != nil { + return x.MultipleOf } return 0 } -func (m *QueryParameterSubSchema) GetVendorExtension() []*NamedAny { - if m != nil { - return m.VendorExtension +func (x *QueryParameterSubSchema) GetVendorExtension() []*NamedAny { + if x != nil { + return x.VendorExtension } return nil } type Response struct { - Description string `protobuf:"bytes,1,opt,name=description,proto3" json:"description,omitempty"` - Schema *SchemaItem `protobuf:"bytes,2,opt,name=schema,proto3" json:"schema,omitempty"` - Headers *Headers `protobuf:"bytes,3,opt,name=headers,proto3" json:"headers,omitempty"` - Examples *Examples `protobuf:"bytes,4,opt,name=examples,proto3" json:"examples,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,5,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Response) Reset() { *m = Response{} } -func (m *Response) String() string { return proto.CompactTextString(m) } -func (*Response) ProtoMessage() {} -func (*Response) Descriptor() ([]byte, []int) { - return fileDescriptor_a43d10d209cd31c2, []int{46} -} + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -func (m *Response) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Response.Unmarshal(m, b) -} -func (m *Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Response.Marshal(b, m, deterministic) + Description string `protobuf:"bytes,1,opt,name=description,proto3" json:"description,omitempty"` + Schema *SchemaItem `protobuf:"bytes,2,opt,name=schema,proto3" 
json:"schema,omitempty"` + Headers *Headers `protobuf:"bytes,3,opt,name=headers,proto3" json:"headers,omitempty"` + Examples *Examples `protobuf:"bytes,4,opt,name=examples,proto3" json:"examples,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,5,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` } -func (m *Response) XXX_Merge(src proto.Message) { - xxx_messageInfo_Response.Merge(m, src) + +func (x *Response) Reset() { + *x = Response{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[46] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *Response) XXX_Size() int { - return xxx_messageInfo_Response.Size(m) + +func (x *Response) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *Response) XXX_DiscardUnknown() { - xxx_messageInfo_Response.DiscardUnknown(m) + +func (*Response) ProtoMessage() {} + +func (x *Response) ProtoReflect() protoreflect.Message { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[46] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_Response proto.InternalMessageInfo +// Deprecated: Use Response.ProtoReflect.Descriptor instead. +func (*Response) Descriptor() ([]byte, []int) { + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{46} +} -func (m *Response) GetDescription() string { - if m != nil { - return m.Description +func (x *Response) GetDescription() string { + if x != nil { + return x.Description } return "" } -func (m *Response) GetSchema() *SchemaItem { - if m != nil { - return m.Schema +func (x *Response) GetSchema() *SchemaItem { + if x != nil { + return x.Schema } return nil } -func (m *Response) GetHeaders() *Headers { - if m != nil { - return m.Headers +func (x *Response) GetHeaders() *Headers { + if x != nil { + return x.Headers } return nil } -func (m *Response) GetExamples() *Examples { - if m != nil { - return m.Examples +func (x *Response) GetExamples() *Examples { + if x != nil { + return x.Examples } return nil } -func (m *Response) GetVendorExtension() []*NamedAny { - if m != nil { - return m.VendorExtension +func (x *Response) GetVendorExtension() []*NamedAny { + if x != nil { + return x.VendorExtension } return nil } -// One or more JSON representations for parameters +// One or more JSON representations for responses type ResponseDefinitions struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + AdditionalProperties []*NamedResponse `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties,proto3" json:"additional_properties,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` } -func (m *ResponseDefinitions) Reset() { *m = ResponseDefinitions{} } -func (m *ResponseDefinitions) String() string { return proto.CompactTextString(m) } -func (*ResponseDefinitions) ProtoMessage() {} -func (*ResponseDefinitions) Descriptor() ([]byte, []int) { - return fileDescriptor_a43d10d209cd31c2, []int{47} +func (x *ResponseDefinitions) Reset() { + *x = ResponseDefinitions{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[47] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *ResponseDefinitions) XXX_Unmarshal(b 
[]byte) error { - return xxx_messageInfo_ResponseDefinitions.Unmarshal(m, b) -} -func (m *ResponseDefinitions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ResponseDefinitions.Marshal(b, m, deterministic) +func (x *ResponseDefinitions) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *ResponseDefinitions) XXX_Merge(src proto.Message) { - xxx_messageInfo_ResponseDefinitions.Merge(m, src) -} -func (m *ResponseDefinitions) XXX_Size() int { - return xxx_messageInfo_ResponseDefinitions.Size(m) -} -func (m *ResponseDefinitions) XXX_DiscardUnknown() { - xxx_messageInfo_ResponseDefinitions.DiscardUnknown(m) + +func (*ResponseDefinitions) ProtoMessage() {} + +func (x *ResponseDefinitions) ProtoReflect() protoreflect.Message { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[47] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_ResponseDefinitions proto.InternalMessageInfo +// Deprecated: Use ResponseDefinitions.ProtoReflect.Descriptor instead. +func (*ResponseDefinitions) Descriptor() ([]byte, []int) { + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{47} +} -func (m *ResponseDefinitions) GetAdditionalProperties() []*NamedResponse { - if m != nil { - return m.AdditionalProperties +func (x *ResponseDefinitions) GetAdditionalProperties() []*NamedResponse { + if x != nil { + return x.AdditionalProperties } return nil } type ResponseValue struct { - // Types that are valid to be assigned to Oneof: + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to Oneof: // *ResponseValue_Response // *ResponseValue_JsonReference - Oneof isResponseValue_Oneof `protobuf_oneof:"oneof"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Oneof isResponseValue_Oneof `protobuf_oneof:"oneof"` } -func (m *ResponseValue) Reset() { *m = ResponseValue{} } -func (m *ResponseValue) String() string { return proto.CompactTextString(m) } -func (*ResponseValue) ProtoMessage() {} -func (*ResponseValue) Descriptor() ([]byte, []int) { - return fileDescriptor_a43d10d209cd31c2, []int{48} +func (x *ResponseValue) Reset() { + *x = ResponseValue{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[48] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *ResponseValue) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ResponseValue.Unmarshal(m, b) -} -func (m *ResponseValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ResponseValue.Marshal(b, m, deterministic) -} -func (m *ResponseValue) XXX_Merge(src proto.Message) { - xxx_messageInfo_ResponseValue.Merge(m, src) -} -func (m *ResponseValue) XXX_Size() int { - return xxx_messageInfo_ResponseValue.Size(m) -} -func (m *ResponseValue) XXX_DiscardUnknown() { - xxx_messageInfo_ResponseValue.DiscardUnknown(m) +func (x *ResponseValue) String() string { + return protoimpl.X.MessageStringOf(x) } -var xxx_messageInfo_ResponseValue proto.InternalMessageInfo +func (*ResponseValue) ProtoMessage() {} -type isResponseValue_Oneof interface { - isResponseValue_Oneof() -} - -type ResponseValue_Response struct { - Response *Response `protobuf:"bytes,1,opt,name=response,proto3,oneof"` +func (x 
*ResponseValue) ProtoReflect() protoreflect.Message { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[48] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -type ResponseValue_JsonReference struct { - JsonReference *JsonReference `protobuf:"bytes,2,opt,name=json_reference,json=jsonReference,proto3,oneof"` +// Deprecated: Use ResponseValue.ProtoReflect.Descriptor instead. +func (*ResponseValue) Descriptor() ([]byte, []int) { + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{48} } -func (*ResponseValue_Response) isResponseValue_Oneof() {} - -func (*ResponseValue_JsonReference) isResponseValue_Oneof() {} - func (m *ResponseValue) GetOneof() isResponseValue_Oneof { if m != nil { return m.Oneof @@ -4052,78 +4410,98 @@ func (m *ResponseValue) GetOneof() isResponseValue_Oneof { return nil } -func (m *ResponseValue) GetResponse() *Response { - if x, ok := m.GetOneof().(*ResponseValue_Response); ok { +func (x *ResponseValue) GetResponse() *Response { + if x, ok := x.GetOneof().(*ResponseValue_Response); ok { return x.Response } return nil } -func (m *ResponseValue) GetJsonReference() *JsonReference { - if x, ok := m.GetOneof().(*ResponseValue_JsonReference); ok { +func (x *ResponseValue) GetJsonReference() *JsonReference { + if x, ok := x.GetOneof().(*ResponseValue_JsonReference); ok { return x.JsonReference } return nil } -// XXX_OneofWrappers is for the internal use of the proto package. -func (*ResponseValue) XXX_OneofWrappers() []interface{} { - return []interface{}{ - (*ResponseValue_Response)(nil), - (*ResponseValue_JsonReference)(nil), - } +type isResponseValue_Oneof interface { + isResponseValue_Oneof() } -// Response objects names can either be any valid HTTP status code or 'default'. -type Responses struct { - ResponseCode []*NamedResponseValue `protobuf:"bytes,1,rep,name=response_code,json=responseCode,proto3" json:"response_code,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,2,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` +type ResponseValue_Response struct { + Response *Response `protobuf:"bytes,1,opt,name=response,proto3,oneof"` } -func (m *Responses) Reset() { *m = Responses{} } -func (m *Responses) String() string { return proto.CompactTextString(m) } -func (*Responses) ProtoMessage() {} -func (*Responses) Descriptor() ([]byte, []int) { - return fileDescriptor_a43d10d209cd31c2, []int{49} +type ResponseValue_JsonReference struct { + JsonReference *JsonReference `protobuf:"bytes,2,opt,name=json_reference,json=jsonReference,proto3,oneof"` } -func (m *Responses) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Responses.Unmarshal(m, b) -} -func (m *Responses) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Responses.Marshal(b, m, deterministic) +func (*ResponseValue_Response) isResponseValue_Oneof() {} + +func (*ResponseValue_JsonReference) isResponseValue_Oneof() {} + +// Response objects names can either be any valid HTTP status code or 'default'. 
+type Responses struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ResponseCode []*NamedResponseValue `protobuf:"bytes,1,rep,name=response_code,json=responseCode,proto3" json:"response_code,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,2,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` } -func (m *Responses) XXX_Merge(src proto.Message) { - xxx_messageInfo_Responses.Merge(m, src) + +func (x *Responses) Reset() { + *x = Responses{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[49] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *Responses) XXX_Size() int { - return xxx_messageInfo_Responses.Size(m) + +func (x *Responses) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *Responses) XXX_DiscardUnknown() { - xxx_messageInfo_Responses.DiscardUnknown(m) + +func (*Responses) ProtoMessage() {} + +func (x *Responses) ProtoReflect() protoreflect.Message { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[49] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_Responses proto.InternalMessageInfo +// Deprecated: Use Responses.ProtoReflect.Descriptor instead. +func (*Responses) Descriptor() ([]byte, []int) { + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{49} +} -func (m *Responses) GetResponseCode() []*NamedResponseValue { - if m != nil { - return m.ResponseCode +func (x *Responses) GetResponseCode() []*NamedResponseValue { + if x != nil { + return x.ResponseCode } return nil } -func (m *Responses) GetVendorExtension() []*NamedAny { - if m != nil { - return m.VendorExtension +func (x *Responses) GetVendorExtension() []*NamedAny { + if x != nil { + return x.VendorExtension } return nil } // A deterministic version of a JSON Schema object. 
type Schema struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + XRef string `protobuf:"bytes,1,opt,name=_ref,json=Ref,proto3" json:"_ref,omitempty"` Format string `protobuf:"bytes,2,opt,name=format,proto3" json:"format,omitempty"` Title string `protobuf:"bytes,3,opt,name=title,proto3" json:"title,omitempty"` @@ -4155,304 +4533,300 @@ type Schema struct { ExternalDocs *ExternalDocs `protobuf:"bytes,29,opt,name=external_docs,json=externalDocs,proto3" json:"external_docs,omitempty"` Example *Any `protobuf:"bytes,30,opt,name=example,proto3" json:"example,omitempty"` VendorExtension []*NamedAny `protobuf:"bytes,31,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` } -func (m *Schema) Reset() { *m = Schema{} } -func (m *Schema) String() string { return proto.CompactTextString(m) } -func (*Schema) ProtoMessage() {} -func (*Schema) Descriptor() ([]byte, []int) { - return fileDescriptor_a43d10d209cd31c2, []int{50} +func (x *Schema) Reset() { + *x = Schema{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[50] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *Schema) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Schema.Unmarshal(m, b) -} -func (m *Schema) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Schema.Marshal(b, m, deterministic) -} -func (m *Schema) XXX_Merge(src proto.Message) { - xxx_messageInfo_Schema.Merge(m, src) -} -func (m *Schema) XXX_Size() int { - return xxx_messageInfo_Schema.Size(m) +func (x *Schema) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *Schema) XXX_DiscardUnknown() { - xxx_messageInfo_Schema.DiscardUnknown(m) + +func (*Schema) ProtoMessage() {} + +func (x *Schema) ProtoReflect() protoreflect.Message { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[50] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_Schema proto.InternalMessageInfo +// Deprecated: Use Schema.ProtoReflect.Descriptor instead. 
+func (*Schema) Descriptor() ([]byte, []int) { + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{50} +} -func (m *Schema) GetXRef() string { - if m != nil { - return m.XRef +func (x *Schema) GetXRef() string { + if x != nil { + return x.XRef } return "" } -func (m *Schema) GetFormat() string { - if m != nil { - return m.Format +func (x *Schema) GetFormat() string { + if x != nil { + return x.Format } return "" } -func (m *Schema) GetTitle() string { - if m != nil { - return m.Title +func (x *Schema) GetTitle() string { + if x != nil { + return x.Title } return "" } -func (m *Schema) GetDescription() string { - if m != nil { - return m.Description +func (x *Schema) GetDescription() string { + if x != nil { + return x.Description } return "" } -func (m *Schema) GetDefault() *Any { - if m != nil { - return m.Default +func (x *Schema) GetDefault() *Any { + if x != nil { + return x.Default } return nil } -func (m *Schema) GetMultipleOf() float64 { - if m != nil { - return m.MultipleOf +func (x *Schema) GetMultipleOf() float64 { + if x != nil { + return x.MultipleOf } return 0 } -func (m *Schema) GetMaximum() float64 { - if m != nil { - return m.Maximum +func (x *Schema) GetMaximum() float64 { + if x != nil { + return x.Maximum } return 0 } -func (m *Schema) GetExclusiveMaximum() bool { - if m != nil { - return m.ExclusiveMaximum +func (x *Schema) GetExclusiveMaximum() bool { + if x != nil { + return x.ExclusiveMaximum } return false } -func (m *Schema) GetMinimum() float64 { - if m != nil { - return m.Minimum +func (x *Schema) GetMinimum() float64 { + if x != nil { + return x.Minimum } return 0 } -func (m *Schema) GetExclusiveMinimum() bool { - if m != nil { - return m.ExclusiveMinimum +func (x *Schema) GetExclusiveMinimum() bool { + if x != nil { + return x.ExclusiveMinimum } return false } -func (m *Schema) GetMaxLength() int64 { - if m != nil { - return m.MaxLength +func (x *Schema) GetMaxLength() int64 { + if x != nil { + return x.MaxLength } return 0 } -func (m *Schema) GetMinLength() int64 { - if m != nil { - return m.MinLength +func (x *Schema) GetMinLength() int64 { + if x != nil { + return x.MinLength } return 0 } -func (m *Schema) GetPattern() string { - if m != nil { - return m.Pattern +func (x *Schema) GetPattern() string { + if x != nil { + return x.Pattern } return "" } -func (m *Schema) GetMaxItems() int64 { - if m != nil { - return m.MaxItems +func (x *Schema) GetMaxItems() int64 { + if x != nil { + return x.MaxItems } return 0 } -func (m *Schema) GetMinItems() int64 { - if m != nil { - return m.MinItems +func (x *Schema) GetMinItems() int64 { + if x != nil { + return x.MinItems } return 0 } -func (m *Schema) GetUniqueItems() bool { - if m != nil { - return m.UniqueItems +func (x *Schema) GetUniqueItems() bool { + if x != nil { + return x.UniqueItems } return false } -func (m *Schema) GetMaxProperties() int64 { - if m != nil { - return m.MaxProperties +func (x *Schema) GetMaxProperties() int64 { + if x != nil { + return x.MaxProperties } return 0 } -func (m *Schema) GetMinProperties() int64 { - if m != nil { - return m.MinProperties +func (x *Schema) GetMinProperties() int64 { + if x != nil { + return x.MinProperties } return 0 } -func (m *Schema) GetRequired() []string { - if m != nil { - return m.Required +func (x *Schema) GetRequired() []string { + if x != nil { + return x.Required } return nil } -func (m *Schema) GetEnum() []*Any { - if m != nil { - return m.Enum +func (x *Schema) GetEnum() []*Any { + if x != nil { + return x.Enum } return nil } -func (m *Schema) 
GetAdditionalProperties() *AdditionalPropertiesItem { - if m != nil { - return m.AdditionalProperties +func (x *Schema) GetAdditionalProperties() *AdditionalPropertiesItem { + if x != nil { + return x.AdditionalProperties } return nil } -func (m *Schema) GetType() *TypeItem { - if m != nil { - return m.Type +func (x *Schema) GetType() *TypeItem { + if x != nil { + return x.Type } return nil } -func (m *Schema) GetItems() *ItemsItem { - if m != nil { - return m.Items +func (x *Schema) GetItems() *ItemsItem { + if x != nil { + return x.Items } return nil } -func (m *Schema) GetAllOf() []*Schema { - if m != nil { - return m.AllOf +func (x *Schema) GetAllOf() []*Schema { + if x != nil { + return x.AllOf } return nil } -func (m *Schema) GetProperties() *Properties { - if m != nil { - return m.Properties +func (x *Schema) GetProperties() *Properties { + if x != nil { + return x.Properties } return nil } -func (m *Schema) GetDiscriminator() string { - if m != nil { - return m.Discriminator +func (x *Schema) GetDiscriminator() string { + if x != nil { + return x.Discriminator } return "" } -func (m *Schema) GetReadOnly() bool { - if m != nil { - return m.ReadOnly +func (x *Schema) GetReadOnly() bool { + if x != nil { + return x.ReadOnly } return false } -func (m *Schema) GetXml() *Xml { - if m != nil { - return m.Xml +func (x *Schema) GetXml() *Xml { + if x != nil { + return x.Xml } return nil } -func (m *Schema) GetExternalDocs() *ExternalDocs { - if m != nil { - return m.ExternalDocs +func (x *Schema) GetExternalDocs() *ExternalDocs { + if x != nil { + return x.ExternalDocs } return nil } -func (m *Schema) GetExample() *Any { - if m != nil { - return m.Example +func (x *Schema) GetExample() *Any { + if x != nil { + return x.Example } return nil } -func (m *Schema) GetVendorExtension() []*NamedAny { - if m != nil { - return m.VendorExtension +func (x *Schema) GetVendorExtension() []*NamedAny { + if x != nil { + return x.VendorExtension } return nil } type SchemaItem struct { - // Types that are valid to be assigned to Oneof: + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to Oneof: // *SchemaItem_Schema // *SchemaItem_FileSchema - Oneof isSchemaItem_Oneof `protobuf_oneof:"oneof"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Oneof isSchemaItem_Oneof `protobuf_oneof:"oneof"` } -func (m *SchemaItem) Reset() { *m = SchemaItem{} } -func (m *SchemaItem) String() string { return proto.CompactTextString(m) } -func (*SchemaItem) ProtoMessage() {} -func (*SchemaItem) Descriptor() ([]byte, []int) { - return fileDescriptor_a43d10d209cd31c2, []int{51} +func (x *SchemaItem) Reset() { + *x = SchemaItem{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[51] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *SchemaItem) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_SchemaItem.Unmarshal(m, b) -} -func (m *SchemaItem) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_SchemaItem.Marshal(b, m, deterministic) -} -func (m *SchemaItem) XXX_Merge(src proto.Message) { - xxx_messageInfo_SchemaItem.Merge(m, src) +func (x *SchemaItem) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *SchemaItem) XXX_Size() int { - return xxx_messageInfo_SchemaItem.Size(m) -} -func (m *SchemaItem) XXX_DiscardUnknown() { - 
xxx_messageInfo_SchemaItem.DiscardUnknown(m) -} - -var xxx_messageInfo_SchemaItem proto.InternalMessageInfo -type isSchemaItem_Oneof interface { - isSchemaItem_Oneof() -} +func (*SchemaItem) ProtoMessage() {} -type SchemaItem_Schema struct { - Schema *Schema `protobuf:"bytes,1,opt,name=schema,proto3,oneof"` +func (x *SchemaItem) ProtoReflect() protoreflect.Message { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[51] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -type SchemaItem_FileSchema struct { - FileSchema *FileSchema `protobuf:"bytes,2,opt,name=file_schema,json=fileSchema,proto3,oneof"` +// Deprecated: Use SchemaItem.ProtoReflect.Descriptor instead. +func (*SchemaItem) Descriptor() ([]byte, []int) { + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{51} } -func (*SchemaItem_Schema) isSchemaItem_Oneof() {} - -func (*SchemaItem_FileSchema) isSchemaItem_Oneof() {} - func (m *SchemaItem) GetOneof() isSchemaItem_Oneof { if m != nil { return m.Oneof @@ -4460,105 +4834,178 @@ func (m *SchemaItem) GetOneof() isSchemaItem_Oneof { return nil } -func (m *SchemaItem) GetSchema() *Schema { - if x, ok := m.GetOneof().(*SchemaItem_Schema); ok { +func (x *SchemaItem) GetSchema() *Schema { + if x, ok := x.GetOneof().(*SchemaItem_Schema); ok { return x.Schema } return nil } -func (m *SchemaItem) GetFileSchema() *FileSchema { - if x, ok := m.GetOneof().(*SchemaItem_FileSchema); ok { +func (x *SchemaItem) GetFileSchema() *FileSchema { + if x, ok := x.GetOneof().(*SchemaItem_FileSchema); ok { return x.FileSchema } return nil } -// XXX_OneofWrappers is for the internal use of the proto package. 
-func (*SchemaItem) XXX_OneofWrappers() []interface{} { - return []interface{}{ - (*SchemaItem_Schema)(nil), - (*SchemaItem_FileSchema)(nil), - } +type isSchemaItem_Oneof interface { + isSchemaItem_Oneof() } -type SecurityDefinitions struct { - AdditionalProperties []*NamedSecurityDefinitionsItem `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties,proto3" json:"additional_properties,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` +type SchemaItem_Schema struct { + Schema *Schema `protobuf:"bytes,1,opt,name=schema,proto3,oneof"` } -func (m *SecurityDefinitions) Reset() { *m = SecurityDefinitions{} } -func (m *SecurityDefinitions) String() string { return proto.CompactTextString(m) } -func (*SecurityDefinitions) ProtoMessage() {} -func (*SecurityDefinitions) Descriptor() ([]byte, []int) { - return fileDescriptor_a43d10d209cd31c2, []int{52} +type SchemaItem_FileSchema struct { + FileSchema *FileSchema `protobuf:"bytes,2,opt,name=file_schema,json=fileSchema,proto3,oneof"` } -func (m *SecurityDefinitions) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_SecurityDefinitions.Unmarshal(m, b) -} -func (m *SecurityDefinitions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_SecurityDefinitions.Marshal(b, m, deterministic) +func (*SchemaItem_Schema) isSchemaItem_Oneof() {} + +func (*SchemaItem_FileSchema) isSchemaItem_Oneof() {} + +type SecurityDefinitions struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + AdditionalProperties []*NamedSecurityDefinitionsItem `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties,proto3" json:"additional_properties,omitempty"` } -func (m *SecurityDefinitions) XXX_Merge(src proto.Message) { - xxx_messageInfo_SecurityDefinitions.Merge(m, src) + +func (x *SecurityDefinitions) Reset() { + *x = SecurityDefinitions{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[52] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *SecurityDefinitions) XXX_Size() int { - return xxx_messageInfo_SecurityDefinitions.Size(m) + +func (x *SecurityDefinitions) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *SecurityDefinitions) XXX_DiscardUnknown() { - xxx_messageInfo_SecurityDefinitions.DiscardUnknown(m) + +func (*SecurityDefinitions) ProtoMessage() {} + +func (x *SecurityDefinitions) ProtoReflect() protoreflect.Message { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[52] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_SecurityDefinitions proto.InternalMessageInfo +// Deprecated: Use SecurityDefinitions.ProtoReflect.Descriptor instead. 
+func (*SecurityDefinitions) Descriptor() ([]byte, []int) { + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{52} +} -func (m *SecurityDefinitions) GetAdditionalProperties() []*NamedSecurityDefinitionsItem { - if m != nil { - return m.AdditionalProperties +func (x *SecurityDefinitions) GetAdditionalProperties() []*NamedSecurityDefinitionsItem { + if x != nil { + return x.AdditionalProperties } return nil } type SecurityDefinitionsItem struct { - // Types that are valid to be assigned to Oneof: + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to Oneof: // *SecurityDefinitionsItem_BasicAuthenticationSecurity // *SecurityDefinitionsItem_ApiKeySecurity // *SecurityDefinitionsItem_Oauth2ImplicitSecurity // *SecurityDefinitionsItem_Oauth2PasswordSecurity // *SecurityDefinitionsItem_Oauth2ApplicationSecurity // *SecurityDefinitionsItem_Oauth2AccessCodeSecurity - Oneof isSecurityDefinitionsItem_Oneof `protobuf_oneof:"oneof"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Oneof isSecurityDefinitionsItem_Oneof `protobuf_oneof:"oneof"` } -func (m *SecurityDefinitionsItem) Reset() { *m = SecurityDefinitionsItem{} } -func (m *SecurityDefinitionsItem) String() string { return proto.CompactTextString(m) } -func (*SecurityDefinitionsItem) ProtoMessage() {} +func (x *SecurityDefinitionsItem) Reset() { + *x = SecurityDefinitionsItem{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[53] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SecurityDefinitionsItem) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SecurityDefinitionsItem) ProtoMessage() {} + +func (x *SecurityDefinitionsItem) ProtoReflect() protoreflect.Message { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[53] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SecurityDefinitionsItem.ProtoReflect.Descriptor instead. 
func (*SecurityDefinitionsItem) Descriptor() ([]byte, []int) { - return fileDescriptor_a43d10d209cd31c2, []int{53} + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{53} } -func (m *SecurityDefinitionsItem) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_SecurityDefinitionsItem.Unmarshal(m, b) +func (m *SecurityDefinitionsItem) GetOneof() isSecurityDefinitionsItem_Oneof { + if m != nil { + return m.Oneof + } + return nil } -func (m *SecurityDefinitionsItem) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_SecurityDefinitionsItem.Marshal(b, m, deterministic) + +func (x *SecurityDefinitionsItem) GetBasicAuthenticationSecurity() *BasicAuthenticationSecurity { + if x, ok := x.GetOneof().(*SecurityDefinitionsItem_BasicAuthenticationSecurity); ok { + return x.BasicAuthenticationSecurity + } + return nil +} + +func (x *SecurityDefinitionsItem) GetApiKeySecurity() *ApiKeySecurity { + if x, ok := x.GetOneof().(*SecurityDefinitionsItem_ApiKeySecurity); ok { + return x.ApiKeySecurity + } + return nil } -func (m *SecurityDefinitionsItem) XXX_Merge(src proto.Message) { - xxx_messageInfo_SecurityDefinitionsItem.Merge(m, src) + +func (x *SecurityDefinitionsItem) GetOauth2ImplicitSecurity() *Oauth2ImplicitSecurity { + if x, ok := x.GetOneof().(*SecurityDefinitionsItem_Oauth2ImplicitSecurity); ok { + return x.Oauth2ImplicitSecurity + } + return nil } -func (m *SecurityDefinitionsItem) XXX_Size() int { - return xxx_messageInfo_SecurityDefinitionsItem.Size(m) + +func (x *SecurityDefinitionsItem) GetOauth2PasswordSecurity() *Oauth2PasswordSecurity { + if x, ok := x.GetOneof().(*SecurityDefinitionsItem_Oauth2PasswordSecurity); ok { + return x.Oauth2PasswordSecurity + } + return nil } -func (m *SecurityDefinitionsItem) XXX_DiscardUnknown() { - xxx_messageInfo_SecurityDefinitionsItem.DiscardUnknown(m) + +func (x *SecurityDefinitionsItem) GetOauth2ApplicationSecurity() *Oauth2ApplicationSecurity { + if x, ok := x.GetOneof().(*SecurityDefinitionsItem_Oauth2ApplicationSecurity); ok { + return x.Oauth2ApplicationSecurity + } + return nil } -var xxx_messageInfo_SecurityDefinitionsItem proto.InternalMessageInfo +func (x *SecurityDefinitionsItem) GetOauth2AccessCodeSecurity() *Oauth2AccessCodeSecurity { + if x, ok := x.GetOneof().(*SecurityDefinitionsItem_Oauth2AccessCodeSecurity); ok { + return x.Oauth2AccessCodeSecurity + } + return nil +} type isSecurityDefinitionsItem_Oneof interface { isSecurityDefinitionsItem_Oneof() @@ -4600,627 +5047,2296 @@ func (*SecurityDefinitionsItem_Oauth2ApplicationSecurity) isSecurityDefinitionsI func (*SecurityDefinitionsItem_Oauth2AccessCodeSecurity) isSecurityDefinitionsItem_Oneof() {} -func (m *SecurityDefinitionsItem) GetOneof() isSecurityDefinitionsItem_Oneof { - if m != nil { - return m.Oneof - } - return nil +type SecurityRequirement struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + AdditionalProperties []*NamedStringArray `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties,proto3" json:"additional_properties,omitempty"` } -func (m *SecurityDefinitionsItem) GetBasicAuthenticationSecurity() *BasicAuthenticationSecurity { - if x, ok := m.GetOneof().(*SecurityDefinitionsItem_BasicAuthenticationSecurity); ok { - return x.BasicAuthenticationSecurity +func (x *SecurityRequirement) Reset() { + *x = SecurityRequirement{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[54] + ms := 
protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - return nil } -func (m *SecurityDefinitionsItem) GetApiKeySecurity() *ApiKeySecurity { - if x, ok := m.GetOneof().(*SecurityDefinitionsItem_ApiKeySecurity); ok { - return x.ApiKeySecurity - } - return nil +func (x *SecurityRequirement) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *SecurityDefinitionsItem) GetOauth2ImplicitSecurity() *Oauth2ImplicitSecurity { - if x, ok := m.GetOneof().(*SecurityDefinitionsItem_Oauth2ImplicitSecurity); ok { - return x.Oauth2ImplicitSecurity +func (*SecurityRequirement) ProtoMessage() {} + +func (x *SecurityRequirement) ProtoReflect() protoreflect.Message { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[54] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - return nil + return mi.MessageOf(x) } -func (m *SecurityDefinitionsItem) GetOauth2PasswordSecurity() *Oauth2PasswordSecurity { - if x, ok := m.GetOneof().(*SecurityDefinitionsItem_Oauth2PasswordSecurity); ok { - return x.Oauth2PasswordSecurity - } - return nil +// Deprecated: Use SecurityRequirement.ProtoReflect.Descriptor instead. +func (*SecurityRequirement) Descriptor() ([]byte, []int) { + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{54} } -func (m *SecurityDefinitionsItem) GetOauth2ApplicationSecurity() *Oauth2ApplicationSecurity { - if x, ok := m.GetOneof().(*SecurityDefinitionsItem_Oauth2ApplicationSecurity); ok { - return x.Oauth2ApplicationSecurity +func (x *SecurityRequirement) GetAdditionalProperties() []*NamedStringArray { + if x != nil { + return x.AdditionalProperties } return nil } -func (m *SecurityDefinitionsItem) GetOauth2AccessCodeSecurity() *Oauth2AccessCodeSecurity { - if x, ok := m.GetOneof().(*SecurityDefinitionsItem_Oauth2AccessCodeSecurity); ok { - return x.Oauth2AccessCodeSecurity - } - return nil +type StringArray struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Value []string `protobuf:"bytes,1,rep,name=value,proto3" json:"value,omitempty"` } -// XXX_OneofWrappers is for the internal use of the proto package. 
-func (*SecurityDefinitionsItem) XXX_OneofWrappers() []interface{} { - return []interface{}{ - (*SecurityDefinitionsItem_BasicAuthenticationSecurity)(nil), - (*SecurityDefinitionsItem_ApiKeySecurity)(nil), - (*SecurityDefinitionsItem_Oauth2ImplicitSecurity)(nil), - (*SecurityDefinitionsItem_Oauth2PasswordSecurity)(nil), - (*SecurityDefinitionsItem_Oauth2ApplicationSecurity)(nil), - (*SecurityDefinitionsItem_Oauth2AccessCodeSecurity)(nil), +func (x *StringArray) Reset() { + *x = StringArray{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[55] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } } -type SecurityRequirement struct { - AdditionalProperties []*NamedStringArray `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties,proto3" json:"additional_properties,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` +func (x *StringArray) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *SecurityRequirement) Reset() { *m = SecurityRequirement{} } -func (m *SecurityRequirement) String() string { return proto.CompactTextString(m) } -func (*SecurityRequirement) ProtoMessage() {} -func (*SecurityRequirement) Descriptor() ([]byte, []int) { - return fileDescriptor_a43d10d209cd31c2, []int{54} -} +func (*StringArray) ProtoMessage() {} -func (m *SecurityRequirement) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_SecurityRequirement.Unmarshal(m, b) -} -func (m *SecurityRequirement) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_SecurityRequirement.Marshal(b, m, deterministic) -} -func (m *SecurityRequirement) XXX_Merge(src proto.Message) { - xxx_messageInfo_SecurityRequirement.Merge(m, src) -} -func (m *SecurityRequirement) XXX_Size() int { - return xxx_messageInfo_SecurityRequirement.Size(m) -} -func (m *SecurityRequirement) XXX_DiscardUnknown() { - xxx_messageInfo_SecurityRequirement.DiscardUnknown(m) +func (x *StringArray) ProtoReflect() protoreflect.Message { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[55] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_SecurityRequirement proto.InternalMessageInfo +// Deprecated: Use StringArray.ProtoReflect.Descriptor instead. 
+func (*StringArray) Descriptor() ([]byte, []int) { + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{55} +} -func (m *SecurityRequirement) GetAdditionalProperties() []*NamedStringArray { - if m != nil { - return m.AdditionalProperties +func (x *StringArray) GetValue() []string { + if x != nil { + return x.Value } return nil } -type StringArray struct { - Value []string `protobuf:"bytes,1,rep,name=value,proto3" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} +type Tag struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -func (m *StringArray) Reset() { *m = StringArray{} } -func (m *StringArray) String() string { return proto.CompactTextString(m) } -func (*StringArray) ProtoMessage() {} -func (*StringArray) Descriptor() ([]byte, []int) { - return fileDescriptor_a43d10d209cd31c2, []int{55} + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"` + ExternalDocs *ExternalDocs `protobuf:"bytes,3,opt,name=external_docs,json=externalDocs,proto3" json:"external_docs,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,4,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` } -func (m *StringArray) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_StringArray.Unmarshal(m, b) -} -func (m *StringArray) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_StringArray.Marshal(b, m, deterministic) -} -func (m *StringArray) XXX_Merge(src proto.Message) { - xxx_messageInfo_StringArray.Merge(m, src) -} -func (m *StringArray) XXX_Size() int { - return xxx_messageInfo_StringArray.Size(m) +func (x *Tag) Reset() { + *x = Tag{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[56] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *StringArray) XXX_DiscardUnknown() { - xxx_messageInfo_StringArray.DiscardUnknown(m) + +func (x *Tag) String() string { + return protoimpl.X.MessageStringOf(x) } -var xxx_messageInfo_StringArray proto.InternalMessageInfo +func (*Tag) ProtoMessage() {} -func (m *StringArray) GetValue() []string { - if m != nil { - return m.Value +func (x *Tag) ProtoReflect() protoreflect.Message { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[56] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - return nil + return mi.MessageOf(x) } -type Tag struct { - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"` - ExternalDocs *ExternalDocs `protobuf:"bytes,3,opt,name=external_docs,json=externalDocs,proto3" json:"external_docs,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,4,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Tag) Reset() { *m = Tag{} } -func (m *Tag) String() string { return proto.CompactTextString(m) } -func (*Tag) ProtoMessage() {} +// Deprecated: Use Tag.ProtoReflect.Descriptor instead. 
func (*Tag) Descriptor() ([]byte, []int) { - return fileDescriptor_a43d10d209cd31c2, []int{56} + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{56} } -func (m *Tag) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Tag.Unmarshal(m, b) -} -func (m *Tag) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Tag.Marshal(b, m, deterministic) -} -func (m *Tag) XXX_Merge(src proto.Message) { - xxx_messageInfo_Tag.Merge(m, src) -} -func (m *Tag) XXX_Size() int { - return xxx_messageInfo_Tag.Size(m) -} -func (m *Tag) XXX_DiscardUnknown() { - xxx_messageInfo_Tag.DiscardUnknown(m) -} - -var xxx_messageInfo_Tag proto.InternalMessageInfo - -func (m *Tag) GetName() string { - if m != nil { - return m.Name +func (x *Tag) GetName() string { + if x != nil { + return x.Name } return "" } -func (m *Tag) GetDescription() string { - if m != nil { - return m.Description +func (x *Tag) GetDescription() string { + if x != nil { + return x.Description } return "" } -func (m *Tag) GetExternalDocs() *ExternalDocs { - if m != nil { - return m.ExternalDocs +func (x *Tag) GetExternalDocs() *ExternalDocs { + if x != nil { + return x.ExternalDocs } return nil } -func (m *Tag) GetVendorExtension() []*NamedAny { - if m != nil { - return m.VendorExtension +func (x *Tag) GetVendorExtension() []*NamedAny { + if x != nil { + return x.VendorExtension } return nil } type TypeItem struct { - Value []string `protobuf:"bytes,1,rep,name=value,proto3" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -func (m *TypeItem) Reset() { *m = TypeItem{} } -func (m *TypeItem) String() string { return proto.CompactTextString(m) } -func (*TypeItem) ProtoMessage() {} -func (*TypeItem) Descriptor() ([]byte, []int) { - return fileDescriptor_a43d10d209cd31c2, []int{57} + Value []string `protobuf:"bytes,1,rep,name=value,proto3" json:"value,omitempty"` } -func (m *TypeItem) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_TypeItem.Unmarshal(m, b) -} -func (m *TypeItem) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_TypeItem.Marshal(b, m, deterministic) -} -func (m *TypeItem) XXX_Merge(src proto.Message) { - xxx_messageInfo_TypeItem.Merge(m, src) +func (x *TypeItem) Reset() { + *x = TypeItem{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[57] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *TypeItem) XXX_Size() int { - return xxx_messageInfo_TypeItem.Size(m) + +func (x *TypeItem) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *TypeItem) XXX_DiscardUnknown() { - xxx_messageInfo_TypeItem.DiscardUnknown(m) + +func (*TypeItem) ProtoMessage() {} + +func (x *TypeItem) ProtoReflect() protoreflect.Message { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[57] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_TypeItem proto.InternalMessageInfo +// Deprecated: Use TypeItem.ProtoReflect.Descriptor instead. 
+func (*TypeItem) Descriptor() ([]byte, []int) { + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{57} +} -func (m *TypeItem) GetValue() []string { - if m != nil { - return m.Value +func (x *TypeItem) GetValue() []string { + if x != nil { + return x.Value } return nil } // Any property starting with x- is valid. type VendorExtension struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + AdditionalProperties []*NamedAny `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties,proto3" json:"additional_properties,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` } -func (m *VendorExtension) Reset() { *m = VendorExtension{} } -func (m *VendorExtension) String() string { return proto.CompactTextString(m) } -func (*VendorExtension) ProtoMessage() {} -func (*VendorExtension) Descriptor() ([]byte, []int) { - return fileDescriptor_a43d10d209cd31c2, []int{58} +func (x *VendorExtension) Reset() { + *x = VendorExtension{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[58] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *VendorExtension) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_VendorExtension.Unmarshal(m, b) -} -func (m *VendorExtension) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_VendorExtension.Marshal(b, m, deterministic) +func (x *VendorExtension) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *VendorExtension) XXX_Merge(src proto.Message) { - xxx_messageInfo_VendorExtension.Merge(m, src) -} -func (m *VendorExtension) XXX_Size() int { - return xxx_messageInfo_VendorExtension.Size(m) -} -func (m *VendorExtension) XXX_DiscardUnknown() { - xxx_messageInfo_VendorExtension.DiscardUnknown(m) + +func (*VendorExtension) ProtoMessage() {} + +func (x *VendorExtension) ProtoReflect() protoreflect.Message { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[58] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_VendorExtension proto.InternalMessageInfo +// Deprecated: Use VendorExtension.ProtoReflect.Descriptor instead. 
+func (*VendorExtension) Descriptor() ([]byte, []int) { + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{58} +} -func (m *VendorExtension) GetAdditionalProperties() []*NamedAny { - if m != nil { - return m.AdditionalProperties +func (x *VendorExtension) GetAdditionalProperties() []*NamedAny { + if x != nil { + return x.AdditionalProperties } return nil } type Xml struct { - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - Namespace string `protobuf:"bytes,2,opt,name=namespace,proto3" json:"namespace,omitempty"` - Prefix string `protobuf:"bytes,3,opt,name=prefix,proto3" json:"prefix,omitempty"` - Attribute bool `protobuf:"varint,4,opt,name=attribute,proto3" json:"attribute,omitempty"` - Wrapped bool `protobuf:"varint,5,opt,name=wrapped,proto3" json:"wrapped,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,6,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Xml) Reset() { *m = Xml{} } -func (m *Xml) String() string { return proto.CompactTextString(m) } -func (*Xml) ProtoMessage() {} -func (*Xml) Descriptor() ([]byte, []int) { - return fileDescriptor_a43d10d209cd31c2, []int{59} -} + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -func (m *Xml) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Xml.Unmarshal(m, b) -} -func (m *Xml) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Xml.Marshal(b, m, deterministic) + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Namespace string `protobuf:"bytes,2,opt,name=namespace,proto3" json:"namespace,omitempty"` + Prefix string `protobuf:"bytes,3,opt,name=prefix,proto3" json:"prefix,omitempty"` + Attribute bool `protobuf:"varint,4,opt,name=attribute,proto3" json:"attribute,omitempty"` + Wrapped bool `protobuf:"varint,5,opt,name=wrapped,proto3" json:"wrapped,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,6,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` } -func (m *Xml) XXX_Merge(src proto.Message) { - xxx_messageInfo_Xml.Merge(m, src) + +func (x *Xml) Reset() { + *x = Xml{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[59] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *Xml) XXX_Size() int { - return xxx_messageInfo_Xml.Size(m) + +func (x *Xml) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *Xml) XXX_DiscardUnknown() { - xxx_messageInfo_Xml.DiscardUnknown(m) + +func (*Xml) ProtoMessage() {} + +func (x *Xml) ProtoReflect() protoreflect.Message { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[59] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_Xml proto.InternalMessageInfo +// Deprecated: Use Xml.ProtoReflect.Descriptor instead. 
+func (*Xml) Descriptor() ([]byte, []int) { + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{59} +} -func (m *Xml) GetName() string { - if m != nil { - return m.Name +func (x *Xml) GetName() string { + if x != nil { + return x.Name } return "" } -func (m *Xml) GetNamespace() string { - if m != nil { - return m.Namespace +func (x *Xml) GetNamespace() string { + if x != nil { + return x.Namespace } return "" } -func (m *Xml) GetPrefix() string { - if m != nil { - return m.Prefix +func (x *Xml) GetPrefix() string { + if x != nil { + return x.Prefix } return "" } -func (m *Xml) GetAttribute() bool { - if m != nil { - return m.Attribute +func (x *Xml) GetAttribute() bool { + if x != nil { + return x.Attribute } return false } -func (m *Xml) GetWrapped() bool { - if m != nil { - return m.Wrapped +func (x *Xml) GetWrapped() bool { + if x != nil { + return x.Wrapped } return false } -func (m *Xml) GetVendorExtension() []*NamedAny { - if m != nil { - return m.VendorExtension - } - return nil -} - -func init() { - proto.RegisterType((*AdditionalPropertiesItem)(nil), "openapi.v2.AdditionalPropertiesItem") - proto.RegisterType((*Any)(nil), "openapi.v2.Any") - proto.RegisterType((*ApiKeySecurity)(nil), "openapi.v2.ApiKeySecurity") - proto.RegisterType((*BasicAuthenticationSecurity)(nil), "openapi.v2.BasicAuthenticationSecurity") - proto.RegisterType((*BodyParameter)(nil), "openapi.v2.BodyParameter") - proto.RegisterType((*Contact)(nil), "openapi.v2.Contact") - proto.RegisterType((*Default)(nil), "openapi.v2.Default") - proto.RegisterType((*Definitions)(nil), "openapi.v2.Definitions") - proto.RegisterType((*Document)(nil), "openapi.v2.Document") - proto.RegisterType((*Examples)(nil), "openapi.v2.Examples") - proto.RegisterType((*ExternalDocs)(nil), "openapi.v2.ExternalDocs") - proto.RegisterType((*FileSchema)(nil), "openapi.v2.FileSchema") - proto.RegisterType((*FormDataParameterSubSchema)(nil), "openapi.v2.FormDataParameterSubSchema") - proto.RegisterType((*Header)(nil), "openapi.v2.Header") - proto.RegisterType((*HeaderParameterSubSchema)(nil), "openapi.v2.HeaderParameterSubSchema") - proto.RegisterType((*Headers)(nil), "openapi.v2.Headers") - proto.RegisterType((*Info)(nil), "openapi.v2.Info") - proto.RegisterType((*ItemsItem)(nil), "openapi.v2.ItemsItem") - proto.RegisterType((*JsonReference)(nil), "openapi.v2.JsonReference") - proto.RegisterType((*License)(nil), "openapi.v2.License") - proto.RegisterType((*NamedAny)(nil), "openapi.v2.NamedAny") - proto.RegisterType((*NamedHeader)(nil), "openapi.v2.NamedHeader") - proto.RegisterType((*NamedParameter)(nil), "openapi.v2.NamedParameter") - proto.RegisterType((*NamedPathItem)(nil), "openapi.v2.NamedPathItem") - proto.RegisterType((*NamedResponse)(nil), "openapi.v2.NamedResponse") - proto.RegisterType((*NamedResponseValue)(nil), "openapi.v2.NamedResponseValue") - proto.RegisterType((*NamedSchema)(nil), "openapi.v2.NamedSchema") - proto.RegisterType((*NamedSecurityDefinitionsItem)(nil), "openapi.v2.NamedSecurityDefinitionsItem") - proto.RegisterType((*NamedString)(nil), "openapi.v2.NamedString") - proto.RegisterType((*NamedStringArray)(nil), "openapi.v2.NamedStringArray") - proto.RegisterType((*NonBodyParameter)(nil), "openapi.v2.NonBodyParameter") - proto.RegisterType((*Oauth2AccessCodeSecurity)(nil), "openapi.v2.Oauth2AccessCodeSecurity") - proto.RegisterType((*Oauth2ApplicationSecurity)(nil), "openapi.v2.Oauth2ApplicationSecurity") - proto.RegisterType((*Oauth2ImplicitSecurity)(nil), "openapi.v2.Oauth2ImplicitSecurity") - 
proto.RegisterType((*Oauth2PasswordSecurity)(nil), "openapi.v2.Oauth2PasswordSecurity") - proto.RegisterType((*Oauth2Scopes)(nil), "openapi.v2.Oauth2Scopes") - proto.RegisterType((*Operation)(nil), "openapi.v2.Operation") - proto.RegisterType((*Parameter)(nil), "openapi.v2.Parameter") - proto.RegisterType((*ParameterDefinitions)(nil), "openapi.v2.ParameterDefinitions") - proto.RegisterType((*ParametersItem)(nil), "openapi.v2.ParametersItem") - proto.RegisterType((*PathItem)(nil), "openapi.v2.PathItem") - proto.RegisterType((*PathParameterSubSchema)(nil), "openapi.v2.PathParameterSubSchema") - proto.RegisterType((*Paths)(nil), "openapi.v2.Paths") - proto.RegisterType((*PrimitivesItems)(nil), "openapi.v2.PrimitivesItems") - proto.RegisterType((*Properties)(nil), "openapi.v2.Properties") - proto.RegisterType((*QueryParameterSubSchema)(nil), "openapi.v2.QueryParameterSubSchema") - proto.RegisterType((*Response)(nil), "openapi.v2.Response") - proto.RegisterType((*ResponseDefinitions)(nil), "openapi.v2.ResponseDefinitions") - proto.RegisterType((*ResponseValue)(nil), "openapi.v2.ResponseValue") - proto.RegisterType((*Responses)(nil), "openapi.v2.Responses") - proto.RegisterType((*Schema)(nil), "openapi.v2.Schema") - proto.RegisterType((*SchemaItem)(nil), "openapi.v2.SchemaItem") - proto.RegisterType((*SecurityDefinitions)(nil), "openapi.v2.SecurityDefinitions") - proto.RegisterType((*SecurityDefinitionsItem)(nil), "openapi.v2.SecurityDefinitionsItem") - proto.RegisterType((*SecurityRequirement)(nil), "openapi.v2.SecurityRequirement") - proto.RegisterType((*StringArray)(nil), "openapi.v2.StringArray") - proto.RegisterType((*Tag)(nil), "openapi.v2.Tag") - proto.RegisterType((*TypeItem)(nil), "openapi.v2.TypeItem") - proto.RegisterType((*VendorExtension)(nil), "openapi.v2.VendorExtension") - proto.RegisterType((*Xml)(nil), "openapi.v2.Xml") -} - -func init() { proto.RegisterFile("openapiv2/OpenAPIv2.proto", fileDescriptor_a43d10d209cd31c2) } - -var fileDescriptor_a43d10d209cd31c2 = []byte{ - // 3130 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x3b, 0x4b, 0x73, 0x1c, 0x57, - 0xd5, 0xf3, 0x7e, 0x1c, 0x69, 0x46, 0xa3, 0x96, 0x2c, 0xb7, 0x24, 0xc7, 0x71, 0xe4, 0x3c, 0x6c, - 0xe7, 0xb3, 0x9c, 0x4f, 0x29, 0x48, 0x05, 0x2a, 0x05, 0xf2, 0xab, 0xc6, 0xc4, 0x44, 0x4a, 0xcb, - 0x0e, 0x09, 0x04, 0xba, 0xae, 0x66, 0xee, 0x48, 0x9d, 0x74, 0xf7, 0x6d, 0x77, 0xf7, 0xc8, 0x1a, - 0x16, 0x2c, 0xa0, 0x8a, 0x35, 0x50, 0x59, 0x53, 0x15, 0x16, 0x14, 0x55, 0x59, 0xb0, 0x62, 0xc5, - 0x1f, 0x60, 0xc7, 0x3f, 0x60, 0x0d, 0x5b, 0xaa, 0x58, 0x51, 0x3c, 0xea, 0xbe, 0xa6, 0x5f, 0xb7, - 0xe7, 0x61, 0xb9, 0x80, 0x02, 0xad, 0x66, 0xee, 0x3d, 0xe7, 0x9e, 0x7b, 0xfa, 0xf4, 0x79, 0xdd, - 0x73, 0x6e, 0xc3, 0x3a, 0xf1, 0xb0, 0x8b, 0x3c, 0xeb, 0x64, 0xe7, 0xd6, 0x9e, 0x87, 0xdd, 0xdd, - 0xfd, 0x07, 0x27, 0x3b, 0xdb, 0x9e, 0x4f, 0x42, 0xa2, 0x81, 0x00, 0x6d, 0x9f, 0xec, 0x6c, 0xac, - 0x1f, 0x11, 0x72, 0x64, 0xe3, 0x5b, 0x0c, 0x72, 0x38, 0x1c, 0xdc, 0x42, 0xee, 0x88, 0xa3, 0x6d, - 0x39, 0xa0, 0xef, 0xf6, 0xfb, 0x56, 0x68, 0x11, 0x17, 0xd9, 0xfb, 0x3e, 0xf1, 0xb0, 0x1f, 0x5a, - 0x38, 0x78, 0x10, 0x62, 0x47, 0xfb, 0x3f, 0xa8, 0x05, 0xbd, 0x63, 0xec, 0x20, 0xbd, 0x78, 0xa5, - 0x78, 0x6d, 0x61, 0x47, 0xdb, 0x8e, 0x68, 0x6e, 0x1f, 0x30, 0x48, 0xb7, 0x60, 0x08, 0x1c, 0x6d, - 0x03, 0xea, 0x87, 0x84, 0xd8, 0x18, 0xb9, 0x7a, 0xe9, 0x4a, 0xf1, 0x5a, 0xa3, 0x5b, 0x30, 0xe4, - 0xc4, 0xed, 0x3a, 0x54, 0x89, 0x8b, 0xc9, 0x60, 0xeb, 0x1e, 0x94, 0x77, 0xdd, 0x91, 0x76, 0x03, - 0xaa, 0x27, 0xc8, 
0x1e, 0x62, 0x41, 0x78, 0x75, 0x9b, 0x33, 0xb8, 0x2d, 0x19, 0xdc, 0xde, 0x75, - 0x47, 0x06, 0x47, 0xd1, 0x34, 0xa8, 0x8c, 0x90, 0x63, 0x33, 0xa2, 0x4d, 0x83, 0xfd, 0xdf, 0xfa, - 0xa2, 0x08, 0xed, 0x5d, 0xcf, 0x7a, 0x17, 0x8f, 0x0e, 0x70, 0x6f, 0xe8, 0x5b, 0xe1, 0x88, 0xa2, - 0x85, 0x23, 0x8f, 0x53, 0x6c, 0x1a, 0xec, 0x3f, 0x9d, 0x73, 0x91, 0x83, 0xe5, 0x52, 0xfa, 0x5f, - 0x6b, 0x43, 0xc9, 0x72, 0xf5, 0x32, 0x9b, 0x29, 0x59, 0xae, 0x76, 0x05, 0x16, 0xfa, 0x38, 0xe8, - 0xf9, 0x96, 0x47, 0x65, 0xa0, 0x57, 0x18, 0x20, 0x3e, 0xa5, 0x7d, 0x0d, 0x3a, 0x27, 0xd8, 0xed, - 0x13, 0xdf, 0xc4, 0xa7, 0x21, 0x76, 0x03, 0x8a, 0x56, 0xbd, 0x52, 0x66, 0x7c, 0xc7, 0x04, 0xf2, - 0x1e, 0x72, 0x70, 0x9f, 0xf2, 0xbd, 0xc4, 0xb1, 0xef, 0x49, 0xe4, 0xad, 0xcf, 0x8a, 0xb0, 0x79, - 0x1b, 0x05, 0x56, 0x6f, 0x77, 0x18, 0x1e, 0x63, 0x37, 0xb4, 0x7a, 0x88, 0x12, 0x9e, 0xc8, 0x7a, - 0x8a, 0xad, 0xd2, 0x6c, 0x6c, 0x95, 0xe7, 0x61, 0xeb, 0x0f, 0x45, 0x68, 0xdd, 0x26, 0xfd, 0xd1, - 0x3e, 0xf2, 0x91, 0x83, 0x43, 0xec, 0xa7, 0x37, 0x2d, 0x66, 0x37, 0x9d, 0x45, 0xa2, 0x1b, 0xd0, - 0xf0, 0xf1, 0x93, 0xa1, 0xe5, 0xe3, 0x3e, 0x13, 0x67, 0xc3, 0x18, 0x8f, 0xb5, 0x1b, 0x63, 0x95, - 0xaa, 0xe6, 0xa9, 0xd4, 0x58, 0xa1, 0x54, 0x0f, 0x58, 0x9b, 0xe7, 0x01, 0x7f, 0x5c, 0x84, 0xfa, - 0x1d, 0xe2, 0x86, 0xa8, 0x17, 0x8e, 0x19, 0x2f, 0xc6, 0x18, 0xef, 0x40, 0x79, 0xe8, 0x4b, 0xc5, - 0xa2, 0x7f, 0xb5, 0x55, 0xa8, 0x62, 0x07, 0x59, 0xb6, 0x78, 0x1a, 0x3e, 0x50, 0x32, 0x52, 0x99, - 0x87, 0x91, 0x47, 0x50, 0xbf, 0x8b, 0x07, 0x68, 0x68, 0x87, 0xda, 0x03, 0xb8, 0x80, 0xc6, 0xf6, - 0x66, 0x7a, 0x63, 0x83, 0xd3, 0x8b, 0x13, 0x08, 0xae, 0x22, 0x85, 0x89, 0x6e, 0x7d, 0x07, 0x16, - 0xee, 0xe2, 0x81, 0xe5, 0x32, 0x48, 0xa0, 0x3d, 0x9c, 0x4c, 0xf9, 0x62, 0x86, 0xb2, 0x10, 0xb7, - 0x9a, 0xf8, 0x1f, 0xab, 0xd0, 0xb8, 0x4b, 0x7a, 0x43, 0x07, 0xbb, 0xa1, 0xa6, 0x43, 0x3d, 0x78, - 0x8a, 0x8e, 0x8e, 0xb0, 0x2f, 0xe4, 0x27, 0x87, 0xda, 0xcb, 0x50, 0xb1, 0xdc, 0x01, 0x61, 0x32, - 0x5c, 0xd8, 0xe9, 0xc4, 0xf7, 0x78, 0xe0, 0x0e, 0x88, 0xc1, 0xa0, 0x54, 0xf8, 0xc7, 0x24, 0x08, - 0x85, 0x54, 0xd9, 0x7f, 0x6d, 0x13, 0x9a, 0x87, 0x28, 0xc0, 0xa6, 0x87, 0xc2, 0x63, 0x61, 0x75, - 0x0d, 0x3a, 0xb1, 0x8f, 0xc2, 0x63, 0xb6, 0x21, 0xe5, 0x0e, 0x07, 0xcc, 0xd2, 0xe8, 0x86, 0x7c, - 0x48, 0x95, 0xab, 0x47, 0xdc, 0x60, 0x48, 0x41, 0x35, 0x06, 0x1a, 0x8f, 0x29, 0xcc, 0xf3, 0x49, - 0x7f, 0xd8, 0xc3, 0x81, 0x5e, 0xe7, 0x30, 0x39, 0xd6, 0x5e, 0x83, 0x2a, 0xdd, 0x29, 0xd0, 0x1b, - 0x8c, 0xd3, 0xe5, 0x38, 0xa7, 0x74, 0xcb, 0xc0, 0xe0, 0x70, 0xed, 0x6d, 0x6a, 0x03, 0x63, 0xa9, - 0xea, 0x4d, 0x86, 0x9e, 0x10, 0x5e, 0x4c, 0xe8, 0x46, 0x1c, 0x57, 0xfb, 0x3a, 0x80, 0x27, 0x6d, - 0x29, 0xd0, 0x81, 0xad, 0xbc, 0x92, 0xdc, 0x48, 0x40, 0xe3, 0x24, 0x62, 0x6b, 0xb4, 0x77, 0xa0, - 0xe9, 0xe3, 0xc0, 0x23, 0x6e, 0x80, 0x03, 0x7d, 0x81, 0x11, 0x78, 0x31, 0x4e, 0xc0, 0x10, 0xc0, - 0xf8, 0xfa, 0x68, 0x85, 0xf6, 0x55, 0x68, 0x04, 0xc2, 0xa9, 0xe8, 0x8b, 0xec, 0xad, 0x27, 0x56, - 0x4b, 0x87, 0x63, 0x70, 0x6b, 0xa4, 0xaf, 0xd6, 0x18, 0x2f, 0xd0, 0x0c, 0x58, 0x95, 0xff, 0xcd, - 0xb8, 0x04, 0x5a, 0x59, 0x36, 0x24, 0xa1, 0x38, 0x1b, 0x2b, 0x41, 0x76, 0x52, 0xbb, 0x0a, 0x95, - 0x10, 0x1d, 0x05, 0x7a, 0x9b, 0x31, 0xb3, 0x14, 0xa7, 0xf1, 0x08, 0x1d, 0x19, 0x0c, 0xa8, 0xbd, - 0x03, 0x2d, 0x6a, 0x57, 0x3e, 0x55, 0xdb, 0x3e, 0xe9, 0x05, 0xfa, 0x12, 0xdb, 0x51, 0x8f, 0x63, - 0xdf, 0x13, 0x08, 0x77, 0x49, 0x2f, 0x30, 0x16, 0x71, 0x6c, 0xa4, 0xb4, 0xce, 0xce, 0x3c, 0xd6, - 0xf9, 0x18, 0x1a, 0xf7, 0x4e, 0x91, 0xe3, 0xd9, 0x38, 0x78, 0x9e, 0xe6, 0xf9, 0xa3, 0x22, 0x2c, - 0xc6, 0xd9, 0x9e, 0xc1, 0xbb, 0x66, 0x1d, 
0xd2, 0x99, 0x9d, 0xfc, 0x3f, 0x4a, 0x00, 0xf7, 0x2d, - 0x1b, 0x73, 0x63, 0xd7, 0xd6, 0xa0, 0x36, 0x20, 0xbe, 0x83, 0x42, 0xb1, 0xbd, 0x18, 0x51, 0xc7, - 0x17, 0x5a, 0xa1, 0x2d, 0x1d, 0x3b, 0x1f, 0xa4, 0x39, 0x2e, 0x67, 0x39, 0xbe, 0x0e, 0xf5, 0x3e, - 0xf7, 0x6c, 0xcc, 0x86, 0x53, 0xef, 0x98, 0x72, 0x24, 0xe1, 0x89, 0xb0, 0xc0, 0x8d, 0x3a, 0x0a, - 0x0b, 0x32, 0x02, 0xd6, 0x62, 0x11, 0x70, 0x93, 0xda, 0x02, 0xea, 0x9b, 0xc4, 0xb5, 0x47, 0x7a, - 0x5d, 0xc6, 0x11, 0xd4, 0xdf, 0x73, 0xed, 0x51, 0x56, 0x67, 0x1a, 0x73, 0xe9, 0xcc, 0x75, 0xa8, - 0x63, 0xfe, 0xca, 0x85, 0x81, 0x67, 0xd9, 0x16, 0x70, 0xe5, 0x1b, 0x80, 0x79, 0xde, 0xc0, 0x17, - 0x35, 0xd8, 0xb8, 0x4f, 0x7c, 0xe7, 0x2e, 0x0a, 0xd1, 0xd8, 0x01, 0x1c, 0x0c, 0x0f, 0x0f, 0x64, - 0xda, 0x14, 0x89, 0xa5, 0x98, 0x8a, 0x96, 0x3c, 0xb2, 0x96, 0xf2, 0x72, 0x95, 0x72, 0x7e, 0x7c, - 0xae, 0xc4, 0xc2, 0xdc, 0x0d, 0x58, 0x46, 0xb6, 0x4d, 0x9e, 0x9a, 0xd8, 0xf1, 0xc2, 0x91, 0xc9, - 0x13, 0xaf, 0x2a, 0xdb, 0x6a, 0x89, 0x01, 0xee, 0xd1, 0xf9, 0x0f, 0x64, 0xb2, 0x95, 0x79, 0x11, - 0x91, 0xce, 0xd4, 0x13, 0x3a, 0xf3, 0xff, 0x50, 0xb5, 0x42, 0xec, 0x48, 0xd9, 0x6f, 0x26, 0x3c, - 0x9d, 0x6f, 0x39, 0x56, 0x68, 0x9d, 0xf0, 0x4c, 0x32, 0x30, 0x38, 0xa6, 0xf6, 0x3a, 0x2c, 0xf7, - 0x88, 0x6d, 0xe3, 0x1e, 0x65, 0xd6, 0x14, 0x54, 0x9b, 0x8c, 0x6a, 0x27, 0x02, 0xdc, 0xe7, 0xf4, - 0x63, 0xba, 0x05, 0x53, 0x74, 0x4b, 0x87, 0xba, 0x83, 0x4e, 0x2d, 0x67, 0xe8, 0x30, 0xaf, 0x59, - 0x34, 0xe4, 0x90, 0xee, 0x88, 0x4f, 0x7b, 0xf6, 0x30, 0xb0, 0x4e, 0xb0, 0x29, 0x71, 0x16, 0xd9, - 0xc3, 0x77, 0xc6, 0x80, 0x6f, 0x0a, 0x64, 0x4a, 0xc6, 0x72, 0x19, 0x4a, 0x4b, 0x90, 0xe1, 0xc3, - 0x14, 0x19, 0x81, 0xd3, 0x4e, 0x93, 0x11, 0xc8, 0x2f, 0x00, 0x38, 0xe8, 0xd4, 0xb4, 0xb1, 0x7b, - 0x14, 0x1e, 0x33, 0x6f, 0x56, 0x36, 0x9a, 0x0e, 0x3a, 0x7d, 0xc8, 0x26, 0x18, 0xd8, 0x72, 0x25, - 0xb8, 0x23, 0xc0, 0x96, 0x2b, 0xc0, 0x3a, 0xd4, 0x3d, 0x14, 0x52, 0x65, 0xd5, 0x97, 0x79, 0xb0, - 0x15, 0x43, 0x6a, 0x11, 0x94, 0x2e, 0x17, 0xba, 0xc6, 0xd6, 0x35, 0x1c, 0x74, 0xca, 0x24, 0xcc, - 0x80, 0x96, 0x2b, 0x80, 0x2b, 0x02, 0x68, 0xb9, 0x1c, 0xf8, 0x12, 0x2c, 0x0e, 0x5d, 0xeb, 0xc9, - 0x10, 0x0b, 0xf8, 0x2a, 0xe3, 0x7c, 0x81, 0xcf, 0x71, 0x94, 0xab, 0x50, 0xc1, 0xee, 0xd0, 0xd1, - 0x2f, 0x64, 0x5d, 0x35, 0x15, 0x35, 0x03, 0x6a, 0x2f, 0xc2, 0x82, 0x33, 0xb4, 0x43, 0xcb, 0xb3, - 0xb1, 0x49, 0x06, 0xfa, 0x1a, 0x13, 0x12, 0xc8, 0xa9, 0xbd, 0x81, 0xd2, 0x5a, 0x2e, 0xce, 0x65, - 0x2d, 0x55, 0xa8, 0x75, 0x31, 0xea, 0x63, 0x5f, 0x99, 0x16, 0x47, 0xba, 0x58, 0x52, 0xeb, 0x62, - 0xf9, 0x6c, 0xba, 0x58, 0x99, 0xae, 0x8b, 0xd5, 0xd9, 0x75, 0xb1, 0x36, 0x83, 0x2e, 0xd6, 0xa7, - 0xeb, 0x62, 0x63, 0x06, 0x5d, 0x6c, 0xce, 0xa4, 0x8b, 0x30, 0x59, 0x17, 0x17, 0x26, 0xe8, 0xe2, - 0xe2, 0x04, 0x5d, 0x6c, 0x4d, 0xd2, 0xc5, 0xf6, 0x14, 0x5d, 0x5c, 0xca, 0xd7, 0xc5, 0xce, 0x1c, - 0xba, 0xb8, 0x9c, 0xd1, 0xc5, 0x94, 0xb7, 0xd4, 0x66, 0x3b, 0x42, 0xad, 0xcc, 0xa3, 0xad, 0x7f, - 0xab, 0x82, 0xce, 0xb5, 0xf5, 0xdf, 0xe2, 0xd9, 0xa5, 0x85, 0x54, 0x95, 0x16, 0x52, 0x53, 0x5b, - 0x48, 0xfd, 0x6c, 0x16, 0xd2, 0x98, 0x6e, 0x21, 0xcd, 0xd9, 0x2d, 0x04, 0x66, 0xb0, 0x90, 0x85, - 0xe9, 0x16, 0xb2, 0x38, 0x83, 0x85, 0xb4, 0x66, 0xb2, 0x90, 0xf6, 0x64, 0x0b, 0x59, 0x9a, 0x60, - 0x21, 0x9d, 0x09, 0x16, 0xb2, 0x3c, 0xc9, 0x42, 0xb4, 0x29, 0x16, 0xb2, 0x92, 0x6f, 0x21, 0xab, - 0x73, 0x58, 0xc8, 0x85, 0x99, 0xbc, 0xf5, 0xda, 0x3c, 0xfa, 0xff, 0x2d, 0xa8, 0x73, 0xf5, 0x7f, - 0x86, 0xe3, 0x27, 0x5f, 0x98, 0x93, 0x3c, 0x7f, 0x5e, 0x82, 0x0a, 0x3d, 0x40, 0x46, 0x89, 0x69, - 0x31, 0x9e, 0x98, 0xea, 0x50, 0x3f, 0xc1, 0x7e, 0x10, 0x55, 0x46, 
0xe4, 0x70, 0x06, 0x43, 0xba, - 0x06, 0x9d, 0x10, 0xfb, 0x4e, 0x60, 0x92, 0x81, 0x19, 0x60, 0xff, 0xc4, 0xea, 0x49, 0xa3, 0x6a, - 0xb3, 0xf9, 0xbd, 0xc1, 0x01, 0x9f, 0xd5, 0x6e, 0x42, 0xbd, 0xc7, 0xcb, 0x07, 0xc2, 0xe9, 0xaf, - 0xc4, 0x1f, 0x42, 0x54, 0x16, 0x0c, 0x89, 0x43, 0xd1, 0x6d, 0xab, 0x87, 0xdd, 0x80, 0xa7, 0x4f, - 0x29, 0xf4, 0x87, 0x1c, 0x64, 0x48, 0x1c, 0xa5, 0xf0, 0xeb, 0xf3, 0x08, 0xff, 0x2d, 0x68, 0x32, - 0x65, 0x60, 0xb5, 0xba, 0x1b, 0xb1, 0x5a, 0x5d, 0x79, 0x72, 0x61, 0x65, 0xeb, 0x2e, 0xb4, 0xbe, - 0x11, 0x10, 0xd7, 0xc0, 0x03, 0xec, 0x63, 0xb7, 0x87, 0xb5, 0x65, 0xa8, 0x98, 0x3e, 0x1e, 0x08, - 0x19, 0x97, 0x0d, 0x3c, 0x98, 0x5e, 0x7f, 0xda, 0xf2, 0xa0, 0x2e, 0x9e, 0x69, 0xc6, 0xe2, 0xca, - 0x99, 0xcf, 0x32, 0xf7, 0xa0, 0x21, 0x81, 0xca, 0x2d, 0x5f, 0x91, 0x55, 0xc5, 0x92, 0xda, 0x01, - 0x71, 0xe8, 0xd6, 0xbb, 0xb0, 0x10, 0x53, 0x40, 0x25, 0xa5, 0x6b, 0x49, 0x4a, 0x09, 0x61, 0x0a, - 0xbd, 0x15, 0xc4, 0xde, 0x87, 0x36, 0x23, 0x16, 0x15, 0xd1, 0x54, 0xf4, 0x5e, 0x4f, 0xd2, 0xbb, - 0xa0, 0x2c, 0x0a, 0x48, 0x92, 0x7b, 0xd0, 0x12, 0x24, 0xc3, 0x63, 0xf6, 0x6e, 0x55, 0x14, 0x6f, - 0x24, 0x29, 0xae, 0xa6, 0xeb, 0x19, 0x74, 0x61, 0x9a, 0xa0, 0xac, 0x1e, 0xcc, 0x4d, 0x50, 0x2e, - 0x94, 0x04, 0x3f, 0x02, 0x2d, 0x41, 0x70, 0x7c, 0x76, 0xc8, 0x50, 0xbd, 0x95, 0xa4, 0xba, 0xae, - 0xa2, 0xca, 0x56, 0xa7, 0x5f, 0x8e, 0x88, 0xa1, 0xf3, 0xbe, 0x1c, 0xa1, 0xe9, 0x82, 0x98, 0x03, - 0x97, 0x38, 0xb1, 0x6c, 0x69, 0x22, 0x57, 0xb0, 0x6f, 0x27, 0xa9, 0x5f, 0x9d, 0x52, 0xf7, 0x88, - 0xcb, 0xf9, 0x2d, 0xc9, 0x7b, 0xe8, 0x5b, 0xee, 0x91, 0x92, 0xfa, 0x6a, 0x9c, 0x7a, 0x53, 0x2e, - 0x7c, 0x0c, 0x9d, 0xd8, 0xc2, 0x5d, 0xdf, 0x47, 0x6a, 0x05, 0xbf, 0x99, 0xe4, 0x2d, 0xe1, 0x53, - 0x63, 0x6b, 0x25, 0xd9, 0xdf, 0x94, 0xa1, 0xf3, 0x1e, 0x71, 0x93, 0x35, 0x5e, 0x0c, 0x9b, 0xc7, - 0x4c, 0x83, 0xcd, 0x71, 0xdd, 0xc9, 0x0c, 0x86, 0x87, 0x66, 0xa2, 0xd2, 0xff, 0x72, 0x56, 0xe1, - 0xb3, 0x09, 0x4e, 0xb7, 0x60, 0xe8, 0xc7, 0x79, 0xc9, 0x8f, 0x0d, 0x97, 0x69, 0xc2, 0x60, 0xf6, - 0x51, 0x88, 0xd4, 0x3b, 0xf1, 0x67, 0x78, 0x35, 0xbe, 0x53, 0xfe, 0x31, 0xb9, 0x5b, 0x30, 0x36, - 0x06, 0xf9, 0x87, 0xe8, 0x43, 0xd8, 0x78, 0x32, 0xc4, 0xfe, 0x48, 0xbd, 0x53, 0x39, 0xfb, 0x26, - 0xdf, 0xa7, 0xd8, 0xca, 0x6d, 0x2e, 0x3e, 0x51, 0x83, 0x34, 0x13, 0xd6, 0x3d, 0x14, 0x1e, 0xab, - 0xb7, 0xe0, 0xc5, 0x8f, 0xad, 0xb4, 0x15, 0x2a, 0x77, 0x58, 0xf3, 0x94, 0x90, 0xa8, 0x49, 0xf2, - 0x79, 0x09, 0xf4, 0x3d, 0x34, 0x0c, 0x8f, 0x77, 0x76, 0x7b, 0x3d, 0x1c, 0x04, 0x77, 0x48, 0x1f, - 0x4f, 0xeb, 0x73, 0x0c, 0x6c, 0xf2, 0x54, 0x56, 0xe5, 0xe9, 0x7f, 0xed, 0x0d, 0x1a, 0x10, 0x88, - 0x87, 0xe5, 0x91, 0x28, 0x51, 0x1a, 0xe1, 0xd4, 0x0f, 0x18, 0xdc, 0x10, 0x78, 0x34, 0x6b, 0xa2, - 0xd3, 0xc4, 0xb7, 0xbe, 0xcf, 0xfa, 0x13, 0x26, 0xf5, 0xdf, 0xe2, 0x40, 0x94, 0x00, 0x3c, 0xf6, - 0x6d, 0x9a, 0xc0, 0x84, 0xe4, 0x53, 0xcc, 0x91, 0x78, 0xfe, 0xd9, 0x60, 0x13, 0x14, 0x98, 0x0a, - 0x1e, 0xb5, 0xd9, 0x32, 0xef, 0xb9, 0x82, 0xdf, 0x5f, 0x8a, 0xb0, 0x2e, 0x64, 0xe4, 0x79, 0xf6, - 0x2c, 0x1d, 0x95, 0xe7, 0x23, 0xa4, 0xc4, 0x73, 0x57, 0x26, 0x3f, 0x77, 0x75, 0xb6, 0xe7, 0x9e, - 0xab, 0xa7, 0xf1, 0xc3, 0x12, 0xac, 0x71, 0xc6, 0x1e, 0x38, 0xf4, 0xb9, 0xad, 0xf0, 0x3f, 0x4d, - 0x33, 0xfe, 0x05, 0x42, 0xf8, 0x73, 0x51, 0x0a, 0x61, 0x1f, 0x05, 0xc1, 0x53, 0xe2, 0xf7, 0xff, - 0x07, 0xde, 0xfc, 0xc7, 0xb0, 0x18, 0xe7, 0xeb, 0x19, 0xfa, 0x3d, 0x2c, 0x42, 0xe4, 0x24, 0xdc, - 0x3f, 0xaf, 0x40, 0x73, 0xcf, 0xc3, 0x3e, 0x92, 0x87, 0x4d, 0x56, 0xb7, 0x2f, 0xb2, 0x3a, 0x2d, - 0x2f, 0xd3, 0xeb, 0x50, 0x0f, 0x86, 0x8e, 0x83, 0xfc, 0x91, 0xcc, 0xb9, 0xc5, 0x70, 0x86, 
0x9c, - 0x3b, 0x53, 0xae, 0xad, 0xcc, 0x55, 0xae, 0x7d, 0x09, 0x16, 0x89, 0xe4, 0xcd, 0xb4, 0xfa, 0x52, - 0xbc, 0xe3, 0xb9, 0x07, 0xfd, 0x44, 0xef, 0xa7, 0x96, 0xea, 0xfd, 0xc4, 0x7b, 0x46, 0xf5, 0x54, - 0xcf, 0xe8, 0x2b, 0x89, 0x9e, 0x4d, 0x83, 0x89, 0x6e, 0x43, 0x99, 0x9e, 0xf1, 0x50, 0x1f, 0xef, - 0xd6, 0xbc, 0x19, 0xef, 0xd6, 0x34, 0xb3, 0x99, 0x9d, 0x4c, 0x70, 0x12, 0x3d, 0x9a, 0x58, 0x6b, - 0x0b, 0x92, 0xad, 0xad, 0xcb, 0x00, 0x7d, 0xec, 0xf9, 0xb8, 0x87, 0x42, 0xdc, 0x17, 0xa7, 0xde, - 0xd8, 0xcc, 0xd9, 0xba, 0x3b, 0x2a, 0xf5, 0x6b, 0xcd, 0xa3, 0x7e, 0xbf, 0x2c, 0x42, 0x33, 0xca, - 0x22, 0x6e, 0x43, 0xfb, 0x90, 0xf4, 0x63, 0xf1, 0x56, 0x24, 0x0e, 0x89, 0x04, 0x2f, 0x91, 0x78, - 0x74, 0x0b, 0x46, 0xeb, 0x30, 0x91, 0x89, 0x3c, 0x04, 0xcd, 0x25, 0xae, 0x99, 0xa2, 0xc3, 0xd3, - 0x82, 0x4b, 0x09, 0xa6, 0x52, 0x39, 0x4c, 0xb7, 0x60, 0x74, 0xdc, 0xd4, 0x5c, 0x14, 0x3d, 0x8f, - 0x60, 0x55, 0xd5, 0x67, 0xd3, 0xf6, 0x26, 0xdb, 0xcb, 0x46, 0x46, 0x0c, 0x51, 0x62, 0xae, 0x36, - 0x99, 0xcf, 0x8a, 0xd0, 0x4e, 0x6a, 0x87, 0xf6, 0x25, 0x68, 0xa6, 0x25, 0xa2, 0xce, 0xf5, 0xbb, - 0x05, 0x23, 0xc2, 0xa4, 0xd2, 0xfc, 0x24, 0x20, 0x2e, 0x3d, 0x83, 0xf1, 0x13, 0x99, 0x2a, 0x5d, - 0x4e, 0x1c, 0xd9, 0xa8, 0x34, 0x3f, 0x89, 0x4f, 0x44, 0xcf, 0xff, 0xfb, 0x32, 0x34, 0xc6, 0x47, - 0x07, 0xc5, 0xc9, 0xee, 0x35, 0x28, 0x1f, 0xe1, 0x50, 0x75, 0x12, 0x19, 0xdb, 0xbf, 0x41, 0x31, - 0x28, 0xa2, 0x37, 0x0c, 0x85, 0x7f, 0xcc, 0x43, 0xf4, 0x86, 0xa1, 0x76, 0x1d, 0x2a, 0x1e, 0x09, - 0x64, 0x07, 0x28, 0x07, 0x93, 0xa1, 0x68, 0x37, 0xa1, 0xd6, 0xc7, 0x36, 0x0e, 0xb1, 0x38, 0x51, - 0xe7, 0x20, 0x0b, 0x24, 0xed, 0x16, 0xd4, 0x89, 0xc7, 0xdb, 0x90, 0xb5, 0x49, 0xf8, 0x12, 0x8b, - 0xb2, 0x42, 0x53, 0x52, 0x51, 0xe4, 0xca, 0x63, 0x85, 0xa2, 0xd0, 0x33, 0x99, 0x87, 0xc2, 0xde, - 0xb1, 0x68, 0x5f, 0xe4, 0xe0, 0x72, 0x9c, 0x94, 0x9b, 0x68, 0xce, 0xe5, 0x26, 0xce, 0xdc, 0x41, - 0xfa, 0x6b, 0x15, 0xd6, 0xd4, 0xd9, 0xe4, 0x79, 0x8d, 0xf1, 0xbc, 0xc6, 0xf8, 0xdf, 0x5e, 0x63, - 0x7c, 0x0a, 0x55, 0x76, 0x41, 0x43, 0x49, 0xa9, 0x38, 0x07, 0x25, 0xed, 0x26, 0x54, 0xd8, 0x6d, - 0x93, 0x12, 0x5b, 0xb4, 0xae, 0x70, 0xf8, 0xa2, 0x6e, 0xc2, 0xd0, 0xb6, 0x7e, 0x56, 0x85, 0xa5, - 0x94, 0xd6, 0x9e, 0xf7, 0xa4, 0xce, 0x7b, 0x52, 0x67, 0xea, 0x49, 0xa9, 0x74, 0x58, 0x9b, 0xc7, - 0x1a, 0xbe, 0x0d, 0x10, 0xa5, 0x20, 0xcf, 0xf9, 0xce, 0xd7, 0xaf, 0x6a, 0x70, 0x31, 0xa7, 0x30, - 0x72, 0x7e, 0x4d, 0xe1, 0xfc, 0x9a, 0xc2, 0xf9, 0x35, 0x85, 0xc8, 0x0c, 0xff, 0x5e, 0x84, 0xc6, - 0xb8, 0x9c, 0x3e, 0xfd, 0x62, 0xd7, 0xf6, 0xb8, 0x3b, 0xc3, 0xd3, 0xee, 0xb5, 0x6c, 0xcd, 0x9a, - 0x05, 0x1e, 0x79, 0xf5, 0xf5, 0x26, 0xd4, 0x79, 0x65, 0x55, 0x06, 0x8f, 0x95, 0x6c, 0x41, 0x36, - 0x30, 0x24, 0x8e, 0xf6, 0x06, 0x34, 0xc4, 0x75, 0x25, 0x79, 0xb2, 0x5e, 0x4d, 0x9e, 0xac, 0x39, - 0xcc, 0x18, 0x63, 0x9d, 0xfd, 0x4e, 0x33, 0x86, 0x15, 0xc5, 0x65, 0x44, 0xed, 0xbd, 0xc9, 0x0e, - 0x29, 0x1b, 0x73, 0xc7, 0xad, 0x05, 0xb5, 0x4b, 0xfa, 0x49, 0x11, 0x5a, 0xc9, 0x2e, 0xc3, 0x0e, - 0x75, 0x44, 0x7c, 0x62, 0x7c, 0x7b, 0x5c, 0x71, 0xe6, 0xee, 0x16, 0x8c, 0x31, 0xde, 0xf3, 0x3d, - 0x5f, 0xfd, 0xb4, 0x08, 0xcd, 0xf1, 0xc9, 0x5e, 0xbb, 0x03, 0x2d, 0xb9, 0x8d, 0xd9, 0x23, 0x7d, - 0x2c, 0x1e, 0xf4, 0x72, 0xee, 0x83, 0xf2, 0x6e, 0xc7, 0xa2, 0x5c, 0x74, 0x87, 0xf4, 0xd5, 0xad, - 0xc0, 0xd2, 0x3c, 0x6f, 0xe3, 0xd7, 0x4d, 0xa8, 0x09, 0x47, 0xad, 0x38, 0xf1, 0xe5, 0x25, 0x28, - 0xe3, 0xde, 0x6a, 0x79, 0xc2, 0xa5, 0xbf, 0xca, 0xc4, 0x4b, 0x7f, 0xd3, 0x12, 0x8f, 0x94, 0x25, - 0xd6, 0x32, 0x96, 0x18, 0x73, 0x89, 0xf5, 0x19, 0x5c, 0x62, 0x63, 0xba, 0x4b, 0x6c, 0xce, 0xe0, - 0x12, 0x61, 0x26, 
0x97, 0xb8, 0x30, 0xd9, 0x25, 0x2e, 0x4e, 0x70, 0x89, 0xad, 0x09, 0x2e, 0xb1, - 0x3d, 0xc9, 0x25, 0x2e, 0x4d, 0x71, 0x89, 0x9d, 0xac, 0x4b, 0x7c, 0x05, 0xda, 0x94, 0x78, 0xcc, - 0xd8, 0xf8, 0x49, 0xa0, 0xe5, 0xa0, 0xd3, 0x58, 0xae, 0x40, 0xd1, 0x2c, 0x37, 0x8e, 0xa6, 0x09, - 0x34, 0xcb, 0x8d, 0xa1, 0xc5, 0x03, 0xfd, 0x4a, 0xea, 0x9a, 0xe6, 0x4c, 0x27, 0x82, 0x8f, 0xf2, - 0x5c, 0xc0, 0x85, 0x6c, 0x6b, 0x29, 0xef, 0xd3, 0x13, 0xb5, 0x37, 0xd0, 0xae, 0x89, 0xb0, 0xbf, - 0x96, 0xb5, 0xfb, 0x47, 0x23, 0x0f, 0xf3, 0xdc, 0x9d, 0x25, 0x03, 0xaf, 0xcb, 0xa0, 0x7f, 0x31, - 0x7b, 0xb8, 0x1f, 0x37, 0xcd, 0x65, 0xb8, 0xbf, 0x0e, 0x35, 0x64, 0xdb, 0x54, 0x3f, 0xf5, 0xdc, - 0xde, 0x79, 0x15, 0xd9, 0xf6, 0xde, 0x40, 0xfb, 0x32, 0x40, 0xec, 0x89, 0xd6, 0xb3, 0xce, 0x3c, - 0xe2, 0xd6, 0x88, 0x61, 0x6a, 0x2f, 0x43, 0xab, 0x6f, 0x51, 0x0b, 0x72, 0x2c, 0x17, 0x85, 0xc4, - 0xd7, 0x37, 0x98, 0x82, 0x24, 0x27, 0x93, 0x57, 0x5e, 0x37, 0x53, 0x57, 0x5e, 0x5f, 0x82, 0xf2, - 0xa9, 0x63, 0xeb, 0x97, 0xb2, 0x16, 0xf7, 0xa1, 0x63, 0x1b, 0x14, 0x96, 0x2d, 0xb3, 0xbe, 0xf0, - 0xac, 0xb7, 0x62, 0x2f, 0x3f, 0xc3, 0xad, 0xd8, 0x17, 0xe7, 0xf1, 0x58, 0x3f, 0x00, 0x88, 0xe2, - 0xde, 0x9c, 0x5f, 0x1a, 0xbd, 0x0d, 0x0b, 0x03, 0xcb, 0xc6, 0x66, 0x7e, 0x48, 0x8d, 0x6e, 0x3c, - 0x77, 0x0b, 0x06, 0x0c, 0xc6, 0xa3, 0xc8, 0x8b, 0x87, 0xb0, 0xa2, 0xe8, 0xe6, 0x6a, 0xdf, 0x9d, - 0x1c, 0xbf, 0xae, 0x65, 0x13, 0xea, 0x9c, 0x96, 0xb0, 0x3a, 0x9c, 0xfd, 0xa9, 0x02, 0x17, 0xf3, - 0x9a, 0xd1, 0x0e, 0xbc, 0x70, 0x88, 0x02, 0xab, 0x67, 0xa2, 0xc4, 0x57, 0x42, 0xe6, 0xb8, 0xe6, - 0xcb, 0x45, 0xf3, 0x5a, 0xa2, 0xc2, 0x9a, 0xff, 0x55, 0x51, 0xb7, 0x60, 0x6c, 0x1e, 0x4e, 0xf8, - 0xe8, 0xe8, 0x3e, 0x74, 0x90, 0x67, 0x99, 0x9f, 0xe2, 0x51, 0xb4, 0x03, 0x97, 0x64, 0xa2, 0xae, - 0x95, 0xfc, 0xca, 0xaa, 0x5b, 0x30, 0xda, 0x28, 0xf9, 0xdd, 0xd5, 0xf7, 0x40, 0x27, 0xac, 0x2d, - 0x61, 0x5a, 0xa2, 0x21, 0x15, 0xd1, 0x2b, 0x67, 0xbb, 0xa2, 0xea, 0xde, 0x55, 0xb7, 0x60, 0xac, - 0x11, 0x75, 0x57, 0x2b, 0xa2, 0xef, 0x89, 0x5e, 0x4f, 0x44, 0xbf, 0x92, 0x47, 0x3f, 0xdd, 0x16, - 0x8a, 0xe8, 0x67, 0x1a, 0x46, 0x47, 0xb0, 0x29, 0xe8, 0xa3, 0xa8, 0x91, 0x18, 0x6d, 0xc1, 0x03, - 0xdc, 0x2b, 0xd9, 0x2d, 0x14, 0x6d, 0xc7, 0x6e, 0xc1, 0x58, 0x27, 0xb9, 0x3d, 0x49, 0x1c, 0x6d, - 0xc4, 0xba, 0xba, 0x2c, 0x5d, 0x88, 0x36, 0xaa, 0x65, 0xbd, 0x63, 0x5e, 0x0f, 0xb8, 0x5b, 0x30, - 0x84, 0x4c, 0xb2, 0xb0, 0x48, 0xc3, 0x8f, 0x23, 0x0d, 0x8f, 0xb5, 0x04, 0xb4, 0xf7, 0x27, 0x6b, - 0xf8, 0xa5, 0x9c, 0xb6, 0x11, 0xbf, 0x58, 0xa0, 0xd6, 0xea, 0xab, 0xb0, 0x10, 0xbf, 0xb9, 0xb0, - 0x1a, 0x7d, 0xdc, 0x57, 0x8e, 0xee, 0x38, 0xfc, 0xb6, 0x08, 0xe5, 0x47, 0x48, 0x7d, 0x2b, 0x62, - 0xfa, 0xc7, 0x6e, 0x19, 0xcf, 0x56, 0x3e, 0xf3, 0x37, 0x22, 0x73, 0x7d, 0xc1, 0x75, 0x05, 0x1a, - 0x32, 0xc2, 0xe4, 0x3c, 0xdf, 0xc7, 0xb0, 0xf4, 0x41, 0xaa, 0xde, 0xf4, 0x1c, 0x3f, 0x26, 0xf9, - 0x5d, 0x11, 0xca, 0x1f, 0x3a, 0xb6, 0x52, 0x7a, 0x97, 0xa0, 0x49, 0x7f, 0x03, 0x0f, 0xf5, 0xe4, - 0xbd, 0x92, 0x68, 0x82, 0x26, 0x7f, 0x9e, 0x8f, 0x07, 0xd6, 0xa9, 0xc8, 0xf2, 0xc4, 0x88, 0xae, - 0x42, 0x61, 0xe8, 0x5b, 0x87, 0xc3, 0x10, 0x8b, 0xcf, 0xf4, 0xa2, 0x09, 0x9a, 0xca, 0x3c, 0xf5, - 0x91, 0xe7, 0xe1, 0xbe, 0x38, 0x82, 0xcb, 0xe1, 0x99, 0xfb, 0x98, 0xb7, 0x5f, 0x85, 0x36, 0xf1, - 0x8f, 0x24, 0xae, 0x79, 0xb2, 0x73, 0x7b, 0x51, 0x7c, 0xbb, 0xba, 0xef, 0x93, 0x90, 0xec, 0x17, - 0x7f, 0x51, 0x2a, 0xef, 0xed, 0x1e, 0x1c, 0xd6, 0xd8, 0xc7, 0xa0, 0x6f, 0xfe, 0x33, 0x00, 0x00, - 0xff, 0xff, 0xdc, 0xb2, 0x46, 0x98, 0xe4, 0x3a, 0x00, 0x00, +func (x *Xml) GetVendorExtension() []*NamedAny { + if x != nil { + return 
x.VendorExtension + } + return nil +} + +var File_openapiv2_OpenAPIv2_proto protoreflect.FileDescriptor + +var file_openapiv2_OpenAPIv2_proto_rawDesc = []byte{ + 0x0a, 0x19, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2f, 0x4f, 0x70, 0x65, 0x6e, + 0x41, 0x50, 0x49, 0x76, 0x32, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0a, 0x6f, 0x70, 0x65, + 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x22, 0x6d, 0x0a, 0x18, 0x41, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, + 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x49, 0x74, 0x65, 0x6d, 0x12, 0x2c, + 0x0a, 0x06, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, + 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x63, 0x68, 0x65, + 0x6d, 0x61, 0x48, 0x00, 0x52, 0x06, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x1a, 0x0a, 0x07, + 0x62, 0x6f, 0x6f, 0x6c, 0x65, 0x61, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, 0x52, + 0x07, 0x62, 0x6f, 0x6f, 0x6c, 0x65, 0x61, 0x6e, 0x42, 0x07, 0x0a, 0x05, 0x6f, 0x6e, 0x65, 0x6f, + 0x66, 0x22, 0x45, 0x0a, 0x03, 0x41, 0x6e, 0x79, 0x12, 0x2a, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x79, 0x61, 0x6d, 0x6c, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x04, 0x79, 0x61, 0x6d, 0x6c, 0x22, 0xab, 0x01, 0x0a, 0x0e, 0x41, 0x70, 0x69, + 0x4b, 0x65, 0x79, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x12, 0x12, 0x0a, 0x04, 0x74, + 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, + 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x02, 0x69, 0x6e, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x3f, 0x0a, 0x10, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x5f, + 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, 0x6d, + 0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, 0x0f, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x45, 0x78, 0x74, + 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x94, 0x01, 0x0a, 0x1b, 0x42, 0x61, 0x73, 0x69, 0x63, + 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x65, + 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, + 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x3f, 0x0a, 0x10, + 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, + 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, + 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x41, 
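+	// The GetVendorExtension accessor above follows protoc-gen-go's nil-safe
+	// getter convention: calling it on a nil *Xml returns nil instead of
+	// panicking, so callers can chain getters without explicit nil checks.
+	//
+	// file_openapiv2_OpenAPIv2_proto_rawDesc is the wire-format encoding of
+	// the FileDescriptorProto for openapiv2/OpenAPIv2.proto; the generated
+	// init code hands these bytes to the protoimpl type builder, which
+	// parses them to construct the file's protoreflect descriptor. The hex
+	// rows are machine-generated and not meant to be edited by hand.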
0x6e, 0x79, 0x52, 0x0f, 0x76, 0x65, + 0x6e, 0x64, 0x6f, 0x72, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0xde, 0x01, + 0x0a, 0x0d, 0x42, 0x6f, 0x64, 0x79, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x12, + 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x02, 0x69, 0x6e, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, + 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, + 0x64, 0x12, 0x2a, 0x0a, 0x06, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x18, 0x05, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x12, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x53, + 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x06, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x3f, 0x0a, + 0x10, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, + 0x6e, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, + 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, 0x0f, 0x76, + 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x86, + 0x01, 0x0a, 0x07, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x63, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, + 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x10, + 0x0a, 0x03, 0x75, 0x72, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, 0x72, 0x6c, + 0x12, 0x14, 0x0a, 0x05, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x05, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x12, 0x3f, 0x0a, 0x10, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, + 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, + 0x6d, 0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, 0x0f, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x45, 0x78, + 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x54, 0x0a, 0x07, 0x44, 0x65, 0x66, 0x61, 0x75, + 0x6c, 0x74, 0x12, 0x49, 0x0a, 0x15, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, + 0x5f, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, + 0x61, 0x6d, 0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, 0x14, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, + 0x6e, 0x61, 0x6c, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x22, 0x5b, 0x0a, + 0x0b, 0x44, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x4c, 0x0a, 0x15, + 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x70, 0x72, 0x6f, 0x70, 0x65, + 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x6f, 0x70, + 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x53, 0x63, + 0x68, 0x65, 0x6d, 0x61, 0x52, 0x14, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, + 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x22, 0xe8, 0x05, 0x0a, 0x08, 0x44, + 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x77, 0x61, 
0x67, 0x67, + 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, 0x77, 0x61, 0x67, 0x67, 0x65, + 0x72, 0x12, 0x24, 0x0a, 0x04, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x10, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x49, 0x6e, 0x66, + 0x6f, 0x52, 0x04, 0x69, 0x6e, 0x66, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x68, 0x6f, 0x73, 0x74, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x68, 0x6f, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x62, + 0x61, 0x73, 0x65, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, + 0x62, 0x61, 0x73, 0x65, 0x50, 0x61, 0x74, 0x68, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x63, 0x68, 0x65, + 0x6d, 0x65, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x73, 0x63, 0x68, 0x65, 0x6d, + 0x65, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6d, 0x65, 0x73, 0x18, 0x06, + 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6d, 0x65, 0x73, 0x12, 0x1a, + 0x0a, 0x08, 0x70, 0x72, 0x6f, 0x64, 0x75, 0x63, 0x65, 0x73, 0x18, 0x07, 0x20, 0x03, 0x28, 0x09, + 0x52, 0x08, 0x70, 0x72, 0x6f, 0x64, 0x75, 0x63, 0x65, 0x73, 0x12, 0x27, 0x0a, 0x05, 0x70, 0x61, + 0x74, 0x68, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x6f, 0x70, 0x65, 0x6e, + 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x50, 0x61, 0x74, 0x68, 0x73, 0x52, 0x05, 0x70, 0x61, + 0x74, 0x68, 0x73, 0x12, 0x39, 0x0a, 0x0b, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, + 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x44, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x52, 0x0b, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x40, + 0x0a, 0x0a, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x18, 0x0a, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, + 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x44, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x0a, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, + 0x12, 0x3d, 0x0a, 0x09, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x73, 0x18, 0x0b, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, + 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x44, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x09, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x73, 0x12, + 0x3b, 0x0a, 0x08, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x18, 0x0c, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x1f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x53, + 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x52, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x6d, 0x65, + 0x6e, 0x74, 0x52, 0x08, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x12, 0x52, 0x0a, 0x14, + 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x5f, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x6f, 0x70, 0x65, + 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, + 0x44, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x13, 0x73, 0x65, 0x63, + 0x75, 0x72, 0x69, 0x74, 0x79, 0x44, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x12, 0x23, 0x0a, 0x04, 0x74, 0x61, 0x67, 0x73, 0x18, 0x0e, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0f, + 0x2e, 0x6f, 
0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x61, 0x67, 0x52, + 0x04, 0x74, 0x61, 0x67, 0x73, 0x12, 0x3d, 0x0a, 0x0d, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, + 0x6c, 0x5f, 0x64, 0x6f, 0x63, 0x73, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x6f, + 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, + 0x61, 0x6c, 0x44, 0x6f, 0x63, 0x73, 0x52, 0x0c, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, + 0x44, 0x6f, 0x63, 0x73, 0x12, 0x3f, 0x0a, 0x10, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x5f, 0x65, + 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x10, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, + 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, 0x6d, 0x65, + 0x64, 0x41, 0x6e, 0x79, 0x52, 0x0f, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x45, 0x78, 0x74, 0x65, + 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x55, 0x0a, 0x08, 0x45, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, + 0x73, 0x12, 0x49, 0x0a, 0x15, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, + 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, + 0x6d, 0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, 0x14, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, + 0x61, 0x6c, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x22, 0x83, 0x01, 0x0a, + 0x0c, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x44, 0x6f, 0x63, 0x73, 0x12, 0x20, 0x0a, + 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, + 0x10, 0x0a, 0x03, 0x75, 0x72, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, 0x72, + 0x6c, 0x12, 0x3f, 0x0a, 0x10, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x5f, 0x65, 0x78, 0x74, 0x65, + 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, + 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x41, 0x6e, + 0x79, 0x52, 0x0f, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, + 0x6f, 0x6e, 0x22, 0xff, 0x02, 0x0a, 0x0a, 0x46, 0x69, 0x6c, 0x65, 0x53, 0x63, 0x68, 0x65, 0x6d, + 0x61, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x69, 0x74, + 0x6c, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x74, 0x69, 0x74, 0x6c, 0x65, 0x12, + 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x12, 0x29, 0x0a, 0x07, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, + 0x41, 0x6e, 0x79, 0x52, 0x07, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x12, 0x1a, 0x0a, 0x08, + 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, + 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, + 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x1b, 0x0a, 0x09, + 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6f, 0x6e, 0x6c, 0x79, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x08, 0x72, 0x65, 0x61, 0x64, 0x4f, 
0x6e, 0x6c, 0x79, 0x12, 0x3d, 0x0a, 0x0d, 0x65, 0x78, 0x74, + 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x64, 0x6f, 0x63, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x18, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x45, 0x78, + 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x44, 0x6f, 0x63, 0x73, 0x52, 0x0c, 0x65, 0x78, 0x74, 0x65, + 0x72, 0x6e, 0x61, 0x6c, 0x44, 0x6f, 0x63, 0x73, 0x12, 0x29, 0x0a, 0x07, 0x65, 0x78, 0x61, 0x6d, + 0x70, 0x6c, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, + 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x07, 0x65, 0x78, 0x61, 0x6d, + 0x70, 0x6c, 0x65, 0x12, 0x3f, 0x0a, 0x10, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x5f, 0x65, 0x78, + 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, + 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, + 0x41, 0x6e, 0x79, 0x52, 0x0f, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x45, 0x78, 0x74, 0x65, 0x6e, + 0x73, 0x69, 0x6f, 0x6e, 0x22, 0xab, 0x06, 0x0a, 0x1a, 0x46, 0x6f, 0x72, 0x6d, 0x44, 0x61, 0x74, + 0x61, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x53, 0x75, 0x62, 0x53, 0x63, 0x68, + 0x65, 0x6d, 0x61, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x12, + 0x0e, 0x0a, 0x02, 0x69, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x6e, 0x12, + 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x2a, 0x0a, 0x11, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x65, + 0x6d, 0x70, 0x74, 0x79, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x0f, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, + 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x31, 0x0a, + 0x05, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x6f, + 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x50, 0x72, 0x69, 0x6d, 0x69, 0x74, + 0x69, 0x76, 0x65, 0x73, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x52, 0x05, 0x69, 0x74, 0x65, 0x6d, 0x73, + 0x12, 0x2b, 0x0a, 0x11, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x66, + 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x63, 0x6f, 0x6c, + 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x29, 0x0a, + 0x07, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, + 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x6e, 0x79, 0x52, + 0x07, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x61, 0x78, 0x69, + 0x6d, 0x75, 0x6d, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x01, 0x52, 0x07, 0x6d, 0x61, 0x78, 0x69, 0x6d, + 0x75, 0x6d, 0x12, 0x2b, 0x0a, 0x11, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x5f, + 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x18, 0x0c, 0x20, 
0x01, 0x28, 0x08, 0x52, 0x10, 0x65, + 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x4d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x12, + 0x18, 0x0a, 0x07, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x01, + 0x52, 0x07, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x12, 0x2b, 0x0a, 0x11, 0x65, 0x78, 0x63, + 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x5f, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x18, 0x0e, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x4d, + 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x12, 0x1d, 0x0a, 0x0a, 0x6d, 0x61, 0x78, 0x5f, 0x6c, 0x65, + 0x6e, 0x67, 0x74, 0x68, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x6d, 0x61, 0x78, 0x4c, + 0x65, 0x6e, 0x67, 0x74, 0x68, 0x12, 0x1d, 0x0a, 0x0a, 0x6d, 0x69, 0x6e, 0x5f, 0x6c, 0x65, 0x6e, + 0x67, 0x74, 0x68, 0x18, 0x10, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x6d, 0x69, 0x6e, 0x4c, 0x65, + 0x6e, 0x67, 0x74, 0x68, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x18, + 0x11, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x12, 0x1b, + 0x0a, 0x09, 0x6d, 0x61, 0x78, 0x5f, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x12, 0x20, 0x01, 0x28, + 0x03, 0x52, 0x08, 0x6d, 0x61, 0x78, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x12, 0x1b, 0x0a, 0x09, 0x6d, + 0x69, 0x6e, 0x5f, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x13, 0x20, 0x01, 0x28, 0x03, 0x52, 0x08, + 0x6d, 0x69, 0x6e, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x75, 0x6e, 0x69, 0x71, + 0x75, 0x65, 0x5f, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x14, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, + 0x75, 0x6e, 0x69, 0x71, 0x75, 0x65, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x12, 0x23, 0x0a, 0x04, 0x65, + 0x6e, 0x75, 0x6d, 0x18, 0x15, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, + 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x04, 0x65, 0x6e, 0x75, 0x6d, + 0x12, 0x1f, 0x0a, 0x0b, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x5f, 0x6f, 0x66, 0x18, + 0x16, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0a, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x4f, + 0x66, 0x12, 0x3f, 0x0a, 0x10, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x5f, 0x65, 0x78, 0x74, 0x65, + 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x17, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, + 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x41, 0x6e, + 0x79, 0x52, 0x0f, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, + 0x6f, 0x6e, 0x22, 0xab, 0x05, 0x0a, 0x06, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x12, 0x0a, + 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, + 0x65, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x31, 0x0a, 0x05, 0x69, 0x74, 0x65, + 0x6d, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, + 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x50, 0x72, 0x69, 0x6d, 0x69, 0x74, 0x69, 0x76, 0x65, 0x73, + 0x49, 0x74, 0x65, 0x6d, 0x73, 0x52, 0x05, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x12, 0x2b, 0x0a, 0x11, + 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, + 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x29, 0x0a, 0x07, 0x64, 0x65, 0x66, + 0x61, 0x75, 0x6c, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x6f, 
0x70, 0x65, + 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x07, 0x64, 0x65, 0x66, + 0x61, 0x75, 0x6c, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x18, + 0x06, 0x20, 0x01, 0x28, 0x01, 0x52, 0x07, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x12, 0x2b, + 0x0a, 0x11, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x5f, 0x6d, 0x61, 0x78, 0x69, + 0x6d, 0x75, 0x6d, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x65, 0x78, 0x63, 0x6c, 0x75, + 0x73, 0x69, 0x76, 0x65, 0x4d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x12, 0x18, 0x0a, 0x07, 0x6d, + 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x18, 0x08, 0x20, 0x01, 0x28, 0x01, 0x52, 0x07, 0x6d, 0x69, + 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x12, 0x2b, 0x0a, 0x11, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, + 0x76, 0x65, 0x5f, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x18, 0x09, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x10, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x4d, 0x69, 0x6e, 0x69, 0x6d, + 0x75, 0x6d, 0x12, 0x1d, 0x0a, 0x0a, 0x6d, 0x61, 0x78, 0x5f, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, + 0x18, 0x0a, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x6d, 0x61, 0x78, 0x4c, 0x65, 0x6e, 0x67, 0x74, + 0x68, 0x12, 0x1d, 0x0a, 0x0a, 0x6d, 0x69, 0x6e, 0x5f, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x18, + 0x0b, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x6d, 0x69, 0x6e, 0x4c, 0x65, 0x6e, 0x67, 0x74, 0x68, + 0x12, 0x18, 0x0a, 0x07, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x18, 0x0c, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x07, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x61, + 0x78, 0x5f, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x03, 0x52, 0x08, 0x6d, + 0x61, 0x78, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x69, 0x6e, 0x5f, 0x69, + 0x74, 0x65, 0x6d, 0x73, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x03, 0x52, 0x08, 0x6d, 0x69, 0x6e, 0x49, + 0x74, 0x65, 0x6d, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x75, 0x6e, 0x69, 0x71, 0x75, 0x65, 0x5f, 0x69, + 0x74, 0x65, 0x6d, 0x73, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x75, 0x6e, 0x69, 0x71, + 0x75, 0x65, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x12, 0x23, 0x0a, 0x04, 0x65, 0x6e, 0x75, 0x6d, 0x18, + 0x10, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, + 0x76, 0x32, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x04, 0x65, 0x6e, 0x75, 0x6d, 0x12, 0x1f, 0x0a, 0x0b, + 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x5f, 0x6f, 0x66, 0x18, 0x11, 0x20, 0x01, 0x28, + 0x01, 0x52, 0x0a, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x4f, 0x66, 0x12, 0x20, 0x0a, + 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x12, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, + 0x3f, 0x0a, 0x10, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, + 0x69, 0x6f, 0x6e, 0x18, 0x13, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, + 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, + 0x0f, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, + 0x22, 0xfd, 0x05, 0x0a, 0x18, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x50, 0x61, 0x72, 0x61, 0x6d, + 0x65, 0x74, 0x65, 0x72, 0x53, 0x75, 0x62, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x1a, 0x0a, + 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x6e, 0x18, + 0x02, 0x20, 
0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x6e, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, + 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, + 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, + 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, + 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x06, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x31, 0x0a, 0x05, 0x69, + 0x74, 0x65, 0x6d, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x6f, 0x70, 0x65, + 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x50, 0x72, 0x69, 0x6d, 0x69, 0x74, 0x69, 0x76, + 0x65, 0x73, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x52, 0x05, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x12, 0x2b, + 0x0a, 0x11, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x66, 0x6f, 0x72, + 0x6d, 0x61, 0x74, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x63, 0x6f, 0x6c, 0x6c, 0x65, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x29, 0x0a, 0x07, 0x64, + 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x6f, + 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x07, 0x64, + 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, + 0x6d, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x01, 0x52, 0x07, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, + 0x12, 0x2b, 0x0a, 0x11, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x5f, 0x6d, 0x61, + 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x65, 0x78, 0x63, + 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x4d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x12, 0x18, 0x0a, + 0x07, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x01, 0x52, 0x07, + 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x12, 0x2b, 0x0a, 0x11, 0x65, 0x78, 0x63, 0x6c, 0x75, + 0x73, 0x69, 0x76, 0x65, 0x5f, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x18, 0x0d, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x10, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x4d, 0x69, 0x6e, + 0x69, 0x6d, 0x75, 0x6d, 0x12, 0x1d, 0x0a, 0x0a, 0x6d, 0x61, 0x78, 0x5f, 0x6c, 0x65, 0x6e, 0x67, + 0x74, 0x68, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x6d, 0x61, 0x78, 0x4c, 0x65, 0x6e, + 0x67, 0x74, 0x68, 0x12, 0x1d, 0x0a, 0x0a, 0x6d, 0x69, 0x6e, 0x5f, 0x6c, 0x65, 0x6e, 0x67, 0x74, + 0x68, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x6d, 0x69, 0x6e, 0x4c, 0x65, 0x6e, 0x67, + 0x74, 0x68, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x18, 0x10, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x07, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x12, 0x1b, 0x0a, 0x09, + 0x6d, 0x61, 0x78, 0x5f, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x11, 0x20, 0x01, 0x28, 0x03, 0x52, + 0x08, 0x6d, 0x61, 0x78, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x69, 0x6e, + 0x5f, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x12, 0x20, 0x01, 0x28, 0x03, 0x52, 0x08, 0x6d, 0x69, + 0x6e, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x75, 0x6e, 0x69, 0x71, 0x75, 0x65, + 0x5f, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x13, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x75, 0x6e, + 0x69, 0x71, 0x75, 0x65, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x12, 0x23, 0x0a, 0x04, 0x65, 0x6e, 0x75, + 0x6d, 0x18, 0x14, 0x20, 0x03, 0x28, 
0x0b, 0x32, 0x0f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, + 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x04, 0x65, 0x6e, 0x75, 0x6d, 0x12, 0x1f, + 0x0a, 0x0b, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x5f, 0x6f, 0x66, 0x18, 0x15, 0x20, + 0x01, 0x28, 0x01, 0x52, 0x0a, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x4f, 0x66, 0x12, + 0x3f, 0x0a, 0x10, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, + 0x69, 0x6f, 0x6e, 0x18, 0x16, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, + 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, + 0x0f, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, + 0x22, 0x57, 0x0a, 0x07, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x12, 0x4c, 0x0a, 0x15, 0x61, + 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, + 0x74, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x6f, 0x70, 0x65, + 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x48, 0x65, 0x61, + 0x64, 0x65, 0x72, 0x52, 0x14, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x50, + 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x22, 0xa1, 0x02, 0x0a, 0x04, 0x49, 0x6e, + 0x66, 0x6f, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x69, 0x74, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x05, 0x74, 0x69, 0x74, 0x6c, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, + 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x28, 0x0a, 0x10, 0x74, 0x65, 0x72, 0x6d, 0x73, 0x5f, 0x6f, 0x66, + 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, + 0x74, 0x65, 0x72, 0x6d, 0x73, 0x4f, 0x66, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x2d, + 0x0a, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x63, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x13, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6e, + 0x74, 0x61, 0x63, 0x74, 0x52, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x63, 0x74, 0x12, 0x2d, 0x0a, + 0x07, 0x6c, 0x69, 0x63, 0x65, 0x6e, 0x73, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, + 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x63, 0x65, + 0x6e, 0x73, 0x65, 0x52, 0x07, 0x6c, 0x69, 0x63, 0x65, 0x6e, 0x73, 0x65, 0x12, 0x3f, 0x0a, 0x10, + 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, + 0x18, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, + 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, 0x0f, 0x76, 0x65, + 0x6e, 0x64, 0x6f, 0x72, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x37, 0x0a, + 0x09, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x49, 0x74, 0x65, 0x6d, 0x12, 0x2a, 0x0a, 0x06, 0x73, 0x63, + 0x68, 0x65, 0x6d, 0x61, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x6f, 0x70, 0x65, + 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x06, + 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x22, 0x44, 0x0a, 0x0d, 0x4a, 0x73, 0x6f, 0x6e, 0x52, 0x65, + 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x11, 0x0a, 
0x04, 0x5f, 0x72, 0x65, 0x66, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x52, 0x65, 0x66, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, + 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x70, 0x0a, 0x07, + 0x4c, 0x69, 0x63, 0x65, 0x6e, 0x73, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x75, + 0x72, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, 0x72, 0x6c, 0x12, 0x3f, 0x0a, + 0x10, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, + 0x6e, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, + 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, 0x0f, 0x76, + 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x45, + 0x0a, 0x08, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x41, 0x6e, 0x79, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, + 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x25, + 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, + 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x4b, 0x0a, 0x0b, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x48, 0x65, + 0x61, 0x64, 0x65, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x28, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, + 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x52, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x22, 0x51, 0x0a, 0x0e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x50, 0x61, 0x72, 0x61, 0x6d, + 0x65, 0x74, 0x65, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x2b, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, + 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x52, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x4f, 0x0a, 0x0d, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x50, 0x61, + 0x74, 0x68, 0x49, 0x74, 0x65, 0x6d, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x2a, 0x0a, 0x05, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, + 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x50, 0x61, 0x74, 0x68, 0x49, 0x74, 0x65, 0x6d, 0x52, + 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x4f, 0x0a, 0x0d, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x2a, 0x0a, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, + 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x59, 0x0a, 0x12, 0x4e, 0x61, 0x6d, 0x65, 0x64, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 
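+	// Reading the descriptor is mechanical proto wire format. For example,
+	// the "NamedAny" entry a few rows up decodes as: 0x22 is the tag for
+	// field 4 (message_type) of FileDescriptorProto with wire type LEN,
+	// 0x45 (69) is the byte length of the embedded DescriptorProto, and
+	// within it 0x0a, 0x08 introduces the 8-byte message name "NamedAny"
+	// (field 1, wire type LEN).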
0x12, 0x0a, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, + 0x65, 0x12, 0x2f, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x19, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x22, 0x4b, 0x0a, 0x0b, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, + 0x61, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x28, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, + 0x32, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, + 0x6d, 0x0a, 0x1c, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, + 0x44, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x49, 0x74, 0x65, 0x6d, 0x12, + 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x12, 0x39, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, + 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x44, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x49, 0x74, 0x65, 0x6d, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x37, + 0x0a, 0x0b, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x12, 0x12, 0x0a, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, + 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x55, 0x0a, 0x10, 0x4e, 0x61, 0x6d, 0x65, 0x64, + 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x41, 0x72, 0x72, 0x61, 0x79, 0x12, 0x12, 0x0a, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, + 0x2d, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, + 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x74, 0x72, 0x69, + 0x6e, 0x67, 0x41, 0x72, 0x72, 0x61, 0x79, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0xb5, + 0x03, 0x0a, 0x10, 0x4e, 0x6f, 0x6e, 0x42, 0x6f, 0x64, 0x79, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, + 0x74, 0x65, 0x72, 0x12, 0x65, 0x0a, 0x1b, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x5f, 0x70, 0x61, + 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x5f, 0x73, 0x75, 0x62, 0x5f, 0x73, 0x63, 0x68, 0x65, + 0x6d, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, + 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x50, 0x61, 0x72, 0x61, + 0x6d, 0x65, 0x74, 0x65, 0x72, 0x53, 0x75, 0x62, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x48, 0x00, + 0x52, 0x18, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, + 0x72, 0x53, 0x75, 0x62, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x6c, 0x0a, 0x1e, 0x66, 0x6f, + 0x72, 0x6d, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, + 0x72, 0x5f, 0x73, 0x75, 0x62, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, + 0x46, 0x6f, 
0x72, 0x6d, 0x44, 0x61, 0x74, 0x61, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, + 0x72, 0x53, 0x75, 0x62, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x48, 0x00, 0x52, 0x1a, 0x66, 0x6f, + 0x72, 0x6d, 0x44, 0x61, 0x74, 0x61, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x53, + 0x75, 0x62, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x62, 0x0a, 0x1a, 0x71, 0x75, 0x65, 0x72, + 0x79, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x5f, 0x73, 0x75, 0x62, 0x5f, + 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x6f, + 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x50, + 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x53, 0x75, 0x62, 0x53, 0x63, 0x68, 0x65, 0x6d, + 0x61, 0x48, 0x00, 0x52, 0x17, 0x71, 0x75, 0x65, 0x72, 0x79, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, + 0x74, 0x65, 0x72, 0x53, 0x75, 0x62, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x5f, 0x0a, 0x19, + 0x70, 0x61, 0x74, 0x68, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x5f, 0x73, + 0x75, 0x62, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x22, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x50, 0x61, 0x74, + 0x68, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x53, 0x75, 0x62, 0x53, 0x63, 0x68, + 0x65, 0x6d, 0x61, 0x48, 0x00, 0x52, 0x16, 0x70, 0x61, 0x74, 0x68, 0x50, 0x61, 0x72, 0x61, 0x6d, + 0x65, 0x74, 0x65, 0x72, 0x53, 0x75, 0x62, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x42, 0x07, 0x0a, + 0x05, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x22, 0xa1, 0x02, 0x0a, 0x18, 0x4f, 0x61, 0x75, 0x74, 0x68, + 0x32, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x43, 0x6f, 0x64, 0x65, 0x53, 0x65, 0x63, 0x75, 0x72, + 0x69, 0x74, 0x79, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x66, 0x6c, 0x6f, 0x77, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x66, 0x6c, 0x6f, 0x77, 0x12, 0x30, 0x0a, 0x06, 0x73, + 0x63, 0x6f, 0x70, 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x6f, 0x70, + 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x61, 0x75, 0x74, 0x68, 0x32, 0x53, + 0x63, 0x6f, 0x70, 0x65, 0x73, 0x52, 0x06, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x12, 0x2b, 0x0a, + 0x11, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x75, + 0x72, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, + 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x55, 0x72, 0x6c, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x6f, + 0x6b, 0x65, 0x6e, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, + 0x6f, 0x6b, 0x65, 0x6e, 0x55, 0x72, 0x6c, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, + 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x3f, 0x0a, 0x10, 0x76, 0x65, 0x6e, + 0x64, 0x6f, 0x72, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x07, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, + 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, 0x0f, 0x76, 0x65, 0x6e, 0x64, 0x6f, + 0x72, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0xf5, 0x01, 0x0a, 0x19, 0x4f, + 0x61, 0x75, 0x74, 0x68, 0x32, 0x41, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 
0x74, 0x79, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x12, 0x0a, 0x04, + 0x66, 0x6c, 0x6f, 0x77, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x66, 0x6c, 0x6f, 0x77, + 0x12, 0x30, 0x0a, 0x06, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x18, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x61, + 0x75, 0x74, 0x68, 0x32, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x52, 0x06, 0x73, 0x63, 0x6f, 0x70, + 0x65, 0x73, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x5f, 0x75, 0x72, 0x6c, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x55, 0x72, 0x6c, 0x12, + 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x05, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x12, 0x3f, 0x0a, 0x10, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x5f, 0x65, 0x78, 0x74, 0x65, + 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, + 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x41, 0x6e, + 0x79, 0x52, 0x0f, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, + 0x6f, 0x6e, 0x22, 0x82, 0x02, 0x0a, 0x16, 0x4f, 0x61, 0x75, 0x74, 0x68, 0x32, 0x49, 0x6d, 0x70, + 0x6c, 0x69, 0x63, 0x69, 0x74, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x12, 0x12, 0x0a, + 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, + 0x65, 0x12, 0x12, 0x0a, 0x04, 0x66, 0x6c, 0x6f, 0x77, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x04, 0x66, 0x6c, 0x6f, 0x77, 0x12, 0x30, 0x0a, 0x06, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, + 0x76, 0x32, 0x2e, 0x4f, 0x61, 0x75, 0x74, 0x68, 0x32, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x52, + 0x06, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x12, 0x2b, 0x0a, 0x11, 0x61, 0x75, 0x74, 0x68, 0x6f, + 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x10, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x55, 0x72, 0x6c, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x3f, 0x0a, 0x10, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, + 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, + 0x6d, 0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, 0x0f, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x45, 0x78, + 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0xf2, 0x01, 0x0a, 0x16, 0x4f, 0x61, 0x75, 0x74, + 0x68, 0x32, 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, + 0x74, 0x79, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x66, 0x6c, 0x6f, 0x77, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x66, 0x6c, 0x6f, 0x77, 0x12, 0x30, 0x0a, 0x06, 0x73, 0x63, + 0x6f, 0x70, 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x6f, 0x70, 0x65, + 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x61, 
0x75, 0x74, 0x68, 0x32, 0x53, 0x63, + 0x6f, 0x70, 0x65, 0x73, 0x52, 0x06, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x12, 0x1b, 0x0a, 0x09, + 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x08, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x55, 0x72, 0x6c, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, + 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, + 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x3f, 0x0a, 0x10, 0x76, + 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, + 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, + 0x76, 0x32, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, 0x0f, 0x76, 0x65, 0x6e, + 0x64, 0x6f, 0x72, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x5c, 0x0a, 0x0c, + 0x4f, 0x61, 0x75, 0x74, 0x68, 0x32, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x12, 0x4c, 0x0a, 0x15, + 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x70, 0x72, 0x6f, 0x70, 0x65, + 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x6f, 0x70, + 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x53, 0x74, + 0x72, 0x69, 0x6e, 0x67, 0x52, 0x14, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, + 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x22, 0x9e, 0x04, 0x0a, 0x09, 0x4f, + 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x61, 0x67, 0x73, + 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x04, 0x74, 0x61, 0x67, 0x73, 0x12, 0x18, 0x0a, 0x07, + 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, + 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, + 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x3d, 0x0a, 0x0d, 0x65, 0x78, 0x74, 0x65, + 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x64, 0x6f, 0x63, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x18, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x45, 0x78, 0x74, + 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x44, 0x6f, 0x63, 0x73, 0x52, 0x0c, 0x65, 0x78, 0x74, 0x65, 0x72, + 0x6e, 0x61, 0x6c, 0x44, 0x6f, 0x63, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x6f, 0x70, 0x65, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6f, + 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x72, + 0x6f, 0x64, 0x75, 0x63, 0x65, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x70, 0x72, + 0x6f, 0x64, 0x75, 0x63, 0x65, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6d, + 0x65, 0x73, 0x18, 0x07, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6d, + 0x65, 0x73, 0x12, 0x3a, 0x0a, 0x0a, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, + 0x18, 0x08, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, + 0x2e, 0x76, 0x32, 0x2e, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x49, 0x74, + 0x65, 0x6d, 0x52, 0x0a, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x12, 0x33, + 0x0a, 0x09, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x73, 0x18, 0x09, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x15, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 
0x2e, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x73, 0x52, 0x09, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x73, 0x18, 0x0a, + 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x73, 0x12, 0x1e, 0x0a, + 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x0b, 0x20, 0x01, 0x28, + 0x08, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x3b, 0x0a, + 0x08, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x18, 0x0c, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x1f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x65, 0x63, + 0x75, 0x72, 0x69, 0x74, 0x79, 0x52, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, + 0x52, 0x08, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x12, 0x3f, 0x0a, 0x10, 0x76, 0x65, + 0x6e, 0x64, 0x6f, 0x72, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x0d, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, + 0x32, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, 0x0f, 0x76, 0x65, 0x6e, 0x64, + 0x6f, 0x72, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0xa6, 0x01, 0x0a, 0x09, + 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x12, 0x42, 0x0a, 0x0e, 0x62, 0x6f, 0x64, + 0x79, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x19, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x42, + 0x6f, 0x64, 0x79, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x48, 0x00, 0x52, 0x0d, + 0x62, 0x6f, 0x64, 0x79, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x12, 0x4c, 0x0a, + 0x12, 0x6e, 0x6f, 0x6e, 0x5f, 0x62, 0x6f, 0x64, 0x79, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, + 0x74, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x6f, 0x70, 0x65, 0x6e, + 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x6f, 0x6e, 0x42, 0x6f, 0x64, 0x79, 0x50, 0x61, + 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x48, 0x00, 0x52, 0x10, 0x6e, 0x6f, 0x6e, 0x42, 0x6f, + 0x64, 0x79, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x42, 0x07, 0x0a, 0x05, 0x6f, + 0x6e, 0x65, 0x6f, 0x66, 0x22, 0x67, 0x0a, 0x14, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, + 0x72, 0x44, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x4f, 0x0a, 0x15, + 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x70, 0x72, 0x6f, 0x70, 0x65, + 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x6f, 0x70, + 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x50, 0x61, + 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x52, 0x14, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, + 0x6e, 0x61, 0x6c, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x22, 0x94, 0x01, + 0x0a, 0x0e, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x49, 0x74, 0x65, 0x6d, + 0x12, 0x35, 0x0a, 0x09, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, + 0x2e, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x48, 0x00, 0x52, 0x09, 0x70, 0x61, + 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x12, 0x42, 0x0a, 0x0e, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, + 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x19, 0x2e, 
0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4a, 0x73, 0x6f, + 0x6e, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x48, 0x00, 0x52, 0x0d, 0x6a, 0x73, + 0x6f, 0x6e, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x42, 0x07, 0x0a, 0x05, 0x6f, + 0x6e, 0x65, 0x6f, 0x66, 0x22, 0xcf, 0x03, 0x0a, 0x08, 0x50, 0x61, 0x74, 0x68, 0x49, 0x74, 0x65, + 0x6d, 0x12, 0x11, 0x0a, 0x04, 0x5f, 0x72, 0x65, 0x66, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x03, 0x52, 0x65, 0x66, 0x12, 0x27, 0x0a, 0x03, 0x67, 0x65, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x15, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4f, + 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x03, 0x67, 0x65, 0x74, 0x12, 0x27, 0x0a, + 0x03, 0x70, 0x75, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x6f, 0x70, 0x65, + 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x52, 0x03, 0x70, 0x75, 0x74, 0x12, 0x29, 0x0a, 0x04, 0x70, 0x6f, 0x73, 0x74, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, + 0x32, 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x04, 0x70, 0x6f, 0x73, + 0x74, 0x12, 0x2d, 0x0a, 0x06, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x15, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4f, + 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x06, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, + 0x12, 0x2f, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x15, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4f, + 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x12, 0x29, 0x0a, 0x04, 0x68, 0x65, 0x61, 0x64, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x15, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x70, 0x65, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x04, 0x68, 0x65, 0x61, 0x64, 0x12, 0x2b, 0x0a, 0x05, + 0x70, 0x61, 0x74, 0x63, 0x68, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x6f, 0x70, + 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x52, 0x05, 0x70, 0x61, 0x74, 0x63, 0x68, 0x12, 0x3a, 0x0a, 0x0a, 0x70, 0x61, 0x72, + 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1a, 0x2e, + 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x50, 0x61, 0x72, 0x61, 0x6d, + 0x65, 0x74, 0x65, 0x72, 0x73, 0x49, 0x74, 0x65, 0x6d, 0x52, 0x0a, 0x70, 0x61, 0x72, 0x61, 0x6d, + 0x65, 0x74, 0x65, 0x72, 0x73, 0x12, 0x3f, 0x0a, 0x10, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x5f, + 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, 0x6d, + 0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, 0x0f, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x45, 0x78, 0x74, + 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0xfb, 0x05, 0x0a, 0x16, 0x50, 0x61, 0x74, 0x68, 0x50, + 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x53, 0x75, 0x62, 0x53, 0x63, 0x68, 0x65, 0x6d, + 0x61, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x12, 0x0e, 0x0a, + 0x02, 0x69, 0x6e, 0x18, 0x02, 0x20, 
0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x6e, 0x12, 0x20, 0x0a, + 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, + 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, + 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, + 0x31, 0x0a, 0x05, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, + 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x50, 0x72, 0x69, 0x6d, + 0x69, 0x74, 0x69, 0x76, 0x65, 0x73, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x52, 0x05, 0x69, 0x74, 0x65, + 0x6d, 0x73, 0x12, 0x2b, 0x0a, 0x11, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x63, + 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, + 0x29, 0x0a, 0x07, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x0f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x6e, + 0x79, 0x52, 0x07, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x61, + 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x01, 0x52, 0x07, 0x6d, 0x61, 0x78, + 0x69, 0x6d, 0x75, 0x6d, 0x12, 0x2b, 0x0a, 0x11, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, + 0x65, 0x5f, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x10, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x4d, 0x61, 0x78, 0x69, 0x6d, 0x75, + 0x6d, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x18, 0x0c, 0x20, 0x01, + 0x28, 0x01, 0x52, 0x07, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x12, 0x2b, 0x0a, 0x11, 0x65, + 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x5f, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, + 0x18, 0x0d, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, + 0x65, 0x4d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x12, 0x1d, 0x0a, 0x0a, 0x6d, 0x61, 0x78, 0x5f, + 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x6d, 0x61, + 0x78, 0x4c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x12, 0x1d, 0x0a, 0x0a, 0x6d, 0x69, 0x6e, 0x5f, 0x6c, + 0x65, 0x6e, 0x67, 0x74, 0x68, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x6d, 0x69, 0x6e, + 0x4c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, + 0x6e, 0x18, 0x10, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, + 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x61, 0x78, 0x5f, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x11, 0x20, + 0x01, 0x28, 0x03, 0x52, 0x08, 0x6d, 0x61, 0x78, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x12, 0x1b, 0x0a, + 0x09, 0x6d, 0x69, 0x6e, 0x5f, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x12, 0x20, 0x01, 0x28, 0x03, + 0x52, 0x08, 0x6d, 0x69, 0x6e, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x75, 0x6e, + 0x69, 0x71, 0x75, 0x65, 0x5f, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x13, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x0b, 0x75, 0x6e, 0x69, 0x71, 0x75, 0x65, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x12, 0x23, 0x0a, + 0x04, 0x65, 0x6e, 0x75, 0x6d, 0x18, 0x14, 0x20, 0x03, 0x28, 
0x0b, 0x32, 0x0f, 0x2e, 0x6f, 0x70, + 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x04, 0x65, 0x6e, + 0x75, 0x6d, 0x12, 0x1f, 0x0a, 0x0b, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x5f, 0x6f, + 0x66, 0x18, 0x15, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0a, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, + 0x65, 0x4f, 0x66, 0x12, 0x3f, 0x0a, 0x10, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x5f, 0x65, 0x78, + 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x16, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, + 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, + 0x41, 0x6e, 0x79, 0x52, 0x0f, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x45, 0x78, 0x74, 0x65, 0x6e, + 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x77, 0x0a, 0x05, 0x50, 0x61, 0x74, 0x68, 0x73, 0x12, 0x3f, 0x0a, + 0x10, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, + 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, + 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, 0x0f, 0x76, + 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x2d, + 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x6f, + 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x50, + 0x61, 0x74, 0x68, 0x49, 0x74, 0x65, 0x6d, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x22, 0x92, 0x05, + 0x0a, 0x0f, 0x50, 0x72, 0x69, 0x6d, 0x69, 0x74, 0x69, 0x76, 0x65, 0x73, 0x49, 0x74, 0x65, 0x6d, + 0x73, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x31, 0x0a, + 0x05, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x6f, + 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x50, 0x72, 0x69, 0x6d, 0x69, 0x74, + 0x69, 0x76, 0x65, 0x73, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x52, 0x05, 0x69, 0x74, 0x65, 0x6d, 0x73, + 0x12, 0x2b, 0x0a, 0x11, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x66, + 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x63, 0x6f, 0x6c, + 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x29, 0x0a, + 0x07, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, + 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x6e, 0x79, 0x52, + 0x07, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x61, 0x78, 0x69, + 0x6d, 0x75, 0x6d, 0x18, 0x06, 0x20, 0x01, 0x28, 0x01, 0x52, 0x07, 0x6d, 0x61, 0x78, 0x69, 0x6d, + 0x75, 0x6d, 0x12, 0x2b, 0x0a, 0x11, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x5f, + 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x65, + 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x4d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x12, + 0x18, 0x0a, 0x07, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x18, 0x08, 0x20, 0x01, 0x28, 0x01, + 0x52, 0x07, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x12, 0x2b, 0x0a, 0x11, 0x65, 0x78, 0x63, + 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x5f, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x18, 0x09, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 
0x65, 0x4d, + 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x12, 0x1d, 0x0a, 0x0a, 0x6d, 0x61, 0x78, 0x5f, 0x6c, 0x65, + 0x6e, 0x67, 0x74, 0x68, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x6d, 0x61, 0x78, 0x4c, + 0x65, 0x6e, 0x67, 0x74, 0x68, 0x12, 0x1d, 0x0a, 0x0a, 0x6d, 0x69, 0x6e, 0x5f, 0x6c, 0x65, 0x6e, + 0x67, 0x74, 0x68, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x6d, 0x69, 0x6e, 0x4c, 0x65, + 0x6e, 0x67, 0x74, 0x68, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x18, + 0x0c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x12, 0x1b, + 0x0a, 0x09, 0x6d, 0x61, 0x78, 0x5f, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x0d, 0x20, 0x01, 0x28, + 0x03, 0x52, 0x08, 0x6d, 0x61, 0x78, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x12, 0x1b, 0x0a, 0x09, 0x6d, + 0x69, 0x6e, 0x5f, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x03, 0x52, 0x08, + 0x6d, 0x69, 0x6e, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x75, 0x6e, 0x69, 0x71, + 0x75, 0x65, 0x5f, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, + 0x75, 0x6e, 0x69, 0x71, 0x75, 0x65, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x12, 0x23, 0x0a, 0x04, 0x65, + 0x6e, 0x75, 0x6d, 0x18, 0x10, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, + 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x04, 0x65, 0x6e, 0x75, 0x6d, + 0x12, 0x1f, 0x0a, 0x0b, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x5f, 0x6f, 0x66, 0x18, + 0x11, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0a, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x4f, + 0x66, 0x12, 0x3f, 0x0a, 0x10, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x5f, 0x65, 0x78, 0x74, 0x65, + 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x12, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, + 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x41, 0x6e, + 0x79, 0x52, 0x0f, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, + 0x6f, 0x6e, 0x22, 0x5a, 0x0a, 0x0a, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, + 0x12, 0x4c, 0x0a, 0x15, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x70, + 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x17, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, 0x6d, + 0x65, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x14, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, + 0x6f, 0x6e, 0x61, 0x6c, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x22, 0xa8, + 0x06, 0x0a, 0x17, 0x51, 0x75, 0x65, 0x72, 0x79, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, + 0x72, 0x53, 0x75, 0x62, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, + 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, + 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x6e, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x02, 0x69, 0x6e, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, + 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x2a, 0x0a, 0x11, + 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x5f, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0f, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x45, 0x6d, + 0x70, 0x74, 
0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, + 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a, 0x06, + 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x66, 0x6f, + 0x72, 0x6d, 0x61, 0x74, 0x12, 0x31, 0x0a, 0x05, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x08, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, + 0x2e, 0x50, 0x72, 0x69, 0x6d, 0x69, 0x74, 0x69, 0x76, 0x65, 0x73, 0x49, 0x74, 0x65, 0x6d, 0x73, + 0x52, 0x05, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x12, 0x2b, 0x0a, 0x11, 0x63, 0x6f, 0x6c, 0x6c, 0x65, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x09, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x10, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x6f, + 0x72, 0x6d, 0x61, 0x74, 0x12, 0x29, 0x0a, 0x07, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x18, + 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, + 0x76, 0x32, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x07, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x12, + 0x18, 0x0a, 0x07, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x01, + 0x52, 0x07, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x12, 0x2b, 0x0a, 0x11, 0x65, 0x78, 0x63, + 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x5f, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x18, 0x0c, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x4d, + 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, + 0x6d, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x01, 0x52, 0x07, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, + 0x12, 0x2b, 0x0a, 0x11, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x5f, 0x6d, 0x69, + 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x65, 0x78, 0x63, + 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x4d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x12, 0x1d, 0x0a, + 0x0a, 0x6d, 0x61, 0x78, 0x5f, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x18, 0x0f, 0x20, 0x01, 0x28, + 0x03, 0x52, 0x09, 0x6d, 0x61, 0x78, 0x4c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x12, 0x1d, 0x0a, 0x0a, + 0x6d, 0x69, 0x6e, 0x5f, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x18, 0x10, 0x20, 0x01, 0x28, 0x03, + 0x52, 0x09, 0x6d, 0x69, 0x6e, 0x4c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x12, 0x18, 0x0a, 0x07, 0x70, + 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x18, 0x11, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x70, 0x61, + 0x74, 0x74, 0x65, 0x72, 0x6e, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x61, 0x78, 0x5f, 0x69, 0x74, 0x65, + 0x6d, 0x73, 0x18, 0x12, 0x20, 0x01, 0x28, 0x03, 0x52, 0x08, 0x6d, 0x61, 0x78, 0x49, 0x74, 0x65, + 0x6d, 0x73, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x69, 0x6e, 0x5f, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, + 0x13, 0x20, 0x01, 0x28, 0x03, 0x52, 0x08, 0x6d, 0x69, 0x6e, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x12, + 0x21, 0x0a, 0x0c, 0x75, 0x6e, 0x69, 0x71, 0x75, 0x65, 0x5f, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, + 0x14, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x75, 0x6e, 0x69, 0x71, 0x75, 0x65, 0x49, 0x74, 0x65, + 0x6d, 0x73, 0x12, 0x23, 0x0a, 0x04, 0x65, 0x6e, 0x75, 0x6d, 0x18, 0x15, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x0f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x6e, + 0x79, 0x52, 0x04, 0x65, 0x6e, 0x75, 0x6d, 0x12, 0x1f, 0x0a, 0x0b, 0x6d, 0x75, 0x6c, 0x74, 0x69, + 0x70, 0x6c, 0x65, 0x5f, 0x6f, 0x66, 0x18, 0x16, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0a, 0x6d, 0x75, + 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 
0x4f, 0x66, 0x12, 0x3f, 0x0a, 0x10, 0x76, 0x65, 0x6e, 0x64, + 0x6f, 0x72, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x17, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, + 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, 0x0f, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, + 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0xfe, 0x01, 0x0a, 0x08, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, + 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2e, 0x0a, 0x06, 0x73, 0x63, 0x68, 0x65, + 0x6d, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, + 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x49, 0x74, 0x65, 0x6d, + 0x52, 0x06, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x2d, 0x0a, 0x07, 0x68, 0x65, 0x61, 0x64, + 0x65, 0x72, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x6f, 0x70, 0x65, 0x6e, + 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x52, 0x07, + 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x12, 0x30, 0x0a, 0x08, 0x65, 0x78, 0x61, 0x6d, 0x70, + 0x6c, 0x65, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, + 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x45, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x73, 0x52, + 0x08, 0x65, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x73, 0x12, 0x3f, 0x0a, 0x10, 0x76, 0x65, 0x6e, + 0x64, 0x6f, 0x72, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, + 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, 0x0f, 0x76, 0x65, 0x6e, 0x64, 0x6f, + 0x72, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x65, 0x0a, 0x13, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x44, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x12, 0x4e, 0x0a, 0x15, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, + 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x19, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, + 0x6d, 0x65, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x52, 0x14, 0x61, 0x64, 0x64, + 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, + 0x73, 0x22, 0x90, 0x01, 0x0a, 0x0d, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x56, 0x61, + 0x6c, 0x75, 0x65, 0x12, 0x32, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, + 0x76, 0x32, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x00, 0x52, 0x08, 0x72, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x42, 0x0a, 0x0e, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, + 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x19, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4a, 0x73, 0x6f, + 0x6e, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x48, 0x00, 0x52, 0x0d, 0x6a, 0x73, + 0x6f, 0x6e, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x42, 0x07, 0x0a, 0x05, 0x6f, + 0x6e, 0x65, 0x6f, 0x66, 0x22, 0x91, 0x01, 0x0a, 0x09, 0x52, 
0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x73, 0x12, 0x43, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x5f, 0x63, + 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x6f, 0x70, 0x65, 0x6e, + 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x12, 0x3f, 0x0a, 0x10, 0x76, 0x65, 0x6e, 0x64, 0x6f, + 0x72, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, + 0x61, 0x6d, 0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, 0x0f, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x45, + 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0xaf, 0x09, 0x0a, 0x06, 0x53, 0x63, 0x68, + 0x65, 0x6d, 0x61, 0x12, 0x11, 0x0a, 0x04, 0x5f, 0x72, 0x65, 0x66, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x03, 0x52, 0x65, 0x66, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x14, + 0x0a, 0x05, 0x74, 0x69, 0x74, 0x6c, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x74, + 0x69, 0x74, 0x6c, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x29, 0x0a, 0x07, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, + 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, + 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x07, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, + 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x5f, 0x6f, 0x66, + 0x18, 0x06, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0a, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, + 0x4f, 0x66, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x18, 0x07, 0x20, + 0x01, 0x28, 0x01, 0x52, 0x07, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x12, 0x2b, 0x0a, 0x11, + 0x65, 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x5f, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, + 0x6d, 0x18, 0x08, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, + 0x76, 0x65, 0x4d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x69, 0x6e, + 0x69, 0x6d, 0x75, 0x6d, 0x18, 0x09, 0x20, 0x01, 0x28, 0x01, 0x52, 0x07, 0x6d, 0x69, 0x6e, 0x69, + 0x6d, 0x75, 0x6d, 0x12, 0x2b, 0x0a, 0x11, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, + 0x5f, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, + 0x65, 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x4d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, + 0x12, 0x1d, 0x0a, 0x0a, 0x6d, 0x61, 0x78, 0x5f, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x18, 0x0b, + 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x6d, 0x61, 0x78, 0x4c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x12, + 0x1d, 0x0a, 0x0a, 0x6d, 0x69, 0x6e, 0x5f, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x18, 0x0c, 0x20, + 0x01, 0x28, 0x03, 0x52, 0x09, 0x6d, 0x69, 0x6e, 0x4c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x12, 0x18, + 0x0a, 0x07, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x07, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x61, 0x78, 0x5f, + 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x03, 0x52, 0x08, 0x6d, 
0x61, 0x78, + 0x49, 0x74, 0x65, 0x6d, 0x73, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x69, 0x6e, 0x5f, 0x69, 0x74, 0x65, + 0x6d, 0x73, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x03, 0x52, 0x08, 0x6d, 0x69, 0x6e, 0x49, 0x74, 0x65, + 0x6d, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x75, 0x6e, 0x69, 0x71, 0x75, 0x65, 0x5f, 0x69, 0x74, 0x65, + 0x6d, 0x73, 0x18, 0x10, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x75, 0x6e, 0x69, 0x71, 0x75, 0x65, + 0x49, 0x74, 0x65, 0x6d, 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x6d, 0x61, 0x78, 0x5f, 0x70, 0x72, 0x6f, + 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, 0x11, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0d, 0x6d, + 0x61, 0x78, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0e, + 0x6d, 0x69, 0x6e, 0x5f, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, 0x12, + 0x20, 0x01, 0x28, 0x03, 0x52, 0x0d, 0x6d, 0x69, 0x6e, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, + 0x69, 0x65, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x18, + 0x13, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x12, + 0x23, 0x0a, 0x04, 0x65, 0x6e, 0x75, 0x6d, 0x18, 0x14, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0f, 0x2e, + 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x04, + 0x65, 0x6e, 0x75, 0x6d, 0x12, 0x59, 0x0a, 0x15, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, + 0x61, 0x6c, 0x5f, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, 0x15, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, + 0x2e, 0x41, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x50, 0x72, 0x6f, 0x70, 0x65, + 0x72, 0x74, 0x69, 0x65, 0x73, 0x49, 0x74, 0x65, 0x6d, 0x52, 0x14, 0x61, 0x64, 0x64, 0x69, 0x74, + 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x12, + 0x28, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x16, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, + 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x49, + 0x74, 0x65, 0x6d, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x2b, 0x0a, 0x05, 0x69, 0x74, 0x65, + 0x6d, 0x73, 0x18, 0x17, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, + 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x49, 0x74, 0x65, 0x6d, 0x52, + 0x05, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x12, 0x29, 0x0a, 0x06, 0x61, 0x6c, 0x6c, 0x5f, 0x6f, 0x66, + 0x18, 0x18, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, + 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x05, 0x61, 0x6c, 0x6c, 0x4f, + 0x66, 0x12, 0x36, 0x0a, 0x0a, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, + 0x19, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, + 0x76, 0x32, 0x2e, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x52, 0x0a, 0x70, + 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x12, 0x24, 0x0a, 0x0d, 0x64, 0x69, 0x73, + 0x63, 0x72, 0x69, 0x6d, 0x69, 0x6e, 0x61, 0x74, 0x6f, 0x72, 0x18, 0x1a, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0d, 0x64, 0x69, 0x73, 0x63, 0x72, 0x69, 0x6d, 0x69, 0x6e, 0x61, 0x74, 0x6f, 0x72, 0x12, + 0x1b, 0x0a, 0x09, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6f, 0x6e, 0x6c, 0x79, 0x18, 0x1b, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x61, 0x64, 0x4f, 0x6e, 0x6c, 0x79, 0x12, 0x21, 0x0a, 0x03, + 0x78, 0x6d, 0x6c, 0x18, 0x1c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, + 0x61, 0x70, 
0x69, 0x2e, 0x76, 0x32, 0x2e, 0x58, 0x6d, 0x6c, 0x52, 0x03, 0x78, 0x6d, 0x6c, 0x12, + 0x3d, 0x0a, 0x0d, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x64, 0x6f, 0x63, 0x73, + 0x18, 0x1d, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, + 0x2e, 0x76, 0x32, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x44, 0x6f, 0x63, 0x73, + 0x52, 0x0c, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x44, 0x6f, 0x63, 0x73, 0x12, 0x29, + 0x0a, 0x07, 0x65, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x18, 0x1e, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x0f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x6e, 0x79, + 0x52, 0x07, 0x65, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x12, 0x3f, 0x0a, 0x10, 0x76, 0x65, 0x6e, + 0x64, 0x6f, 0x72, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x1f, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, + 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, 0x0f, 0x76, 0x65, 0x6e, 0x64, 0x6f, + 0x72, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x7e, 0x0a, 0x0a, 0x53, 0x63, + 0x68, 0x65, 0x6d, 0x61, 0x49, 0x74, 0x65, 0x6d, 0x12, 0x2c, 0x0a, 0x06, 0x73, 0x63, 0x68, 0x65, + 0x6d, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, + 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x48, 0x00, 0x52, 0x06, + 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x39, 0x0a, 0x0b, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x73, + 0x63, 0x68, 0x65, 0x6d, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x6f, 0x70, + 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x53, 0x63, 0x68, + 0x65, 0x6d, 0x61, 0x48, 0x00, 0x52, 0x0a, 0x66, 0x69, 0x6c, 0x65, 0x53, 0x63, 0x68, 0x65, 0x6d, + 0x61, 0x42, 0x07, 0x0a, 0x05, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x22, 0x74, 0x0a, 0x13, 0x53, 0x65, + 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x44, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x12, 0x5d, 0x0a, 0x15, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, + 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x28, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, + 0x6d, 0x65, 0x64, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x44, 0x65, 0x66, 0x69, 0x6e, + 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x49, 0x74, 0x65, 0x6d, 0x52, 0x14, 0x61, 0x64, 0x64, 0x69, + 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, + 0x22, 0xe9, 0x04, 0x0a, 0x17, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x44, 0x65, 0x66, + 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x49, 0x74, 0x65, 0x6d, 0x12, 0x6d, 0x0a, 0x1d, + 0x62, 0x61, 0x73, 0x69, 0x63, 0x5f, 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, + 0x2e, 0x42, 0x61, 0x73, 0x69, 0x63, 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x48, 0x00, 0x52, 0x1b, + 0x62, 0x61, 0x73, 0x69, 0x63, 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x12, 0x46, 0x0a, 0x10, 0x61, + 0x70, 0x69, 0x5f, 0x6b, 0x65, 0x79, 
0x5f, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, + 0x76, 0x32, 0x2e, 0x41, 0x70, 0x69, 0x4b, 0x65, 0x79, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, + 0x79, 0x48, 0x00, 0x52, 0x0e, 0x61, 0x70, 0x69, 0x4b, 0x65, 0x79, 0x53, 0x65, 0x63, 0x75, 0x72, + 0x69, 0x74, 0x79, 0x12, 0x5e, 0x0a, 0x18, 0x6f, 0x61, 0x75, 0x74, 0x68, 0x32, 0x5f, 0x69, 0x6d, + 0x70, 0x6c, 0x69, 0x63, 0x69, 0x74, 0x5f, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, + 0x76, 0x32, 0x2e, 0x4f, 0x61, 0x75, 0x74, 0x68, 0x32, 0x49, 0x6d, 0x70, 0x6c, 0x69, 0x63, 0x69, + 0x74, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x48, 0x00, 0x52, 0x16, 0x6f, 0x61, 0x75, + 0x74, 0x68, 0x32, 0x49, 0x6d, 0x70, 0x6c, 0x69, 0x63, 0x69, 0x74, 0x53, 0x65, 0x63, 0x75, 0x72, + 0x69, 0x74, 0x79, 0x12, 0x5e, 0x0a, 0x18, 0x6f, 0x61, 0x75, 0x74, 0x68, 0x32, 0x5f, 0x70, 0x61, + 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x5f, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, + 0x76, 0x32, 0x2e, 0x4f, 0x61, 0x75, 0x74, 0x68, 0x32, 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, + 0x64, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x48, 0x00, 0x52, 0x16, 0x6f, 0x61, 0x75, + 0x74, 0x68, 0x32, 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x53, 0x65, 0x63, 0x75, 0x72, + 0x69, 0x74, 0x79, 0x12, 0x67, 0x0a, 0x1b, 0x6f, 0x61, 0x75, 0x74, 0x68, 0x32, 0x5f, 0x61, 0x70, + 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, + 0x74, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, + 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x61, 0x75, 0x74, 0x68, 0x32, 0x41, 0x70, 0x70, 0x6c, + 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x48, + 0x00, 0x52, 0x19, 0x6f, 0x61, 0x75, 0x74, 0x68, 0x32, 0x41, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x12, 0x65, 0x0a, 0x1b, + 0x6f, 0x61, 0x75, 0x74, 0x68, 0x32, 0x5f, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x63, 0x6f, + 0x64, 0x65, 0x5f, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x18, 0x06, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x24, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4f, + 0x61, 0x75, 0x74, 0x68, 0x32, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x43, 0x6f, 0x64, 0x65, 0x53, + 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x48, 0x00, 0x52, 0x18, 0x6f, 0x61, 0x75, 0x74, 0x68, + 0x32, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x43, 0x6f, 0x64, 0x65, 0x53, 0x65, 0x63, 0x75, 0x72, + 0x69, 0x74, 0x79, 0x42, 0x07, 0x0a, 0x05, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x22, 0x68, 0x0a, 0x13, + 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x52, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x6d, + 0x65, 0x6e, 0x74, 0x12, 0x51, 0x0a, 0x15, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, + 0x6c, 0x5f, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, + 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x41, 0x72, 0x72, 0x61, 0x79, + 0x52, 0x14, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x50, 0x72, 0x6f, 0x70, + 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x22, 0x23, 0x0a, 0x0b, 
0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, + 0x41, 0x72, 0x72, 0x61, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, + 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0xbb, 0x01, 0x0a, 0x03, + 0x54, 0x61, 0x67, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, + 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x3d, 0x0a, 0x0d, 0x65, 0x78, 0x74, + 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x64, 0x6f, 0x63, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x18, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x45, 0x78, + 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x44, 0x6f, 0x63, 0x73, 0x52, 0x0c, 0x65, 0x78, 0x74, 0x65, + 0x72, 0x6e, 0x61, 0x6c, 0x44, 0x6f, 0x63, 0x73, 0x12, 0x3f, 0x0a, 0x10, 0x76, 0x65, 0x6e, 0x64, + 0x6f, 0x72, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, + 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, 0x0f, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, + 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x20, 0x0a, 0x08, 0x54, 0x79, 0x70, + 0x65, 0x49, 0x74, 0x65, 0x6d, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, + 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x5c, 0x0a, 0x0f, 0x56, + 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x49, + 0x0a, 0x15, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x70, 0x72, 0x6f, + 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, + 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, + 0x41, 0x6e, 0x79, 0x52, 0x14, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x50, + 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x22, 0xc8, 0x01, 0x0a, 0x03, 0x58, 0x6d, + 0x6c, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, + 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, + 0x61, 0x63, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x06, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x1c, 0x0a, 0x09, 0x61, + 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, + 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x77, 0x72, 0x61, + 0x70, 0x70, 0x65, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x77, 0x72, 0x61, 0x70, + 0x70, 0x65, 0x64, 0x12, 0x3f, 0x0a, 0x10, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x5f, 0x65, 0x78, + 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, + 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, + 0x41, 0x6e, 0x79, 0x52, 0x0f, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x45, 0x78, 0x74, 0x65, 0x6e, + 0x73, 0x69, 0x6f, 0x6e, 0x42, 0x3e, 0x0a, 0x0e, 0x6f, 0x72, 0x67, 0x2e, 0x6f, 0x70, 0x65, 0x6e, + 0x61, 0x70, 0x69, 0x5f, 0x76, 0x32, 0x42, 0x0c, 0x4f, 0x70, 0x65, 0x6e, 0x41, 0x50, 
0x49, 0x50, + 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x16, 0x2e, 0x2f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, + 0x69, 0x76, 0x32, 0x3b, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x5f, 0x76, 0x32, 0xa2, 0x02, + 0x03, 0x4f, 0x41, 0x53, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_openapiv2_OpenAPIv2_proto_rawDescOnce sync.Once + file_openapiv2_OpenAPIv2_proto_rawDescData = file_openapiv2_OpenAPIv2_proto_rawDesc +) + +func file_openapiv2_OpenAPIv2_proto_rawDescGZIP() []byte { + file_openapiv2_OpenAPIv2_proto_rawDescOnce.Do(func() { + file_openapiv2_OpenAPIv2_proto_rawDescData = protoimpl.X.CompressGZIP(file_openapiv2_OpenAPIv2_proto_rawDescData) + }) + return file_openapiv2_OpenAPIv2_proto_rawDescData +} + +var file_openapiv2_OpenAPIv2_proto_msgTypes = make([]protoimpl.MessageInfo, 60) +var file_openapiv2_OpenAPIv2_proto_goTypes = []interface{}{ + (*AdditionalPropertiesItem)(nil), // 0: openapi.v2.AdditionalPropertiesItem + (*Any)(nil), // 1: openapi.v2.Any + (*ApiKeySecurity)(nil), // 2: openapi.v2.ApiKeySecurity + (*BasicAuthenticationSecurity)(nil), // 3: openapi.v2.BasicAuthenticationSecurity + (*BodyParameter)(nil), // 4: openapi.v2.BodyParameter + (*Contact)(nil), // 5: openapi.v2.Contact + (*Default)(nil), // 6: openapi.v2.Default + (*Definitions)(nil), // 7: openapi.v2.Definitions + (*Document)(nil), // 8: openapi.v2.Document + (*Examples)(nil), // 9: openapi.v2.Examples + (*ExternalDocs)(nil), // 10: openapi.v2.ExternalDocs + (*FileSchema)(nil), // 11: openapi.v2.FileSchema + (*FormDataParameterSubSchema)(nil), // 12: openapi.v2.FormDataParameterSubSchema + (*Header)(nil), // 13: openapi.v2.Header + (*HeaderParameterSubSchema)(nil), // 14: openapi.v2.HeaderParameterSubSchema + (*Headers)(nil), // 15: openapi.v2.Headers + (*Info)(nil), // 16: openapi.v2.Info + (*ItemsItem)(nil), // 17: openapi.v2.ItemsItem + (*JsonReference)(nil), // 18: openapi.v2.JsonReference + (*License)(nil), // 19: openapi.v2.License + (*NamedAny)(nil), // 20: openapi.v2.NamedAny + (*NamedHeader)(nil), // 21: openapi.v2.NamedHeader + (*NamedParameter)(nil), // 22: openapi.v2.NamedParameter + (*NamedPathItem)(nil), // 23: openapi.v2.NamedPathItem + (*NamedResponse)(nil), // 24: openapi.v2.NamedResponse + (*NamedResponseValue)(nil), // 25: openapi.v2.NamedResponseValue + (*NamedSchema)(nil), // 26: openapi.v2.NamedSchema + (*NamedSecurityDefinitionsItem)(nil), // 27: openapi.v2.NamedSecurityDefinitionsItem + (*NamedString)(nil), // 28: openapi.v2.NamedString + (*NamedStringArray)(nil), // 29: openapi.v2.NamedStringArray + (*NonBodyParameter)(nil), // 30: openapi.v2.NonBodyParameter + (*Oauth2AccessCodeSecurity)(nil), // 31: openapi.v2.Oauth2AccessCodeSecurity + (*Oauth2ApplicationSecurity)(nil), // 32: openapi.v2.Oauth2ApplicationSecurity + (*Oauth2ImplicitSecurity)(nil), // 33: openapi.v2.Oauth2ImplicitSecurity + (*Oauth2PasswordSecurity)(nil), // 34: openapi.v2.Oauth2PasswordSecurity + (*Oauth2Scopes)(nil), // 35: openapi.v2.Oauth2Scopes + (*Operation)(nil), // 36: openapi.v2.Operation + (*Parameter)(nil), // 37: openapi.v2.Parameter + (*ParameterDefinitions)(nil), // 38: openapi.v2.ParameterDefinitions + (*ParametersItem)(nil), // 39: openapi.v2.ParametersItem + (*PathItem)(nil), // 40: openapi.v2.PathItem + (*PathParameterSubSchema)(nil), // 41: openapi.v2.PathParameterSubSchema + (*Paths)(nil), // 42: openapi.v2.Paths + (*PrimitivesItems)(nil), // 43: openapi.v2.PrimitivesItems + (*Properties)(nil), // 44: openapi.v2.Properties + (*QueryParameterSubSchema)(nil), // 45: 
openapi.v2.QueryParameterSubSchema + (*Response)(nil), // 46: openapi.v2.Response + (*ResponseDefinitions)(nil), // 47: openapi.v2.ResponseDefinitions + (*ResponseValue)(nil), // 48: openapi.v2.ResponseValue + (*Responses)(nil), // 49: openapi.v2.Responses + (*Schema)(nil), // 50: openapi.v2.Schema + (*SchemaItem)(nil), // 51: openapi.v2.SchemaItem + (*SecurityDefinitions)(nil), // 52: openapi.v2.SecurityDefinitions + (*SecurityDefinitionsItem)(nil), // 53: openapi.v2.SecurityDefinitionsItem + (*SecurityRequirement)(nil), // 54: openapi.v2.SecurityRequirement + (*StringArray)(nil), // 55: openapi.v2.StringArray + (*Tag)(nil), // 56: openapi.v2.Tag + (*TypeItem)(nil), // 57: openapi.v2.TypeItem + (*VendorExtension)(nil), // 58: openapi.v2.VendorExtension + (*Xml)(nil), // 59: openapi.v2.Xml + (*anypb.Any)(nil), // 60: google.protobuf.Any +} +var file_openapiv2_OpenAPIv2_proto_depIdxs = []int32{ + 50, // 0: openapi.v2.AdditionalPropertiesItem.schema:type_name -> openapi.v2.Schema + 60, // 1: openapi.v2.Any.value:type_name -> google.protobuf.Any + 20, // 2: openapi.v2.ApiKeySecurity.vendor_extension:type_name -> openapi.v2.NamedAny + 20, // 3: openapi.v2.BasicAuthenticationSecurity.vendor_extension:type_name -> openapi.v2.NamedAny + 50, // 4: openapi.v2.BodyParameter.schema:type_name -> openapi.v2.Schema + 20, // 5: openapi.v2.BodyParameter.vendor_extension:type_name -> openapi.v2.NamedAny + 20, // 6: openapi.v2.Contact.vendor_extension:type_name -> openapi.v2.NamedAny + 20, // 7: openapi.v2.Default.additional_properties:type_name -> openapi.v2.NamedAny + 26, // 8: openapi.v2.Definitions.additional_properties:type_name -> openapi.v2.NamedSchema + 16, // 9: openapi.v2.Document.info:type_name -> openapi.v2.Info + 42, // 10: openapi.v2.Document.paths:type_name -> openapi.v2.Paths + 7, // 11: openapi.v2.Document.definitions:type_name -> openapi.v2.Definitions + 38, // 12: openapi.v2.Document.parameters:type_name -> openapi.v2.ParameterDefinitions + 47, // 13: openapi.v2.Document.responses:type_name -> openapi.v2.ResponseDefinitions + 54, // 14: openapi.v2.Document.security:type_name -> openapi.v2.SecurityRequirement + 52, // 15: openapi.v2.Document.security_definitions:type_name -> openapi.v2.SecurityDefinitions + 56, // 16: openapi.v2.Document.tags:type_name -> openapi.v2.Tag + 10, // 17: openapi.v2.Document.external_docs:type_name -> openapi.v2.ExternalDocs + 20, // 18: openapi.v2.Document.vendor_extension:type_name -> openapi.v2.NamedAny + 20, // 19: openapi.v2.Examples.additional_properties:type_name -> openapi.v2.NamedAny + 20, // 20: openapi.v2.ExternalDocs.vendor_extension:type_name -> openapi.v2.NamedAny + 1, // 21: openapi.v2.FileSchema.default:type_name -> openapi.v2.Any + 10, // 22: openapi.v2.FileSchema.external_docs:type_name -> openapi.v2.ExternalDocs + 1, // 23: openapi.v2.FileSchema.example:type_name -> openapi.v2.Any + 20, // 24: openapi.v2.FileSchema.vendor_extension:type_name -> openapi.v2.NamedAny + 43, // 25: openapi.v2.FormDataParameterSubSchema.items:type_name -> openapi.v2.PrimitivesItems + 1, // 26: openapi.v2.FormDataParameterSubSchema.default:type_name -> openapi.v2.Any + 1, // 27: openapi.v2.FormDataParameterSubSchema.enum:type_name -> openapi.v2.Any + 20, // 28: openapi.v2.FormDataParameterSubSchema.vendor_extension:type_name -> openapi.v2.NamedAny + 43, // 29: openapi.v2.Header.items:type_name -> openapi.v2.PrimitivesItems + 1, // 30: openapi.v2.Header.default:type_name -> openapi.v2.Any + 1, // 31: openapi.v2.Header.enum:type_name -> openapi.v2.Any + 20, // 32: 
openapi.v2.Header.vendor_extension:type_name -> openapi.v2.NamedAny + 43, // 33: openapi.v2.HeaderParameterSubSchema.items:type_name -> openapi.v2.PrimitivesItems + 1, // 34: openapi.v2.HeaderParameterSubSchema.default:type_name -> openapi.v2.Any + 1, // 35: openapi.v2.HeaderParameterSubSchema.enum:type_name -> openapi.v2.Any + 20, // 36: openapi.v2.HeaderParameterSubSchema.vendor_extension:type_name -> openapi.v2.NamedAny + 21, // 37: openapi.v2.Headers.additional_properties:type_name -> openapi.v2.NamedHeader + 5, // 38: openapi.v2.Info.contact:type_name -> openapi.v2.Contact + 19, // 39: openapi.v2.Info.license:type_name -> openapi.v2.License + 20, // 40: openapi.v2.Info.vendor_extension:type_name -> openapi.v2.NamedAny + 50, // 41: openapi.v2.ItemsItem.schema:type_name -> openapi.v2.Schema + 20, // 42: openapi.v2.License.vendor_extension:type_name -> openapi.v2.NamedAny + 1, // 43: openapi.v2.NamedAny.value:type_name -> openapi.v2.Any + 13, // 44: openapi.v2.NamedHeader.value:type_name -> openapi.v2.Header + 37, // 45: openapi.v2.NamedParameter.value:type_name -> openapi.v2.Parameter + 40, // 46: openapi.v2.NamedPathItem.value:type_name -> openapi.v2.PathItem + 46, // 47: openapi.v2.NamedResponse.value:type_name -> openapi.v2.Response + 48, // 48: openapi.v2.NamedResponseValue.value:type_name -> openapi.v2.ResponseValue + 50, // 49: openapi.v2.NamedSchema.value:type_name -> openapi.v2.Schema + 53, // 50: openapi.v2.NamedSecurityDefinitionsItem.value:type_name -> openapi.v2.SecurityDefinitionsItem + 55, // 51: openapi.v2.NamedStringArray.value:type_name -> openapi.v2.StringArray + 14, // 52: openapi.v2.NonBodyParameter.header_parameter_sub_schema:type_name -> openapi.v2.HeaderParameterSubSchema + 12, // 53: openapi.v2.NonBodyParameter.form_data_parameter_sub_schema:type_name -> openapi.v2.FormDataParameterSubSchema + 45, // 54: openapi.v2.NonBodyParameter.query_parameter_sub_schema:type_name -> openapi.v2.QueryParameterSubSchema + 41, // 55: openapi.v2.NonBodyParameter.path_parameter_sub_schema:type_name -> openapi.v2.PathParameterSubSchema + 35, // 56: openapi.v2.Oauth2AccessCodeSecurity.scopes:type_name -> openapi.v2.Oauth2Scopes + 20, // 57: openapi.v2.Oauth2AccessCodeSecurity.vendor_extension:type_name -> openapi.v2.NamedAny + 35, // 58: openapi.v2.Oauth2ApplicationSecurity.scopes:type_name -> openapi.v2.Oauth2Scopes + 20, // 59: openapi.v2.Oauth2ApplicationSecurity.vendor_extension:type_name -> openapi.v2.NamedAny + 35, // 60: openapi.v2.Oauth2ImplicitSecurity.scopes:type_name -> openapi.v2.Oauth2Scopes + 20, // 61: openapi.v2.Oauth2ImplicitSecurity.vendor_extension:type_name -> openapi.v2.NamedAny + 35, // 62: openapi.v2.Oauth2PasswordSecurity.scopes:type_name -> openapi.v2.Oauth2Scopes + 20, // 63: openapi.v2.Oauth2PasswordSecurity.vendor_extension:type_name -> openapi.v2.NamedAny + 28, // 64: openapi.v2.Oauth2Scopes.additional_properties:type_name -> openapi.v2.NamedString + 10, // 65: openapi.v2.Operation.external_docs:type_name -> openapi.v2.ExternalDocs + 39, // 66: openapi.v2.Operation.parameters:type_name -> openapi.v2.ParametersItem + 49, // 67: openapi.v2.Operation.responses:type_name -> openapi.v2.Responses + 54, // 68: openapi.v2.Operation.security:type_name -> openapi.v2.SecurityRequirement + 20, // 69: openapi.v2.Operation.vendor_extension:type_name -> openapi.v2.NamedAny + 4, // 70: openapi.v2.Parameter.body_parameter:type_name -> openapi.v2.BodyParameter + 30, // 71: openapi.v2.Parameter.non_body_parameter:type_name -> openapi.v2.NonBodyParameter + 22, // 72: 
openapi.v2.ParameterDefinitions.additional_properties:type_name -> openapi.v2.NamedParameter + 37, // 73: openapi.v2.ParametersItem.parameter:type_name -> openapi.v2.Parameter + 18, // 74: openapi.v2.ParametersItem.json_reference:type_name -> openapi.v2.JsonReference + 36, // 75: openapi.v2.PathItem.get:type_name -> openapi.v2.Operation + 36, // 76: openapi.v2.PathItem.put:type_name -> openapi.v2.Operation + 36, // 77: openapi.v2.PathItem.post:type_name -> openapi.v2.Operation + 36, // 78: openapi.v2.PathItem.delete:type_name -> openapi.v2.Operation + 36, // 79: openapi.v2.PathItem.options:type_name -> openapi.v2.Operation + 36, // 80: openapi.v2.PathItem.head:type_name -> openapi.v2.Operation + 36, // 81: openapi.v2.PathItem.patch:type_name -> openapi.v2.Operation + 39, // 82: openapi.v2.PathItem.parameters:type_name -> openapi.v2.ParametersItem + 20, // 83: openapi.v2.PathItem.vendor_extension:type_name -> openapi.v2.NamedAny + 43, // 84: openapi.v2.PathParameterSubSchema.items:type_name -> openapi.v2.PrimitivesItems + 1, // 85: openapi.v2.PathParameterSubSchema.default:type_name -> openapi.v2.Any + 1, // 86: openapi.v2.PathParameterSubSchema.enum:type_name -> openapi.v2.Any + 20, // 87: openapi.v2.PathParameterSubSchema.vendor_extension:type_name -> openapi.v2.NamedAny + 20, // 88: openapi.v2.Paths.vendor_extension:type_name -> openapi.v2.NamedAny + 23, // 89: openapi.v2.Paths.path:type_name -> openapi.v2.NamedPathItem + 43, // 90: openapi.v2.PrimitivesItems.items:type_name -> openapi.v2.PrimitivesItems + 1, // 91: openapi.v2.PrimitivesItems.default:type_name -> openapi.v2.Any + 1, // 92: openapi.v2.PrimitivesItems.enum:type_name -> openapi.v2.Any + 20, // 93: openapi.v2.PrimitivesItems.vendor_extension:type_name -> openapi.v2.NamedAny + 26, // 94: openapi.v2.Properties.additional_properties:type_name -> openapi.v2.NamedSchema + 43, // 95: openapi.v2.QueryParameterSubSchema.items:type_name -> openapi.v2.PrimitivesItems + 1, // 96: openapi.v2.QueryParameterSubSchema.default:type_name -> openapi.v2.Any + 1, // 97: openapi.v2.QueryParameterSubSchema.enum:type_name -> openapi.v2.Any + 20, // 98: openapi.v2.QueryParameterSubSchema.vendor_extension:type_name -> openapi.v2.NamedAny + 51, // 99: openapi.v2.Response.schema:type_name -> openapi.v2.SchemaItem + 15, // 100: openapi.v2.Response.headers:type_name -> openapi.v2.Headers + 9, // 101: openapi.v2.Response.examples:type_name -> openapi.v2.Examples + 20, // 102: openapi.v2.Response.vendor_extension:type_name -> openapi.v2.NamedAny + 24, // 103: openapi.v2.ResponseDefinitions.additional_properties:type_name -> openapi.v2.NamedResponse + 46, // 104: openapi.v2.ResponseValue.response:type_name -> openapi.v2.Response + 18, // 105: openapi.v2.ResponseValue.json_reference:type_name -> openapi.v2.JsonReference + 25, // 106: openapi.v2.Responses.response_code:type_name -> openapi.v2.NamedResponseValue + 20, // 107: openapi.v2.Responses.vendor_extension:type_name -> openapi.v2.NamedAny + 1, // 108: openapi.v2.Schema.default:type_name -> openapi.v2.Any + 1, // 109: openapi.v2.Schema.enum:type_name -> openapi.v2.Any + 0, // 110: openapi.v2.Schema.additional_properties:type_name -> openapi.v2.AdditionalPropertiesItem + 57, // 111: openapi.v2.Schema.type:type_name -> openapi.v2.TypeItem + 17, // 112: openapi.v2.Schema.items:type_name -> openapi.v2.ItemsItem + 50, // 113: openapi.v2.Schema.all_of:type_name -> openapi.v2.Schema + 44, // 114: openapi.v2.Schema.properties:type_name -> openapi.v2.Properties + 59, // 115: openapi.v2.Schema.xml:type_name -> 
openapi.v2.Xml + 10, // 116: openapi.v2.Schema.external_docs:type_name -> openapi.v2.ExternalDocs + 1, // 117: openapi.v2.Schema.example:type_name -> openapi.v2.Any + 20, // 118: openapi.v2.Schema.vendor_extension:type_name -> openapi.v2.NamedAny + 50, // 119: openapi.v2.SchemaItem.schema:type_name -> openapi.v2.Schema + 11, // 120: openapi.v2.SchemaItem.file_schema:type_name -> openapi.v2.FileSchema + 27, // 121: openapi.v2.SecurityDefinitions.additional_properties:type_name -> openapi.v2.NamedSecurityDefinitionsItem + 3, // 122: openapi.v2.SecurityDefinitionsItem.basic_authentication_security:type_name -> openapi.v2.BasicAuthenticationSecurity + 2, // 123: openapi.v2.SecurityDefinitionsItem.api_key_security:type_name -> openapi.v2.ApiKeySecurity + 33, // 124: openapi.v2.SecurityDefinitionsItem.oauth2_implicit_security:type_name -> openapi.v2.Oauth2ImplicitSecurity + 34, // 125: openapi.v2.SecurityDefinitionsItem.oauth2_password_security:type_name -> openapi.v2.Oauth2PasswordSecurity + 32, // 126: openapi.v2.SecurityDefinitionsItem.oauth2_application_security:type_name -> openapi.v2.Oauth2ApplicationSecurity + 31, // 127: openapi.v2.SecurityDefinitionsItem.oauth2_access_code_security:type_name -> openapi.v2.Oauth2AccessCodeSecurity + 29, // 128: openapi.v2.SecurityRequirement.additional_properties:type_name -> openapi.v2.NamedStringArray + 10, // 129: openapi.v2.Tag.external_docs:type_name -> openapi.v2.ExternalDocs + 20, // 130: openapi.v2.Tag.vendor_extension:type_name -> openapi.v2.NamedAny + 20, // 131: openapi.v2.VendorExtension.additional_properties:type_name -> openapi.v2.NamedAny + 20, // 132: openapi.v2.Xml.vendor_extension:type_name -> openapi.v2.NamedAny + 133, // [133:133] is the sub-list for method output_type + 133, // [133:133] is the sub-list for method input_type + 133, // [133:133] is the sub-list for extension type_name + 133, // [133:133] is the sub-list for extension extendee + 0, // [0:133] is the sub-list for field type_name +} + +func init() { file_openapiv2_OpenAPIv2_proto_init() } +func file_openapiv2_OpenAPIv2_proto_init() { + if File_openapiv2_OpenAPIv2_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_openapiv2_OpenAPIv2_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AdditionalPropertiesItem); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Any); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ApiKeySecurity); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*BasicAuthenticationSecurity); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*BodyParameter); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + 
file_openapiv2_OpenAPIv2_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Contact); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Default); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Definitions); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Document); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Examples); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ExternalDocs); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*FileSchema); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*FormDataParameterSubSchema); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Header); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*HeaderParameterSubSchema); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Headers); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Info); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ItemsItem); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*JsonReference); i { + case 0: + return 
&v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*License); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*NamedAny); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*NamedHeader); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*NamedParameter); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*NamedPathItem); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*NamedResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*NamedResponseValue); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*NamedSchema); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[27].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*NamedSecurityDefinitionsItem); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[28].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*NamedString); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[29].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*NamedStringArray); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[30].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*NonBodyParameter); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[31].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Oauth2AccessCodeSecurity); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil 
+ } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[32].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Oauth2ApplicationSecurity); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[33].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Oauth2ImplicitSecurity); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[34].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Oauth2PasswordSecurity); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[35].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Oauth2Scopes); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[36].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Operation); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[37].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Parameter); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[38].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ParameterDefinitions); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[39].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ParametersItem); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[40].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PathItem); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[41].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PathParameterSubSchema); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[42].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Paths); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[43].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PrimitivesItems); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[44].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Properties); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[45].Exporter = func(v interface{}, i int) 
interface{} { + switch v := v.(*QueryParameterSubSchema); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[46].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Response); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[47].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ResponseDefinitions); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[48].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ResponseValue); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[49].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Responses); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[50].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Schema); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[51].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SchemaItem); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[52].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SecurityDefinitions); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[53].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SecurityDefinitionsItem); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[54].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SecurityRequirement); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[55].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*StringArray); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[56].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Tag); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[57].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TypeItem); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[58].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VendorExtension); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 
2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[59].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Xml); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[0].OneofWrappers = []interface{}{ + (*AdditionalPropertiesItem_Schema)(nil), + (*AdditionalPropertiesItem_Boolean)(nil), + } + file_openapiv2_OpenAPIv2_proto_msgTypes[30].OneofWrappers = []interface{}{ + (*NonBodyParameter_HeaderParameterSubSchema)(nil), + (*NonBodyParameter_FormDataParameterSubSchema)(nil), + (*NonBodyParameter_QueryParameterSubSchema)(nil), + (*NonBodyParameter_PathParameterSubSchema)(nil), + } + file_openapiv2_OpenAPIv2_proto_msgTypes[37].OneofWrappers = []interface{}{ + (*Parameter_BodyParameter)(nil), + (*Parameter_NonBodyParameter)(nil), + } + file_openapiv2_OpenAPIv2_proto_msgTypes[39].OneofWrappers = []interface{}{ + (*ParametersItem_Parameter)(nil), + (*ParametersItem_JsonReference)(nil), + } + file_openapiv2_OpenAPIv2_proto_msgTypes[48].OneofWrappers = []interface{}{ + (*ResponseValue_Response)(nil), + (*ResponseValue_JsonReference)(nil), + } + file_openapiv2_OpenAPIv2_proto_msgTypes[51].OneofWrappers = []interface{}{ + (*SchemaItem_Schema)(nil), + (*SchemaItem_FileSchema)(nil), + } + file_openapiv2_OpenAPIv2_proto_msgTypes[53].OneofWrappers = []interface{}{ + (*SecurityDefinitionsItem_BasicAuthenticationSecurity)(nil), + (*SecurityDefinitionsItem_ApiKeySecurity)(nil), + (*SecurityDefinitionsItem_Oauth2ImplicitSecurity)(nil), + (*SecurityDefinitionsItem_Oauth2PasswordSecurity)(nil), + (*SecurityDefinitionsItem_Oauth2ApplicationSecurity)(nil), + (*SecurityDefinitionsItem_Oauth2AccessCodeSecurity)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_openapiv2_OpenAPIv2_proto_rawDesc, + NumEnums: 0, + NumMessages: 60, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_openapiv2_OpenAPIv2_proto_goTypes, + DependencyIndexes: file_openapiv2_OpenAPIv2_proto_depIdxs, + MessageInfos: file_openapiv2_OpenAPIv2_proto_msgTypes, + }.Build() + File_openapiv2_OpenAPIv2_proto = out.File + file_openapiv2_OpenAPIv2_proto_rawDesc = nil + file_openapiv2_OpenAPIv2_proto_goTypes = nil + file_openapiv2_OpenAPIv2_proto_depIdxs = nil } diff --git a/vendor/github.com/googleapis/gnostic/openapiv2/OpenAPIv2.proto b/vendor/github.com/googleapis/gnostic/openapiv2/OpenAPIv2.proto index 557c88072cd83..1c59b2f4ae132 100644 --- a/vendor/github.com/googleapis/gnostic/openapiv2/OpenAPIv2.proto +++ b/vendor/github.com/googleapis/gnostic/openapiv2/OpenAPIv2.proto @@ -1,4 +1,4 @@ -// Copyright 2017 Google Inc. All Rights Reserved. +// Copyright 2020 Google LLC. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -41,6 +41,9 @@ option java_package = "org.openapi_v2"; // the future. 'GPB' is reserved for the protocol buffer implementation itself. option objc_class_prefix = "OAS"; +// The Go package name. 
+option go_package = "./openapiv2;openapi_v2"; + message AdditionalPropertiesItem { oneof oneof { Schema schema = 1; @@ -553,7 +556,7 @@ message Response { repeated NamedAny vendor_extension = 5; } -// One or more JSON representations for parameters +// One or more JSON representations for responses message ResponseDefinitions { repeated NamedResponse additional_properties = 1; } diff --git a/vendor/github.com/googleapis/gnostic/openapiv2/README.md b/vendor/github.com/googleapis/gnostic/openapiv2/README.md index 836fb32a7ef04..5276128d3b8e8 100644 --- a/vendor/github.com/googleapis/gnostic/openapiv2/README.md +++ b/vendor/github.com/googleapis/gnostic/openapiv2/README.md @@ -1,16 +1,14 @@ # OpenAPI v2 Protocol Buffer Models -This directory contains a Protocol Buffer-language model -and related code for supporting OpenAPI v2. +This directory contains a Protocol Buffer-language model and related code for +supporting OpenAPI v2. -Gnostic applications and plugins can use OpenAPIv2.proto -to generate Protocol Buffer support code for their preferred languages. +Gnostic applications and plugins can use OpenAPIv2.proto to generate Protocol +Buffer support code for their preferred languages. -OpenAPIv2.go is used by Gnostic to read JSON and YAML OpenAPI -descriptions into the Protocol Buffer-based datastructures -generated from OpenAPIv2.proto. +OpenAPIv2.go is used by Gnostic to read JSON and YAML OpenAPI descriptions into +the Protocol Buffer-based datastructures generated from OpenAPIv2.proto. -OpenAPIv2.proto and OpenAPIv2.go are generated by the Gnostic -compiler generator, and OpenAPIv2.pb.go is generated by -protoc, the Protocol Buffer compiler, and protoc-gen-go, the -Protocol Buffer Go code generation plugin. +OpenAPIv2.proto and OpenAPIv2.go are generated by the Gnostic compiler +generator, and OpenAPIv2.pb.go is generated by protoc, the Protocol Buffer +compiler, and protoc-gen-go, the Protocol Buffer Go code generation plugin. diff --git a/vendor/github.com/googleapis/gnostic/openapiv2/document.go b/vendor/github.com/googleapis/gnostic/openapiv2/document.go new file mode 100644 index 0000000000000..56e5966b4cbe9 --- /dev/null +++ b/vendor/github.com/googleapis/gnostic/openapiv2/document.go @@ -0,0 +1,41 @@ +// Copyright 2020 Google LLC. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package openapi_v2 + +import ( + "github.com/googleapis/gnostic/compiler" + "gopkg.in/yaml.v3" +) + +// ParseDocument reads an OpenAPI v2 description from a YAML/JSON representation. +func ParseDocument(b []byte) (*Document, error) { + info, err := compiler.ReadInfoFromBytes("", b) + if err != nil { + return nil, err + } + root := info.Content[0] + return NewDocument(root, compiler.NewContextWithExtensions("$root", root, nil, nil)) +} + +// YAMLValue produces a serialized YAML representation of the document. 
+func (d *Document) YAMLValue(comment string) ([]byte, error) { + rawInfo := d.ToRawInfo() + rawInfo = &yaml.Node{ + Kind: yaml.DocumentNode, + Content: []*yaml.Node{rawInfo}, + HeadComment: comment, + } + return yaml.Marshal(rawInfo) +} diff --git a/vendor/github.com/googleapis/gnostic/openapiv2/openapi-2.0.json b/vendor/github.com/googleapis/gnostic/openapiv2/openapi-2.0.json index 2815a26ea718c..afa12b79b8fed 100644 --- a/vendor/github.com/googleapis/gnostic/openapiv2/openapi-2.0.json +++ b/vendor/github.com/googleapis/gnostic/openapiv2/openapi-2.0.json @@ -203,7 +203,7 @@ "additionalProperties": { "$ref": "#/definitions/response" }, - "description": "One or more JSON representations for parameters" + "description": "One or more JSON representations for responses" }, "externalDocs": { "type": "object", @@ -1607,4 +1607,4 @@ } } } -} +} \ No newline at end of file diff --git a/vendor/k8s.io/api/admissionregistration/v1/types.go b/vendor/k8s.io/api/admissionregistration/v1/types.go index ff544c3a3cc5e..29873b796bb0c 100644 --- a/vendor/k8s.io/api/admissionregistration/v1/types.go +++ b/vendor/k8s.io/api/admissionregistration/v1/types.go @@ -64,6 +64,7 @@ type Rule struct { } // ScopeType specifies a scope for a Rule. +// +enum type ScopeType string const ( @@ -77,6 +78,7 @@ const ( ) // FailurePolicyType specifies a failure policy that defines how unrecognized errors from the admission endpoint are handled. +// +enum type FailurePolicyType string const ( @@ -87,6 +89,7 @@ const ( ) // MatchPolicyType specifies the type of match policy. +// +enum type MatchPolicyType string const ( @@ -97,6 +100,7 @@ const ( ) // SideEffectClass specifies the types of side effects a webhook may have. +// +enum type SideEffectClass string const ( @@ -450,6 +454,7 @@ type MutatingWebhook struct { } // ReinvocationPolicyType specifies what type of policy the admission hook uses. +// +enum type ReinvocationPolicyType string const ( @@ -476,6 +481,7 @@ type RuleWithOperations struct { } // OperationType specifies an operation for a request. +// +enum type OperationType string // The constants should be kept in sync with those defined in k8s.io/kubernetes/pkg/admission/interface.go. 
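For reference, the document.go file added above introduces a small public API. A minimal round-trip sketch using it (a sketch only, assuming a valid OpenAPI v2 description on disk; the file name swagger.yaml and the head-comment string are illustrative placeholders, not part of the vendored code):

package main

import (
	"fmt"
	"log"
	"os"

	openapi_v2 "github.com/googleapis/gnostic/openapiv2"
)

func main() {
	// Read a JSON or YAML OpenAPI v2 description from disk.
	b, err := os.ReadFile("swagger.yaml") // placeholder path
	if err != nil {
		log.Fatal(err)
	}
	// ParseDocument builds the protobuf-backed Document model.
	doc, err := openapi_v2.ParseDocument(b)
	if err != nil {
		log.Fatal(err)
	}
	// YAMLValue re-serializes the model, prefixing the given head comment.
	out, err := doc.YAMLValue("regenerated example")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%s", out)
}

ParseDocument wraps compiler.ReadInfoFromBytes and NewDocument, so it accepts either JSON or YAML input and returns the same *Document type the generated code above operates on.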
diff --git a/vendor/k8s.io/api/admissionregistration/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/admissionregistration/v1/zz_generated.deepcopy.go index 3afb7467377c2..cff7377a55745 100644 --- a/vendor/k8s.io/api/admissionregistration/v1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/admissionregistration/v1/zz_generated.deepcopy.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* diff --git a/vendor/k8s.io/api/admissionregistration/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/admissionregistration/v1beta1/zz_generated.deepcopy.go index c4570d0311b72..23ddb1fa2c1e1 100644 --- a/vendor/k8s.io/api/admissionregistration/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/admissionregistration/v1beta1/zz_generated.deepcopy.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* diff --git a/vendor/k8s.io/api/admissionregistration/v1beta1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/admissionregistration/v1beta1/zz_generated.prerelease-lifecycle.go index bfd93a05ad3b8..09a92f47689ed 100644 --- a/vendor/k8s.io/api/admissionregistration/v1beta1/zz_generated.prerelease-lifecycle.go +++ b/vendor/k8s.io/api/admissionregistration/v1beta1/zz_generated.prerelease-lifecycle.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* diff --git a/vendor/k8s.io/api/apiserverinternal/v1alpha1/generated.proto b/vendor/k8s.io/api/apiserverinternal/v1alpha1/generated.proto index 539c8c9e2b201..c4506eb923659 100644 --- a/vendor/k8s.io/api/apiserverinternal/v1alpha1/generated.proto +++ b/vendor/k8s.io/api/apiserverinternal/v1alpha1/generated.proto @@ -86,9 +86,12 @@ message StorageVersionCondition { // A list of StorageVersions. message StorageVersionList { + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + // Items holds a list of StorageVersion repeated StorageVersion items = 2; } diff --git a/vendor/k8s.io/api/apiserverinternal/v1alpha1/types.go b/vendor/k8s.io/api/apiserverinternal/v1alpha1/types.go index 880091b6f82bb..bfa249e135c79 100644 --- a/vendor/k8s.io/api/apiserverinternal/v1alpha1/types.go +++ b/vendor/k8s.io/api/apiserverinternal/v1alpha1/types.go @@ -121,7 +121,10 @@ type StorageVersionCondition struct { // A list of StorageVersions. type StorageVersionList struct { metav1.TypeMeta `json:",inline"` + // Standard list metadata. 
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - Items []StorageVersion `json:"items" protobuf:"bytes,2,rep,name=items"` + // Items holds a list of StorageVersion + Items []StorageVersion `json:"items" protobuf:"bytes,2,rep,name=items"` } diff --git a/vendor/k8s.io/api/apiserverinternal/v1alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/api/apiserverinternal/v1alpha1/types_swagger_doc_generated.go index b05c285954361..297ed08a71570 100644 --- a/vendor/k8s.io/api/apiserverinternal/v1alpha1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/apiserverinternal/v1alpha1/types_swagger_doc_generated.go @@ -64,7 +64,9 @@ func (StorageVersionCondition) SwaggerDoc() map[string]string { } var map_StorageVersionList = map[string]string{ - "": "A list of StorageVersions.", + "": "A list of StorageVersions.", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "items": "Items holds a list of StorageVersion", } func (StorageVersionList) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/apiserverinternal/v1alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/api/apiserverinternal/v1alpha1/zz_generated.deepcopy.go index 7e82a9070fd9d..44dffa75128e2 100644 --- a/vendor/k8s.io/api/apiserverinternal/v1alpha1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/apiserverinternal/v1alpha1/zz_generated.deepcopy.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* diff --git a/vendor/k8s.io/api/apps/v1/generated.pb.go b/vendor/k8s.io/api/apps/v1/generated.pb.go index 19fe456388674..81d51bd58f5cb 100644 --- a/vendor/k8s.io/api/apps/v1/generated.pb.go +++ b/vendor/k8s.io/api/apps/v1/generated.pb.go @@ -748,10 +748,40 @@ func (m *StatefulSetList) XXX_DiscardUnknown() { var xxx_messageInfo_StatefulSetList proto.InternalMessageInfo +func (m *StatefulSetPersistentVolumeClaimRetentionPolicy) Reset() { + *m = StatefulSetPersistentVolumeClaimRetentionPolicy{} +} +func (*StatefulSetPersistentVolumeClaimRetentionPolicy) ProtoMessage() {} +func (*StatefulSetPersistentVolumeClaimRetentionPolicy) Descriptor() ([]byte, []int) { + return fileDescriptor_e1014cab6f31e43b, []int{25} +} +func (m *StatefulSetPersistentVolumeClaimRetentionPolicy) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StatefulSetPersistentVolumeClaimRetentionPolicy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *StatefulSetPersistentVolumeClaimRetentionPolicy) XXX_Merge(src proto.Message) { + xxx_messageInfo_StatefulSetPersistentVolumeClaimRetentionPolicy.Merge(m, src) +} +func (m *StatefulSetPersistentVolumeClaimRetentionPolicy) XXX_Size() int { + return m.Size() +} +func (m *StatefulSetPersistentVolumeClaimRetentionPolicy) XXX_DiscardUnknown() { + xxx_messageInfo_StatefulSetPersistentVolumeClaimRetentionPolicy.DiscardUnknown(m) +} + +var xxx_messageInfo_StatefulSetPersistentVolumeClaimRetentionPolicy proto.InternalMessageInfo + func (m *StatefulSetSpec) Reset() { *m = StatefulSetSpec{} } func (*StatefulSetSpec) ProtoMessage() {} func (*StatefulSetSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_e1014cab6f31e43b, []int{25} + return fileDescriptor_e1014cab6f31e43b, []int{26} } func (m 
*StatefulSetSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -779,7 +809,7 @@ var xxx_messageInfo_StatefulSetSpec proto.InternalMessageInfo func (m *StatefulSetStatus) Reset() { *m = StatefulSetStatus{} } func (*StatefulSetStatus) ProtoMessage() {} func (*StatefulSetStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_e1014cab6f31e43b, []int{26} + return fileDescriptor_e1014cab6f31e43b, []int{27} } func (m *StatefulSetStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -807,7 +837,7 @@ var xxx_messageInfo_StatefulSetStatus proto.InternalMessageInfo func (m *StatefulSetUpdateStrategy) Reset() { *m = StatefulSetUpdateStrategy{} } func (*StatefulSetUpdateStrategy) ProtoMessage() {} func (*StatefulSetUpdateStrategy) Descriptor() ([]byte, []int) { - return fileDescriptor_e1014cab6f31e43b, []int{27} + return fileDescriptor_e1014cab6f31e43b, []int{28} } func (m *StatefulSetUpdateStrategy) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -858,6 +888,7 @@ func init() { proto.RegisterType((*StatefulSet)(nil), "k8s.io.api.apps.v1.StatefulSet") proto.RegisterType((*StatefulSetCondition)(nil), "k8s.io.api.apps.v1.StatefulSetCondition") proto.RegisterType((*StatefulSetList)(nil), "k8s.io.api.apps.v1.StatefulSetList") + proto.RegisterType((*StatefulSetPersistentVolumeClaimRetentionPolicy)(nil), "k8s.io.api.apps.v1.StatefulSetPersistentVolumeClaimRetentionPolicy") proto.RegisterType((*StatefulSetSpec)(nil), "k8s.io.api.apps.v1.StatefulSetSpec") proto.RegisterType((*StatefulSetStatus)(nil), "k8s.io.api.apps.v1.StatefulSetStatus") proto.RegisterType((*StatefulSetUpdateStrategy)(nil), "k8s.io.api.apps.v1.StatefulSetUpdateStrategy") @@ -868,134 +899,142 @@ func init() { } var fileDescriptor_e1014cab6f31e43b = []byte{ - // 2031 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x5a, 0xcd, 0x6f, 0x24, 0x47, - 0x15, 0x77, 0xcf, 0x87, 0x3d, 0x2e, 0xaf, 0xed, 0xdd, 0xb2, 0xb1, 0x27, 0xbb, 0x64, 0x66, 0x19, - 0x60, 0xe3, 0x64, 0xb3, 0x3d, 0xec, 0x66, 0x13, 0xa1, 0x2c, 0x02, 0x79, 0xc6, 0x21, 0x84, 0x78, - 0x6c, 0x53, 0x5e, 0xef, 0x61, 0x09, 0x12, 0xe5, 0xe9, 0xda, 0x71, 0xc7, 0xfd, 0xa5, 0xee, 0xea, - 0x61, 0x47, 0x5c, 0x10, 0x12, 0x37, 0x0e, 0xfc, 0x27, 0x08, 0x21, 0xb8, 0xa1, 0x08, 0x71, 0xd9, - 0x0b, 0x52, 0xc4, 0x85, 0x9c, 0x2c, 0x76, 0x72, 0x42, 0x28, 0x47, 0x2e, 0xb9, 0x80, 0xaa, 0xba, - 0xfa, 0xbb, 0xda, 0x33, 0xf6, 0x26, 0xce, 0x87, 0x72, 0xf3, 0x54, 0xfd, 0xde, 0xaf, 0xde, 0xab, - 0x7a, 0x55, 0xef, 0xd7, 0x55, 0x06, 0xf7, 0x8e, 0xbf, 0xeb, 0xa9, 0xba, 0xdd, 0x3e, 0xf6, 0x0f, - 0x89, 0x6b, 0x11, 0x4a, 0xbc, 0xf6, 0x90, 0x58, 0x9a, 0xed, 0xb6, 0x45, 0x07, 0x76, 0xf4, 0x36, - 0x76, 0x1c, 0xaf, 0x3d, 0xbc, 0xdd, 0x1e, 0x10, 0x8b, 0xb8, 0x98, 0x12, 0x4d, 0x75, 0x5c, 0x9b, - 0xda, 0x10, 0x06, 0x18, 0x15, 0x3b, 0xba, 0xca, 0x30, 0xea, 0xf0, 0xf6, 0xd5, 0x5b, 0x03, 0x9d, - 0x1e, 0xf9, 0x87, 0x6a, 0xdf, 0x36, 0xdb, 0x03, 0x7b, 0x60, 0xb7, 0x39, 0xf4, 0xd0, 0x7f, 0xc4, - 0x7f, 0xf1, 0x1f, 0xfc, 0xaf, 0x80, 0xe2, 0x6a, 0x2b, 0x31, 0x4c, 0xdf, 0x76, 0x89, 0x64, 0x98, - 0xab, 0x77, 0x63, 0x8c, 0x89, 0xfb, 0x47, 0xba, 0x45, 0xdc, 0x51, 0xdb, 0x39, 0x1e, 0xb0, 0x06, - 0xaf, 0x6d, 0x12, 0x8a, 0x65, 0x56, 0xed, 0x22, 0x2b, 0xd7, 0xb7, 0xa8, 0x6e, 0x92, 0x9c, 0xc1, - 0x6b, 0x93, 0x0c, 0xbc, 0xfe, 0x11, 0x31, 0x71, 0xce, 0xee, 0x95, 0x22, 0x3b, 0x9f, 0xea, 0x46, - 0x5b, 0xb7, 0xa8, 0x47, 0xdd, 0xac, 0x51, 0xeb, 0xbf, 0x0a, 0x80, 0x5d, 0xdb, 0xa2, 0xae, 0x6d, - 0x18, 0xc4, 0x45, 0x64, 0xa8, 0x7b, 0xba, 0x6d, 0xc1, 0x9f, 0x83, 
0x1a, 0x8b, 0x47, 0xc3, 0x14, - 0xd7, 0x95, 0xeb, 0xca, 0xc6, 0xc2, 0x9d, 0xef, 0xa8, 0xf1, 0x24, 0x47, 0xf4, 0xaa, 0x73, 0x3c, - 0x60, 0x0d, 0x9e, 0xca, 0xd0, 0xea, 0xf0, 0xb6, 0xba, 0x7b, 0xf8, 0x2e, 0xe9, 0xd3, 0x1e, 0xa1, - 0xb8, 0x03, 0x9f, 0x9c, 0x34, 0x67, 0xc6, 0x27, 0x4d, 0x10, 0xb7, 0xa1, 0x88, 0x15, 0xee, 0x82, - 0x0a, 0x67, 0x2f, 0x71, 0xf6, 0x5b, 0x85, 0xec, 0x22, 0x68, 0x15, 0xe1, 0x5f, 0xbc, 0xf1, 0x98, - 0x12, 0x8b, 0xb9, 0xd7, 0xb9, 0x24, 0xa8, 0x2b, 0x5b, 0x98, 0x62, 0xc4, 0x89, 0xe0, 0xcb, 0xa0, - 0xe6, 0x0a, 0xf7, 0xeb, 0xe5, 0xeb, 0xca, 0x46, 0xb9, 0x73, 0x59, 0xa0, 0x6a, 0x61, 0x58, 0x28, - 0x42, 0xb4, 0xfe, 0xa6, 0x80, 0xb5, 0x7c, 0xdc, 0xdb, 0xba, 0x47, 0xe1, 0x3b, 0xb9, 0xd8, 0xd5, - 0xe9, 0x62, 0x67, 0xd6, 0x3c, 0xf2, 0x68, 0xe0, 0xb0, 0x25, 0x11, 0xf7, 0xdb, 0xa0, 0xaa, 0x53, - 0x62, 0x7a, 0xf5, 0xd2, 0xf5, 0xf2, 0xc6, 0xc2, 0x9d, 0x1b, 0x6a, 0x3e, 0x77, 0xd5, 0xbc, 0x63, - 0x9d, 0x45, 0x41, 0x59, 0x7d, 0x8b, 0x19, 0xa3, 0x80, 0xa3, 0xf5, 0x3f, 0x05, 0xcc, 0x6f, 0x61, - 0x62, 0xda, 0xd6, 0x3e, 0xa1, 0x17, 0xb0, 0x68, 0x5d, 0x50, 0xf1, 0x1c, 0xd2, 0x17, 0x8b, 0xf6, - 0x0d, 0x99, 0xef, 0x91, 0x3b, 0xfb, 0x0e, 0xe9, 0xc7, 0x0b, 0xc5, 0x7e, 0x21, 0x6e, 0x0c, 0xdf, - 0x06, 0xb3, 0x1e, 0xc5, 0xd4, 0xf7, 0xf8, 0x32, 0x2d, 0xdc, 0xf9, 0xe6, 0xe9, 0x34, 0x1c, 0xda, - 0x59, 0x12, 0x44, 0xb3, 0xc1, 0x6f, 0x24, 0x28, 0x5a, 0xff, 0x2e, 0x01, 0x18, 0x61, 0xbb, 0xb6, - 0xa5, 0xe9, 0x94, 0xe5, 0xef, 0xeb, 0xa0, 0x42, 0x47, 0x0e, 0xe1, 0xd3, 0x30, 0xdf, 0xb9, 0x11, - 0x7a, 0x71, 0x7f, 0xe4, 0x90, 0x8f, 0x4f, 0x9a, 0x6b, 0x79, 0x0b, 0xd6, 0x83, 0xb8, 0x0d, 0xdc, - 0x8e, 0xfc, 0x2b, 0x71, 0xeb, 0xbb, 0xe9, 0xa1, 0x3f, 0x3e, 0x69, 0x4a, 0x0e, 0x0b, 0x35, 0x62, - 0x4a, 0x3b, 0x08, 0x87, 0x00, 0x1a, 0xd8, 0xa3, 0xf7, 0x5d, 0x6c, 0x79, 0xc1, 0x48, 0xba, 0x49, - 0x44, 0xe4, 0x2f, 0x4d, 0xb7, 0x3c, 0xcc, 0xa2, 0x73, 0x55, 0x78, 0x01, 0xb7, 0x73, 0x6c, 0x48, - 0x32, 0x02, 0xbc, 0x01, 0x66, 0x5d, 0x82, 0x3d, 0xdb, 0xaa, 0x57, 0x78, 0x14, 0xd1, 0x04, 0x22, - 0xde, 0x8a, 0x44, 0x2f, 0x7c, 0x11, 0xcc, 0x99, 0xc4, 0xf3, 0xf0, 0x80, 0xd4, 0xab, 0x1c, 0xb8, - 0x2c, 0x80, 0x73, 0xbd, 0xa0, 0x19, 0x85, 0xfd, 0xad, 0x3f, 0x28, 0x60, 0x31, 0x9a, 0xb9, 0x0b, - 0xd8, 0x2a, 0x9d, 0xf4, 0x56, 0x79, 0xfe, 0xd4, 0x3c, 0x29, 0xd8, 0x21, 0xef, 0x95, 0x13, 0x3e, - 0xb3, 0x24, 0x84, 0x3f, 0x03, 0x35, 0x8f, 0x18, 0xa4, 0x4f, 0x6d, 0x57, 0xf8, 0xfc, 0xca, 0x94, - 0x3e, 0xe3, 0x43, 0x62, 0xec, 0x0b, 0xd3, 0xce, 0x25, 0xe6, 0x74, 0xf8, 0x0b, 0x45, 0x94, 0xf0, - 0x27, 0xa0, 0x46, 0x89, 0xe9, 0x18, 0x98, 0x12, 0xb1, 0x4d, 0x52, 0xf9, 0xcd, 0xd2, 0x85, 0x91, - 0xed, 0xd9, 0xda, 0x7d, 0x01, 0xe3, 0x1b, 0x25, 0x9a, 0x87, 0xb0, 0x15, 0x45, 0x34, 0xf0, 0x18, - 0x2c, 0xf9, 0x8e, 0xc6, 0x90, 0x94, 0x1d, 0xdd, 0x83, 0x91, 0x48, 0x9f, 0x9b, 0xa7, 0x4e, 0xc8, - 0x41, 0xca, 0xa4, 0xb3, 0x26, 0x06, 0x58, 0x4a, 0xb7, 0xa3, 0x0c, 0x35, 0xdc, 0x04, 0xcb, 0xa6, - 0x6e, 0x21, 0x82, 0xb5, 0xd1, 0x3e, 0xe9, 0xdb, 0x96, 0xe6, 0xf1, 0x04, 0xaa, 0x76, 0xd6, 0x05, - 0xc1, 0x72, 0x2f, 0xdd, 0x8d, 0xb2, 0x78, 0xb8, 0x0d, 0x56, 0xc3, 0x73, 0xf6, 0x47, 0xba, 0x47, - 0x6d, 0x77, 0xb4, 0xad, 0x9b, 0x3a, 0xad, 0xcf, 0x72, 0x9e, 0xfa, 0xf8, 0xa4, 0xb9, 0x8a, 0x24, - 0xfd, 0x48, 0x6a, 0xd5, 0xfa, 0xed, 0x2c, 0x58, 0xce, 0x9c, 0x06, 0xf0, 0x01, 0x58, 0xeb, 0xfb, - 0xae, 0x4b, 0x2c, 0xba, 0xe3, 0x9b, 0x87, 0xc4, 0xdd, 0xef, 0x1f, 0x11, 0xcd, 0x37, 0x88, 0xc6, - 0x57, 0xb4, 0xda, 0x69, 0x08, 0x5f, 0xd7, 0xba, 0x52, 0x14, 0x2a, 0xb0, 0x86, 0x3f, 0x06, 0xd0, - 0xe2, 0x4d, 0x3d, 0xdd, 0xf3, 0x22, 0xce, 0x12, 0xe7, 0x8c, 0x36, 0xe0, 0x4e, 0x0e, 0x81, 
0x24, - 0x56, 0xcc, 0x47, 0x8d, 0x78, 0xba, 0x4b, 0xb4, 0xac, 0x8f, 0xe5, 0xb4, 0x8f, 0x5b, 0x52, 0x14, - 0x2a, 0xb0, 0x86, 0xaf, 0x82, 0x85, 0x60, 0x34, 0x3e, 0xe7, 0x62, 0x71, 0x56, 0x04, 0xd9, 0xc2, - 0x4e, 0xdc, 0x85, 0x92, 0x38, 0x16, 0x9a, 0x7d, 0xe8, 0x11, 0x77, 0x48, 0xb4, 0x37, 0x03, 0x0d, - 0xc0, 0x0a, 0x65, 0x95, 0x17, 0xca, 0x28, 0xb4, 0xdd, 0x1c, 0x02, 0x49, 0xac, 0x58, 0x68, 0x41, - 0xd6, 0xe4, 0x42, 0x9b, 0x4d, 0x87, 0x76, 0x20, 0x45, 0xa1, 0x02, 0x6b, 0x96, 0x7b, 0x81, 0xcb, - 0x9b, 0x43, 0xac, 0x1b, 0xf8, 0xd0, 0x20, 0xf5, 0xb9, 0x74, 0xee, 0xed, 0xa4, 0xbb, 0x51, 0x16, - 0x0f, 0xdf, 0x04, 0x57, 0x82, 0xa6, 0x03, 0x0b, 0x47, 0x24, 0x35, 0x4e, 0xf2, 0x9c, 0x20, 0xb9, - 0xb2, 0x93, 0x05, 0xa0, 0xbc, 0x0d, 0x7c, 0x1d, 0x2c, 0xf5, 0x6d, 0xc3, 0xe0, 0xf9, 0xd8, 0xb5, - 0x7d, 0x8b, 0xd6, 0xe7, 0x39, 0x0b, 0x64, 0x7b, 0xa8, 0x9b, 0xea, 0x41, 0x19, 0x24, 0x7c, 0x08, - 0x40, 0x3f, 0x2c, 0x07, 0x5e, 0x1d, 0x14, 0x17, 0xfa, 0x7c, 0x1d, 0x8a, 0x0b, 0x70, 0xd4, 0xe4, - 0xa1, 0x04, 0x5b, 0xeb, 0x3d, 0x05, 0xac, 0x17, 0xec, 0x71, 0xf8, 0x83, 0x54, 0xd5, 0xbb, 0x99, - 0xa9, 0x7a, 0xd7, 0x0a, 0xcc, 0x12, 0xa5, 0xaf, 0x0f, 0x16, 0x99, 0xee, 0xd0, 0xad, 0x41, 0x00, - 0x11, 0x27, 0xd8, 0x4b, 0x32, 0xdf, 0x51, 0x12, 0x18, 0x1f, 0xc3, 0x57, 0xc6, 0x27, 0xcd, 0xc5, - 0x54, 0x1f, 0x4a, 0x73, 0xb6, 0x7e, 0x5d, 0x02, 0x60, 0x8b, 0x38, 0x86, 0x3d, 0x32, 0x89, 0x75, - 0x11, 0xaa, 0x65, 0x2b, 0xa5, 0x5a, 0x5a, 0xd2, 0x85, 0x88, 0xfc, 0x29, 0x94, 0x2d, 0xdb, 0x19, - 0xd9, 0xf2, 0xad, 0x09, 0x3c, 0xa7, 0xeb, 0x96, 0x7f, 0x96, 0xc1, 0x4a, 0x0c, 0x8e, 0x85, 0xcb, - 0xbd, 0xd4, 0x12, 0xbe, 0x90, 0x59, 0xc2, 0x75, 0x89, 0xc9, 0xa7, 0xa6, 0x5c, 0xde, 0x05, 0x4b, - 0x4c, 0x57, 0x04, 0xab, 0xc6, 0x55, 0xcb, 0xec, 0x99, 0x55, 0x4b, 0x54, 0x75, 0xb6, 0x53, 0x4c, - 0x28, 0xc3, 0x5c, 0xa0, 0x92, 0xe6, 0xbe, 0x88, 0x2a, 0xe9, 0x8f, 0x0a, 0x58, 0x8a, 0x97, 0xe9, - 0x02, 0x64, 0x52, 0x37, 0x2d, 0x93, 0x1a, 0xa7, 0xe7, 0x65, 0x81, 0x4e, 0xfa, 0x47, 0x25, 0xe9, - 0x35, 0x17, 0x4a, 0x1b, 0xec, 0x83, 0xca, 0x31, 0xf4, 0x3e, 0xf6, 0x44, 0x59, 0xbd, 0x14, 0x7c, - 0x4c, 0x05, 0x6d, 0x28, 0xea, 0x4d, 0x49, 0xaa, 0xd2, 0xa7, 0x2b, 0xa9, 0xca, 0x9f, 0x8c, 0xa4, - 0xba, 0x0f, 0x6a, 0x5e, 0x28, 0xa6, 0x2a, 0x9c, 0xf2, 0xc6, 0xa4, 0xed, 0x2c, 0x74, 0x54, 0xc4, - 0x1a, 0x29, 0xa8, 0x88, 0x49, 0xa6, 0x9d, 0xaa, 0x9f, 0xa5, 0x76, 0x62, 0xe9, 0xed, 0x60, 0xdf, - 0x23, 0x1a, 0xdf, 0x4a, 0xb5, 0x38, 0xbd, 0xf7, 0x78, 0x2b, 0x12, 0xbd, 0xf0, 0x00, 0xac, 0x3b, - 0xae, 0x3d, 0x70, 0x89, 0xe7, 0x6d, 0x11, 0xac, 0x19, 0xba, 0x45, 0xc2, 0x00, 0x82, 0xaa, 0x77, - 0x6d, 0x7c, 0xd2, 0x5c, 0xdf, 0x93, 0x43, 0x50, 0x91, 0x6d, 0xeb, 0x2f, 0x15, 0x70, 0x39, 0x7b, - 0x22, 0x16, 0x08, 0x11, 0xe5, 0x5c, 0x42, 0xe4, 0xe5, 0x44, 0x8a, 0x06, 0x2a, 0x2d, 0xf1, 0xcd, - 0x9f, 0x4b, 0xd3, 0x4d, 0xb0, 0x2c, 0x84, 0x47, 0xd8, 0x29, 0xa4, 0x58, 0xb4, 0x3c, 0x07, 0xe9, - 0x6e, 0x94, 0xc5, 0xc3, 0x7b, 0x60, 0xd1, 0xe5, 0xda, 0x2a, 0x24, 0x08, 0xf4, 0xc9, 0xd7, 0x04, - 0xc1, 0x22, 0x4a, 0x76, 0xa2, 0x34, 0x96, 0x69, 0x93, 0x58, 0x72, 0x84, 0x04, 0x95, 0xb4, 0x36, - 0xd9, 0xcc, 0x02, 0x50, 0xde, 0x06, 0xf6, 0xc0, 0x8a, 0x6f, 0xe5, 0xa9, 0x82, 0x5c, 0xbb, 0x26, - 0xa8, 0x56, 0x0e, 0xf2, 0x10, 0x24, 0xb3, 0x83, 0x3f, 0x4d, 0xc9, 0x95, 0x59, 0x7e, 0x8a, 0xbc, - 0x70, 0xfa, 0x76, 0x98, 0x5a, 0xaf, 0x48, 0x74, 0x54, 0x6d, 0x5a, 0x1d, 0xd5, 0xfa, 0xb3, 0x02, - 0x60, 0x7e, 0x0b, 0x4e, 0xfc, 0xb8, 0xcf, 0x59, 0x24, 0x4a, 0xa4, 0x26, 0x57, 0x38, 0x37, 0x27, - 0x2b, 0x9c, 0xf8, 0x04, 0x9d, 0x4e, 0xe2, 0x88, 0xe9, 0xbd, 0x98, 0x8b, 0x99, 0x29, 0x24, 0x4e, - 0xec, 0xcf, 0xb3, 
0x49, 0x9c, 0x04, 0xcf, 0xe9, 0x12, 0xe7, 0x3f, 0x25, 0xb0, 0x12, 0x83, 0xa7, - 0x96, 0x38, 0x12, 0x93, 0xaf, 0x2e, 0x67, 0xa6, 0x93, 0x1d, 0xf1, 0xd4, 0x7d, 0x4e, 0x64, 0x47, - 0xec, 0x50, 0x81, 0xec, 0xf8, 0x7d, 0x29, 0xe9, 0xf5, 0x19, 0x65, 0xc7, 0x27, 0x70, 0x55, 0xf1, - 0x85, 0x53, 0x2e, 0xad, 0xbf, 0x96, 0xc1, 0xe5, 0xec, 0x16, 0x4c, 0xd5, 0x41, 0x65, 0x62, 0x1d, - 0xdc, 0x03, 0xab, 0x8f, 0x7c, 0xc3, 0x18, 0xf1, 0x18, 0x12, 0xc5, 0x30, 0xa8, 0xa0, 0x5f, 0x17, - 0x96, 0xab, 0x3f, 0x94, 0x60, 0x90, 0xd4, 0x32, 0x5f, 0x16, 0x2b, 0xcf, 0x5a, 0x16, 0xab, 0xe7, - 0x28, 0x8b, 0x72, 0x65, 0x51, 0x3e, 0x97, 0xb2, 0x98, 0xba, 0x26, 0x4a, 0x8e, 0xab, 0x89, 0xdf, - 0xf0, 0x63, 0x05, 0xac, 0xc9, 0x3f, 0x9f, 0xa1, 0x01, 0x96, 0x4c, 0xfc, 0x38, 0x79, 0x79, 0x31, - 0xa9, 0x60, 0xf8, 0x54, 0x37, 0xd4, 0xe0, 0x75, 0x47, 0x7d, 0xcb, 0xa2, 0xbb, 0xee, 0x3e, 0x75, - 0x75, 0x6b, 0x10, 0x14, 0xd8, 0x5e, 0x8a, 0x0b, 0x65, 0xb8, 0xe1, 0x43, 0x50, 0x33, 0xf1, 0xe3, - 0x7d, 0xdf, 0x1d, 0x84, 0x85, 0xf0, 0xec, 0xe3, 0xf0, 0xdc, 0xef, 0x09, 0x16, 0x14, 0xf1, 0xb5, - 0x3e, 0x54, 0xc0, 0x7a, 0x41, 0x05, 0xfd, 0x12, 0x45, 0xb9, 0x0b, 0xae, 0xa7, 0x82, 0x64, 0x1b, - 0x92, 0x3c, 0xf2, 0x0d, 0xbe, 0x37, 0x85, 0x5e, 0xb9, 0x09, 0xe6, 0x1d, 0xec, 0x52, 0x3d, 0x12, - 0xba, 0xd5, 0xce, 0xe2, 0xf8, 0xa4, 0x39, 0xbf, 0x17, 0x36, 0xa2, 0xb8, 0xbf, 0xf5, 0x9b, 0x12, - 0x58, 0x48, 0x90, 0x5c, 0x80, 0x76, 0x78, 0x23, 0xa5, 0x1d, 0xa4, 0xaf, 0x31, 0xc9, 0xa8, 0x8a, - 0xc4, 0x43, 0x2f, 0x23, 0x1e, 0xbe, 0x3d, 0x89, 0xe8, 0x74, 0xf5, 0xf0, 0x51, 0x09, 0xac, 0x26, - 0xd0, 0xb1, 0x7c, 0xf8, 0x5e, 0x4a, 0x3e, 0x6c, 0x64, 0xe4, 0x43, 0x5d, 0x66, 0xf3, 0x95, 0x7e, - 0x98, 0xac, 0x1f, 0xfe, 0xa4, 0x80, 0xe5, 0xc4, 0xdc, 0x5d, 0x80, 0x80, 0xd8, 0x4a, 0x0b, 0x88, - 0xe6, 0x84, 0x7c, 0x29, 0x50, 0x10, 0x4f, 0xaa, 0x29, 0xbf, 0xbf, 0xf4, 0x37, 0x17, 0xbf, 0x04, - 0xab, 0x43, 0xdb, 0xf0, 0x4d, 0xd2, 0x35, 0xb0, 0x6e, 0x86, 0x00, 0x56, 0x71, 0xd9, 0x24, 0xbe, - 0x28, 0xa5, 0x27, 0xae, 0xa7, 0x7b, 0x94, 0x58, 0xf4, 0x41, 0x6c, 0x19, 0xd7, 0xf9, 0x07, 0x12, - 0x3a, 0x24, 0x1d, 0x04, 0xbe, 0x0a, 0x16, 0x58, 0xa5, 0xd4, 0xfb, 0x64, 0x07, 0x9b, 0x61, 0x4e, - 0x45, 0x6f, 0x0f, 0xfb, 0x71, 0x17, 0x4a, 0xe2, 0xe0, 0x11, 0x58, 0x71, 0x6c, 0xad, 0x87, 0x2d, - 0x3c, 0x20, 0xec, 0xfc, 0xdf, 0xb3, 0x0d, 0xbd, 0x3f, 0xe2, 0x77, 0x1a, 0xf3, 0x9d, 0xd7, 0xc2, - 0xef, 0xd5, 0xbd, 0x3c, 0x84, 0x7d, 0x0f, 0x48, 0x9a, 0xf9, 0x7e, 0x96, 0x51, 0x42, 0x33, 0xf7, - 0x54, 0x36, 0x97, 0xfb, 0xff, 0x02, 0x59, 0x72, 0x9d, 0xf3, 0xb1, 0xac, 0xe8, 0xb6, 0xa6, 0x76, - 0xae, 0x97, 0xae, 0x8f, 0x2a, 0xe0, 0x4a, 0xee, 0x80, 0xfc, 0x0c, 0xef, 0x4b, 0x72, 0xaa, 0xae, - 0x7c, 0x06, 0x55, 0xb7, 0x09, 0x96, 0xc5, 0x23, 0x5b, 0x46, 0x14, 0x46, 0xe2, 0xbc, 0x9b, 0xee, - 0x46, 0x59, 0xbc, 0xec, 0xbe, 0xa6, 0x7a, 0xc6, 0xfb, 0x9a, 0xa4, 0x17, 0xe2, 0x7f, 0x43, 0x82, - 0xac, 0xcb, 0x7b, 0x21, 0xfe, 0x45, 0x24, 0x8b, 0x87, 0xdf, 0x0f, 0x53, 0x2a, 0x62, 0x98, 0xe3, - 0x0c, 0x99, 0x1c, 0x89, 0x08, 0x32, 0xe8, 0x67, 0x7a, 0x48, 0x7a, 0x47, 0xf2, 0x90, 0xb4, 0x31, - 0x21, 0x95, 0xa7, 0x97, 0xa1, 0x7f, 0x57, 0xc0, 0x73, 0x85, 0x7b, 0x00, 0x6e, 0xa6, 0xea, 0xec, - 0xad, 0x4c, 0x9d, 0x7d, 0xbe, 0xd0, 0x30, 0x51, 0x6c, 0x4d, 0xf9, 0x65, 0xcb, 0xdd, 0x89, 0x97, - 0x2d, 0x12, 0x15, 0x35, 0xf9, 0xd6, 0xa5, 0xb3, 0xf1, 0xe4, 0x69, 0x63, 0xe6, 0xfd, 0xa7, 0x8d, - 0x99, 0x0f, 0x9e, 0x36, 0x66, 0x7e, 0x35, 0x6e, 0x28, 0x4f, 0xc6, 0x0d, 0xe5, 0xfd, 0x71, 0x43, - 0xf9, 0x60, 0xdc, 0x50, 0xfe, 0x35, 0x6e, 0x28, 0xbf, 0xfb, 0xb0, 0x31, 0xf3, 0xb0, 0x34, 0xbc, - 0xfd, 0xff, 0x00, 0x00, 0x00, 0xff, 0xff, 
0xe6, 0xc9, 0x16, 0xd9, 0x6e, 0x26, 0x00, 0x00, + // 2149 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x5a, 0xcb, 0x6f, 0x1b, 0xc7, + 0x19, 0xd7, 0xf2, 0x21, 0x51, 0x23, 0x4b, 0xb2, 0x47, 0xaa, 0xc4, 0xd8, 0x0d, 0xe9, 0xb2, 0xae, + 0xa3, 0xc4, 0x31, 0x59, 0x3b, 0x4e, 0x50, 0xc4, 0x45, 0x02, 0x91, 0x4a, 0xd3, 0x34, 0x7a, 0x75, + 0x64, 0x39, 0x80, 0x9b, 0x16, 0x1d, 0x2d, 0xc7, 0xd4, 0x46, 0xfb, 0xc2, 0xee, 0x2c, 0x63, 0xa2, + 0x97, 0xa2, 0x40, 0x6f, 0x3d, 0xf4, 0x3f, 0x29, 0x8a, 0xa2, 0xb9, 0x15, 0x41, 0xd0, 0x8b, 0x2f, + 0x45, 0x83, 0x5e, 0x9a, 0x13, 0x51, 0x33, 0xa7, 0xa2, 0xe8, 0xad, 0xbd, 0xf8, 0xd2, 0x62, 0x66, + 0x67, 0xdf, 0xb3, 0x22, 0x25, 0x27, 0xca, 0x03, 0xbe, 0x89, 0x33, 0xbf, 0xef, 0x37, 0xdf, 0xcc, + 0x7c, 0xdf, 0x7c, 0xbf, 0x99, 0x15, 0xb8, 0x7d, 0xf4, 0x3d, 0xb7, 0xa9, 0x59, 0xad, 0x23, 0xef, + 0x80, 0x38, 0x26, 0xa1, 0xc4, 0x6d, 0xf5, 0x89, 0xd9, 0xb5, 0x9c, 0x96, 0xe8, 0xc0, 0xb6, 0xd6, + 0xc2, 0xb6, 0xed, 0xb6, 0xfa, 0x37, 0x5a, 0x3d, 0x62, 0x12, 0x07, 0x53, 0xd2, 0x6d, 0xda, 0x8e, + 0x45, 0x2d, 0x08, 0x7d, 0x4c, 0x13, 0xdb, 0x5a, 0x93, 0x61, 0x9a, 0xfd, 0x1b, 0x17, 0xaf, 0xf7, + 0x34, 0x7a, 0xe8, 0x1d, 0x34, 0x55, 0xcb, 0x68, 0xf5, 0xac, 0x9e, 0xd5, 0xe2, 0xd0, 0x03, 0xef, + 0x3e, 0xff, 0xc5, 0x7f, 0xf0, 0xbf, 0x7c, 0x8a, 0x8b, 0x8d, 0xd8, 0x30, 0xaa, 0xe5, 0x10, 0xc9, + 0x30, 0x17, 0x6f, 0x45, 0x18, 0x03, 0xab, 0x87, 0x9a, 0x49, 0x9c, 0x41, 0xcb, 0x3e, 0xea, 0xb1, + 0x06, 0xb7, 0x65, 0x10, 0x8a, 0x65, 0x56, 0xad, 0x3c, 0x2b, 0xc7, 0x33, 0xa9, 0x66, 0x90, 0x8c, + 0xc1, 0x2b, 0xe3, 0x0c, 0x5c, 0xf5, 0x90, 0x18, 0x38, 0x63, 0xf7, 0x52, 0x9e, 0x9d, 0x47, 0x35, + 0xbd, 0xa5, 0x99, 0xd4, 0xa5, 0x4e, 0xda, 0xa8, 0xf1, 0x5f, 0x05, 0xc0, 0x8e, 0x65, 0x52, 0xc7, + 0xd2, 0x75, 0xe2, 0x20, 0xd2, 0xd7, 0x5c, 0xcd, 0x32, 0xe1, 0xcf, 0x41, 0x85, 0xcd, 0xa7, 0x8b, + 0x29, 0xae, 0x2a, 0x97, 0x95, 0xb5, 0xb9, 0x9b, 0xdf, 0x6d, 0x46, 0x8b, 0x1c, 0xd2, 0x37, 0xed, + 0xa3, 0x1e, 0x6b, 0x70, 0x9b, 0x0c, 0xdd, 0xec, 0xdf, 0x68, 0xee, 0x1c, 0xbc, 0x47, 0x54, 0xba, + 0x45, 0x28, 0x6e, 0xc3, 0x87, 0xc3, 0xfa, 0xd4, 0x68, 0x58, 0x07, 0x51, 0x1b, 0x0a, 0x59, 0xe1, + 0x0e, 0x28, 0x71, 0xf6, 0x02, 0x67, 0xbf, 0x9e, 0xcb, 0x2e, 0x26, 0xdd, 0x44, 0xf8, 0xfd, 0x37, + 0x1e, 0x50, 0x62, 0x32, 0xf7, 0xda, 0xe7, 0x04, 0x75, 0x69, 0x03, 0x53, 0x8c, 0x38, 0x11, 0x7c, + 0x11, 0x54, 0x1c, 0xe1, 0x7e, 0xb5, 0x78, 0x59, 0x59, 0x2b, 0xb6, 0xcf, 0x0b, 0x54, 0x25, 0x98, + 0x16, 0x0a, 0x11, 0x8d, 0x3f, 0x2b, 0x60, 0x25, 0x3b, 0xef, 0x4d, 0xcd, 0xa5, 0xf0, 0xdd, 0xcc, + 0xdc, 0x9b, 0x93, 0xcd, 0x9d, 0x59, 0xf3, 0x99, 0x87, 0x03, 0x07, 0x2d, 0xb1, 0x79, 0xbf, 0x0d, + 0xca, 0x1a, 0x25, 0x86, 0x5b, 0x2d, 0x5c, 0x2e, 0xae, 0xcd, 0xdd, 0xbc, 0xda, 0xcc, 0xc6, 0x6e, + 0x33, 0xeb, 0x58, 0x7b, 0x5e, 0x50, 0x96, 0xdf, 0x62, 0xc6, 0xc8, 0xe7, 0x68, 0xfc, 0x4f, 0x01, + 0xb3, 0x1b, 0x98, 0x18, 0x96, 0xb9, 0x47, 0xe8, 0x19, 0x6c, 0x5a, 0x07, 0x94, 0x5c, 0x9b, 0xa8, + 0x62, 0xd3, 0xbe, 0x25, 0xf3, 0x3d, 0x74, 0x67, 0xcf, 0x26, 0x6a, 0xb4, 0x51, 0xec, 0x17, 0xe2, + 0xc6, 0xf0, 0x6d, 0x30, 0xed, 0x52, 0x4c, 0x3d, 0x97, 0x6f, 0xd3, 0xdc, 0xcd, 0x6f, 0x1f, 0x4f, + 0xc3, 0xa1, 0xed, 0x05, 0x41, 0x34, 0xed, 0xff, 0x46, 0x82, 0xa2, 0xf1, 0xcf, 0x02, 0x80, 0x21, + 0xb6, 0x63, 0x99, 0x5d, 0x8d, 0xb2, 0xf8, 0x7d, 0x15, 0x94, 0xe8, 0xc0, 0x26, 0x7c, 0x19, 0x66, + 0xdb, 0x57, 0x03, 0x2f, 0xee, 0x0c, 0x6c, 0xf2, 0x78, 0x58, 0x5f, 0xc9, 0x5a, 0xb0, 0x1e, 0xc4, + 0x6d, 0xe0, 0x66, 0xe8, 0x5f, 0x81, 0x5b, 0xdf, 0x4a, 0x0e, 0xfd, 0x78, 0x58, 0x97, 0x1c, 0x16, + 0xcd, 0x90, 0x29, 0xe9, 
0x20, 0xec, 0x03, 0xa8, 0x63, 0x97, 0xde, 0x71, 0xb0, 0xe9, 0xfa, 0x23, + 0x69, 0x06, 0x11, 0x33, 0x7f, 0x61, 0xb2, 0xed, 0x61, 0x16, 0xed, 0x8b, 0xc2, 0x0b, 0xb8, 0x99, + 0x61, 0x43, 0x92, 0x11, 0xe0, 0x55, 0x30, 0xed, 0x10, 0xec, 0x5a, 0x66, 0xb5, 0xc4, 0x67, 0x11, + 0x2e, 0x20, 0xe2, 0xad, 0x48, 0xf4, 0xc2, 0xe7, 0xc1, 0x8c, 0x41, 0x5c, 0x17, 0xf7, 0x48, 0xb5, + 0xcc, 0x81, 0x8b, 0x02, 0x38, 0xb3, 0xe5, 0x37, 0xa3, 0xa0, 0xbf, 0xf1, 0x7b, 0x05, 0xcc, 0x87, + 0x2b, 0x77, 0x06, 0xa9, 0xd2, 0x4e, 0xa6, 0xca, 0xb3, 0xc7, 0xc6, 0x49, 0x4e, 0x86, 0x7c, 0x58, + 0x8c, 0xf9, 0xcc, 0x82, 0x10, 0xfe, 0x14, 0x54, 0x5c, 0xa2, 0x13, 0x95, 0x5a, 0x8e, 0xf0, 0xf9, + 0xa5, 0x09, 0x7d, 0xc6, 0x07, 0x44, 0xdf, 0x13, 0xa6, 0xed, 0x73, 0xcc, 0xe9, 0xe0, 0x17, 0x0a, + 0x29, 0xe1, 0x8f, 0x41, 0x85, 0x12, 0xc3, 0xd6, 0x31, 0x25, 0x22, 0x4d, 0x12, 0xf1, 0xcd, 0xc2, + 0x85, 0x91, 0xed, 0x5a, 0xdd, 0x3b, 0x02, 0xc6, 0x13, 0x25, 0x5c, 0x87, 0xa0, 0x15, 0x85, 0x34, + 0xf0, 0x08, 0x2c, 0x78, 0x76, 0x97, 0x21, 0x29, 0x3b, 0xba, 0x7b, 0x03, 0x11, 0x3e, 0xd7, 0x8e, + 0x5d, 0x90, 0xfd, 0x84, 0x49, 0x7b, 0x45, 0x0c, 0xb0, 0x90, 0x6c, 0x47, 0x29, 0x6a, 0xb8, 0x0e, + 0x16, 0x0d, 0xcd, 0x44, 0x04, 0x77, 0x07, 0x7b, 0x44, 0xb5, 0xcc, 0xae, 0xcb, 0x03, 0xa8, 0xdc, + 0x5e, 0x15, 0x04, 0x8b, 0x5b, 0xc9, 0x6e, 0x94, 0xc6, 0xc3, 0x4d, 0xb0, 0x1c, 0x9c, 0xb3, 0x3f, + 0xd4, 0x5c, 0x6a, 0x39, 0x83, 0x4d, 0xcd, 0xd0, 0x68, 0x75, 0x9a, 0xf3, 0x54, 0x47, 0xc3, 0xfa, + 0x32, 0x92, 0xf4, 0x23, 0xa9, 0x55, 0xe3, 0x37, 0xd3, 0x60, 0x31, 0x75, 0x1a, 0xc0, 0xbb, 0x60, + 0x45, 0xf5, 0x1c, 0x87, 0x98, 0x74, 0xdb, 0x33, 0x0e, 0x88, 0xb3, 0xa7, 0x1e, 0x92, 0xae, 0xa7, + 0x93, 0x2e, 0xdf, 0xd1, 0x72, 0xbb, 0x26, 0x7c, 0x5d, 0xe9, 0x48, 0x51, 0x28, 0xc7, 0x1a, 0xfe, + 0x08, 0x40, 0x93, 0x37, 0x6d, 0x69, 0xae, 0x1b, 0x72, 0x16, 0x38, 0x67, 0x98, 0x80, 0xdb, 0x19, + 0x04, 0x92, 0x58, 0x31, 0x1f, 0xbb, 0xc4, 0xd5, 0x1c, 0xd2, 0x4d, 0xfb, 0x58, 0x4c, 0xfa, 0xb8, + 0x21, 0x45, 0xa1, 0x1c, 0x6b, 0xf8, 0x32, 0x98, 0xf3, 0x47, 0xe3, 0x6b, 0x2e, 0x36, 0x67, 0x49, + 0x90, 0xcd, 0x6d, 0x47, 0x5d, 0x28, 0x8e, 0x63, 0x53, 0xb3, 0x0e, 0x5c, 0xe2, 0xf4, 0x49, 0xf7, + 0x4d, 0x5f, 0x03, 0xb0, 0x42, 0x59, 0xe6, 0x85, 0x32, 0x9c, 0xda, 0x4e, 0x06, 0x81, 0x24, 0x56, + 0x6c, 0x6a, 0x7e, 0xd4, 0x64, 0xa6, 0x36, 0x9d, 0x9c, 0xda, 0xbe, 0x14, 0x85, 0x72, 0xac, 0x59, + 0xec, 0xf9, 0x2e, 0xaf, 0xf7, 0xb1, 0xa6, 0xe3, 0x03, 0x9d, 0x54, 0x67, 0x92, 0xb1, 0xb7, 0x9d, + 0xec, 0x46, 0x69, 0x3c, 0x7c, 0x13, 0x5c, 0xf0, 0x9b, 0xf6, 0x4d, 0x1c, 0x92, 0x54, 0x38, 0xc9, + 0x33, 0x82, 0xe4, 0xc2, 0x76, 0x1a, 0x80, 0xb2, 0x36, 0xf0, 0x55, 0xb0, 0xa0, 0x5a, 0xba, 0xce, + 0xe3, 0xb1, 0x63, 0x79, 0x26, 0xad, 0xce, 0x72, 0x16, 0xc8, 0x72, 0xa8, 0x93, 0xe8, 0x41, 0x29, + 0x24, 0xbc, 0x07, 0x80, 0x1a, 0x94, 0x03, 0xb7, 0x0a, 0xf2, 0x0b, 0x7d, 0xb6, 0x0e, 0x45, 0x05, + 0x38, 0x6c, 0x72, 0x51, 0x8c, 0xad, 0xf1, 0xa1, 0x02, 0x56, 0x73, 0x72, 0x1c, 0xbe, 0x9e, 0xa8, + 0x7a, 0xd7, 0x52, 0x55, 0xef, 0x52, 0x8e, 0x59, 0xac, 0xf4, 0xa9, 0x60, 0x9e, 0xe9, 0x0e, 0xcd, + 0xec, 0xf9, 0x10, 0x71, 0x82, 0xbd, 0x20, 0xf3, 0x1d, 0xc5, 0x81, 0xd1, 0x31, 0x7c, 0x61, 0x34, + 0xac, 0xcf, 0x27, 0xfa, 0x50, 0x92, 0xb3, 0xf1, 0xab, 0x02, 0x00, 0x1b, 0xc4, 0xd6, 0xad, 0x81, + 0x41, 0xcc, 0xb3, 0x50, 0x2d, 0x1b, 0x09, 0xd5, 0xd2, 0x90, 0x6e, 0x44, 0xe8, 0x4f, 0xae, 0x6c, + 0xd9, 0x4c, 0xc9, 0x96, 0x2b, 0x63, 0x78, 0x8e, 0xd7, 0x2d, 0x7f, 0x2f, 0x82, 0xa5, 0x08, 0x1c, + 0x09, 0x97, 0xdb, 0x89, 0x2d, 0x7c, 0x2e, 0xb5, 0x85, 0xab, 0x12, 0x93, 0xcf, 0x4d, 0xb9, 0xbc, + 0x07, 0x16, 0x98, 0xae, 0xf0, 0x77, 0x8d, 0xab, 
0x96, 0xe9, 0x13, 0xab, 0x96, 0xb0, 0xea, 0x6c, + 0x26, 0x98, 0x50, 0x8a, 0x39, 0x47, 0x25, 0xcd, 0x7c, 0x15, 0x55, 0xd2, 0x1f, 0x14, 0xb0, 0x10, + 0x6d, 0xd3, 0x19, 0xc8, 0xa4, 0x4e, 0x52, 0x26, 0xd5, 0x8e, 0x8f, 0xcb, 0x1c, 0x9d, 0xf4, 0xb7, + 0x52, 0xdc, 0x6b, 0x2e, 0x94, 0xd6, 0xd8, 0x85, 0xca, 0xd6, 0x35, 0x15, 0xbb, 0xa2, 0xac, 0x9e, + 0xf3, 0x2f, 0x53, 0x7e, 0x1b, 0x0a, 0x7b, 0x13, 0x92, 0xaa, 0xf0, 0xf9, 0x4a, 0xaa, 0xe2, 0x67, + 0x23, 0xa9, 0xee, 0x80, 0x8a, 0x1b, 0x88, 0xa9, 0x12, 0xa7, 0xbc, 0x3a, 0x2e, 0x9d, 0x85, 0x8e, + 0x0a, 0x59, 0x43, 0x05, 0x15, 0x32, 0xc9, 0xb4, 0x53, 0xf9, 0x8b, 0xd4, 0x4e, 0x2c, 0xbc, 0x6d, + 0xec, 0xb9, 0xa4, 0xcb, 0x53, 0xa9, 0x12, 0x85, 0xf7, 0x2e, 0x6f, 0x45, 0xa2, 0x17, 0xee, 0x83, + 0x55, 0xdb, 0xb1, 0x7a, 0x0e, 0x71, 0xdd, 0x0d, 0x82, 0xbb, 0xba, 0x66, 0x92, 0x60, 0x02, 0x7e, + 0xd5, 0xbb, 0x34, 0x1a, 0xd6, 0x57, 0x77, 0xe5, 0x10, 0x94, 0x67, 0xdb, 0xf8, 0x53, 0x09, 0x9c, + 0x4f, 0x9f, 0x88, 0x39, 0x42, 0x44, 0x39, 0x95, 0x10, 0x79, 0x31, 0x16, 0xa2, 0xbe, 0x4a, 0x8b, + 0xdd, 0xf9, 0x33, 0x61, 0xba, 0x0e, 0x16, 0x85, 0xf0, 0x08, 0x3a, 0x85, 0x14, 0x0b, 0xb7, 0x67, + 0x3f, 0xd9, 0x8d, 0xd2, 0x78, 0x78, 0x1b, 0xcc, 0x3b, 0x5c, 0x5b, 0x05, 0x04, 0xbe, 0x3e, 0xf9, + 0x86, 0x20, 0x98, 0x47, 0xf1, 0x4e, 0x94, 0xc4, 0x32, 0x6d, 0x12, 0x49, 0x8e, 0x80, 0xa0, 0x94, + 0xd4, 0x26, 0xeb, 0x69, 0x00, 0xca, 0xda, 0xc0, 0x2d, 0xb0, 0xe4, 0x99, 0x59, 0x2a, 0x3f, 0xd6, + 0x2e, 0x09, 0xaa, 0xa5, 0xfd, 0x2c, 0x04, 0xc9, 0xec, 0xe0, 0x4f, 0x12, 0x72, 0x65, 0x9a, 0x9f, + 0x22, 0xcf, 0x1d, 0x9f, 0x0e, 0x13, 0xeb, 0x15, 0x89, 0x8e, 0xaa, 0x4c, 0xaa, 0xa3, 0x1a, 0x1f, + 0x28, 0x00, 0x66, 0x53, 0x70, 0xec, 0xe5, 0x3e, 0x63, 0x11, 0x2b, 0x91, 0x5d, 0xb9, 0xc2, 0xb9, + 0x36, 0x5e, 0xe1, 0x44, 0x27, 0xe8, 0x64, 0x12, 0x47, 0x2c, 0xef, 0xd9, 0x3c, 0xcc, 0x4c, 0x20, + 0x71, 0x22, 0x7f, 0x9e, 0x4c, 0xe2, 0xc4, 0x78, 0x8e, 0x97, 0x38, 0xff, 0x2a, 0x80, 0xa5, 0x08, + 0x3c, 0xb1, 0xc4, 0x91, 0x98, 0x3c, 0x7d, 0x9c, 0x99, 0x4c, 0x76, 0x44, 0x4b, 0xf7, 0x25, 0x91, + 0x1d, 0x91, 0x43, 0x39, 0xb2, 0xe3, 0x77, 0x85, 0xb8, 0xd7, 0x27, 0x94, 0x1d, 0x9f, 0xc1, 0x53, + 0xc5, 0x57, 0x4e, 0xb9, 0x34, 0x3e, 0x2a, 0x82, 0xf3, 0xe9, 0x14, 0x4c, 0xd4, 0x41, 0x65, 0x6c, + 0x1d, 0xdc, 0x05, 0xcb, 0xf7, 0x3d, 0x5d, 0x1f, 0xf0, 0x39, 0xc4, 0x8a, 0xa1, 0x5f, 0x41, 0xbf, + 0x29, 0x2c, 0x97, 0x7f, 0x20, 0xc1, 0x20, 0xa9, 0x65, 0xb6, 0x2c, 0x96, 0x9e, 0xb4, 0x2c, 0x96, + 0x4f, 0x51, 0x16, 0xe5, 0xca, 0xa2, 0x78, 0x2a, 0x65, 0x31, 0x71, 0x4d, 0x94, 0x1c, 0x57, 0x63, + 0xef, 0xf0, 0x23, 0x05, 0xac, 0xc8, 0xaf, 0xcf, 0x50, 0x07, 0x0b, 0x06, 0x7e, 0x10, 0x7f, 0xbc, + 0x18, 0x57, 0x30, 0x3c, 0xaa, 0xe9, 0x4d, 0xff, 0xeb, 0x4e, 0xf3, 0x2d, 0x93, 0xee, 0x38, 0x7b, + 0xd4, 0xd1, 0xcc, 0x9e, 0x5f, 0x60, 0xb7, 0x12, 0x5c, 0x28, 0xc5, 0x0d, 0xef, 0x81, 0x8a, 0x81, + 0x1f, 0xec, 0x79, 0x4e, 0x2f, 0x28, 0x84, 0x27, 0x1f, 0x87, 0xc7, 0xfe, 0x96, 0x60, 0x41, 0x21, + 0x5f, 0xe3, 0x53, 0x05, 0xac, 0xe6, 0x54, 0xd0, 0xaf, 0xd1, 0x2c, 0x77, 0xc0, 0xe5, 0xc4, 0x24, + 0x59, 0x42, 0x92, 0xfb, 0x9e, 0xce, 0x73, 0x53, 0xe8, 0x95, 0x6b, 0x60, 0xd6, 0xc6, 0x0e, 0xd5, + 0x42, 0xa1, 0x5b, 0x6e, 0xcf, 0x8f, 0x86, 0xf5, 0xd9, 0xdd, 0xa0, 0x11, 0x45, 0xfd, 0x8d, 0x5f, + 0x17, 0xc0, 0x5c, 0x8c, 0xe4, 0x0c, 0xb4, 0xc3, 0x1b, 0x09, 0xed, 0x20, 0xfd, 0x1a, 0x13, 0x9f, + 0x55, 0x9e, 0x78, 0xd8, 0x4a, 0x89, 0x87, 0xef, 0x8c, 0x23, 0x3a, 0x5e, 0x3d, 0xfc, 0xbb, 0x00, + 0x96, 0x63, 0xe8, 0x48, 0x3e, 0x7c, 0x3f, 0x21, 0x1f, 0xd6, 0x52, 0xf2, 0xa1, 0x2a, 0xb3, 0x79, + 0xaa, 0x1f, 0xc6, 0xeb, 0x87, 0x3f, 0x2a, 0x60, 0x31, 0xb6, 0x76, 0x67, 
0x20, 0x20, 0x36, 0x92, + 0x02, 0xa2, 0x3e, 0x26, 0x5e, 0x72, 0x14, 0xc4, 0x7f, 0x14, 0xd0, 0x8a, 0xa1, 0x76, 0x89, 0xe3, + 0x6a, 0x2e, 0x25, 0x26, 0xbd, 0x6b, 0xe9, 0x9e, 0x41, 0x3a, 0x3a, 0xd6, 0x0c, 0x44, 0x58, 0x83, + 0x66, 0x99, 0xbb, 0x96, 0xae, 0xa9, 0x03, 0x88, 0xc1, 0xdc, 0xfb, 0x87, 0xc4, 0xdc, 0x20, 0x3a, + 0xa1, 0xe2, 0x9b, 0xc1, 0x6c, 0xfb, 0xf5, 0xe0, 0x09, 0xfd, 0x9d, 0xa8, 0xeb, 0xf1, 0xb0, 0xbe, + 0x36, 0x09, 0x23, 0x0f, 0xb0, 0x38, 0x27, 0xfc, 0x19, 0x00, 0xec, 0xe7, 0x9e, 0x8a, 0x83, 0x2f, + 0x08, 0xb3, 0xed, 0xd7, 0x82, 0x34, 0x7c, 0x27, 0xec, 0x39, 0xd1, 0x00, 0x31, 0xc6, 0xc6, 0x5f, + 0x67, 0x12, 0xdb, 0xf5, 0xb5, 0x7f, 0xb0, 0xf9, 0x05, 0x58, 0xee, 0x47, 0xab, 0x13, 0x00, 0x98, + 0xd0, 0x60, 0xb1, 0xf3, 0xbc, 0x94, 0x5e, 0xb6, 0xae, 0x91, 0xbc, 0xb9, 0x2b, 0xa1, 0x43, 0xd2, + 0x41, 0xe0, 0xcb, 0x60, 0x8e, 0x09, 0x04, 0x4d, 0x25, 0xdb, 0xd8, 0x08, 0x52, 0x29, 0xfc, 0xe4, + 0xb2, 0x17, 0x75, 0xa1, 0x38, 0x0e, 0x1e, 0x82, 0x25, 0xdb, 0xea, 0x6e, 0x61, 0x13, 0xf7, 0x08, + 0x2b, 0x7b, 0xfe, 0x56, 0xf2, 0xa7, 0x9c, 0xd9, 0xf6, 0x2b, 0xc1, 0x35, 0x7d, 0x37, 0x0b, 0x61, + 0xd7, 0x20, 0x49, 0x33, 0x0f, 0x02, 0x19, 0x25, 0x34, 0x32, 0x5f, 0x08, 0x67, 0x32, 0xff, 0x56, + 0x21, 0xcb, 0xa9, 0x53, 0x7e, 0x23, 0xcc, 0x7b, 0xa4, 0xaa, 0x9c, 0xea, 0x91, 0x4a, 0x22, 0xe3, + 0x67, 0x4f, 0x28, 0xe3, 0x3f, 0x52, 0xc0, 0x15, 0x7b, 0x82, 0x34, 0xaa, 0x02, 0xbe, 0x2c, 0x9d, + 0x31, 0xcb, 0x32, 0x49, 0x46, 0xb6, 0xd7, 0x46, 0xc3, 0xfa, 0x95, 0x49, 0x90, 0x68, 0x22, 0xd7, + 0x1a, 0x1f, 0x94, 0xc1, 0x85, 0x4c, 0x79, 0xfc, 0x02, 0x5f, 0xcb, 0x32, 0x9a, 0xbe, 0x78, 0x02, + 0x4d, 0xbf, 0x0e, 0x16, 0xc5, 0x27, 0xd6, 0xd4, 0x95, 0x20, 0xdc, 0xd3, 0x4e, 0xb2, 0x1b, 0xa5, + 0xf1, 0xb2, 0xd7, 0xba, 0xf2, 0x09, 0x5f, 0xeb, 0xe2, 0x5e, 0x88, 0xff, 0x0c, 0xf2, 0x93, 0x2f, + 0xeb, 0x85, 0xf8, 0x07, 0xa1, 0x34, 0x1e, 0xbe, 0x16, 0x64, 0x56, 0xc8, 0x30, 0xc3, 0x19, 0x52, + 0xa9, 0x12, 0x12, 0xa4, 0xd0, 0x4f, 0xf4, 0x19, 0xf1, 0x5d, 0xc9, 0x67, 0xc4, 0xb5, 0x31, 0xa1, + 0x3b, 0xf9, 0xc3, 0x9c, 0xf4, 0xda, 0x35, 0x77, 0xf2, 0x6b, 0x57, 0xe3, 0x2f, 0x0a, 0x78, 0x26, + 0xf7, 0x4c, 0x81, 0xeb, 0x09, 0xb9, 0x76, 0x3d, 0x25, 0xd7, 0x9e, 0xcd, 0x35, 0x8c, 0x69, 0x36, + 0x43, 0xfe, 0x66, 0x77, 0x6b, 0xec, 0x9b, 0x9d, 0x44, 0x8c, 0x8f, 0x7f, 0xbc, 0x6b, 0xaf, 0x3d, + 0x7c, 0x54, 0x9b, 0xfa, 0xf8, 0x51, 0x6d, 0xea, 0x93, 0x47, 0xb5, 0xa9, 0x5f, 0x8e, 0x6a, 0xca, + 0xc3, 0x51, 0x4d, 0xf9, 0x78, 0x54, 0x53, 0x3e, 0x19, 0xd5, 0x94, 0x7f, 0x8c, 0x6a, 0xca, 0x6f, + 0x3f, 0xad, 0x4d, 0xdd, 0x2b, 0xf4, 0x6f, 0xfc, 0x3f, 0x00, 0x00, 0xff, 0xff, 0xc5, 0x7f, 0x6b, + 0xc0, 0xb5, 0x28, 0x00, 0x00, } func (m *ControllerRevision) Marshal() (dAtA []byte, err error) { @@ -2290,6 +2329,39 @@ func (m *StatefulSetList) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *StatefulSetPersistentVolumeClaimRetentionPolicy) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StatefulSetPersistentVolumeClaimRetentionPolicy) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StatefulSetPersistentVolumeClaimRetentionPolicy) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.WhenScaled) + copy(dAtA[i:], m.WhenScaled) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.WhenScaled))) + i-- + dAtA[i] = 0x12 + i -= len(m.WhenDeleted) + 
copy(dAtA[i:], m.WhenDeleted) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.WhenDeleted))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + func (m *StatefulSetSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -2310,6 +2382,21 @@ func (m *StatefulSetSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.PersistentVolumeClaimRetentionPolicy != nil { + { + size, err := m.PersistentVolumeClaimRetentionPolicy.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x52 + } + i = encodeVarintGenerated(dAtA, i, uint64(m.MinReadySeconds)) + i-- + dAtA[i] = 0x48 if m.RevisionHistoryLimit != nil { i = encodeVarintGenerated(dAtA, i, uint64(*m.RevisionHistoryLimit)) i-- @@ -2399,6 +2486,9 @@ func (m *StatefulSetStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + i = encodeVarintGenerated(dAtA, i, uint64(m.AvailableReplicas)) + i-- + dAtA[i] = 0x58 if len(m.Conditions) > 0 { for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { { @@ -2948,6 +3038,19 @@ func (m *StatefulSetList) Size() (n int) { return n } +func (m *StatefulSetPersistentVolumeClaimRetentionPolicy) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.WhenDeleted) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.WhenScaled) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + func (m *StatefulSetSpec) Size() (n int) { if m == nil { return 0 @@ -2978,6 +3081,11 @@ func (m *StatefulSetSpec) Size() (n int) { if m.RevisionHistoryLimit != nil { n += 1 + sovGenerated(uint64(*m.RevisionHistoryLimit)) } + n += 1 + sovGenerated(uint64(m.MinReadySeconds)) + if m.PersistentVolumeClaimRetentionPolicy != nil { + l = m.PersistentVolumeClaimRetentionPolicy.Size() + n += 1 + l + sovGenerated(uint64(l)) + } return n } @@ -3005,6 +3113,7 @@ func (m *StatefulSetStatus) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) } } + n += 1 + sovGenerated(uint64(m.AvailableReplicas)) return n } @@ -3390,6 +3499,17 @@ func (this *StatefulSetList) String() string { }, "") return s } +func (this *StatefulSetPersistentVolumeClaimRetentionPolicy) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&StatefulSetPersistentVolumeClaimRetentionPolicy{`, + `WhenDeleted:` + fmt.Sprintf("%v", this.WhenDeleted) + `,`, + `WhenScaled:` + fmt.Sprintf("%v", this.WhenScaled) + `,`, + `}`, + }, "") + return s +} func (this *StatefulSetSpec) String() string { if this == nil { return "nil" @@ -3408,6 +3528,8 @@ func (this *StatefulSetSpec) String() string { `PodManagementPolicy:` + fmt.Sprintf("%v", this.PodManagementPolicy) + `,`, `UpdateStrategy:` + strings.Replace(strings.Replace(this.UpdateStrategy.String(), "StatefulSetUpdateStrategy", "StatefulSetUpdateStrategy", 1), `&`, ``, 1) + `,`, `RevisionHistoryLimit:` + valueToStringGenerated(this.RevisionHistoryLimit) + `,`, + `MinReadySeconds:` + fmt.Sprintf("%v", this.MinReadySeconds) + `,`, + `PersistentVolumeClaimRetentionPolicy:` + strings.Replace(this.PersistentVolumeClaimRetentionPolicy.String(), "StatefulSetPersistentVolumeClaimRetentionPolicy", "StatefulSetPersistentVolumeClaimRetentionPolicy", 1) + `,`, `}`, }, "") return s @@ -3431,6 +3553,7 @@ func (this *StatefulSetStatus) String() string { `UpdateRevision:` + fmt.Sprintf("%v", this.UpdateRevision) + `,`, `CollisionCount:` + valueToStringGenerated(this.CollisionCount) + `,`, `Conditions:` + 
repeatedStringForConditions + `,`, + `AvailableReplicas:` + fmt.Sprintf("%v", this.AvailableReplicas) + `,`, `}`, }, "") return s @@ -7450,6 +7573,120 @@ func (m *StatefulSetList) Unmarshal(dAtA []byte) error { } return nil } +func (m *StatefulSetPersistentVolumeClaimRetentionPolicy) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StatefulSetPersistentVolumeClaimRetentionPolicy: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StatefulSetPersistentVolumeClaimRetentionPolicy: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field WhenDeleted", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.WhenDeleted = PersistentVolumeClaimRetentionPolicyType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field WhenScaled", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.WhenScaled = PersistentVolumeClaimRetentionPolicyType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -7719,6 +7956,61 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { } } m.RevisionHistoryLimit = &v + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MinReadySeconds", wireType) + } + m.MinReadySeconds = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MinReadySeconds |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PersistentVolumeClaimRetentionPolicy", wireType) 
+ } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.PersistentVolumeClaimRetentionPolicy == nil { + m.PersistentVolumeClaimRetentionPolicy = &StatefulSetPersistentVolumeClaimRetentionPolicy{} + } + if err := m.PersistentVolumeClaimRetentionPolicy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -7982,6 +8274,25 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 11: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AvailableReplicas", wireType) + } + m.AvailableReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.AvailableReplicas |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) diff --git a/vendor/k8s.io/api/apps/v1/generated.proto b/vendor/k8s.io/api/apps/v1/generated.proto index d4689d88b51b2..6e5517d81fa87 100644 --- a/vendor/k8s.io/api/apps/v1/generated.proto +++ b/vendor/k8s.io/api/apps/v1/generated.proto @@ -165,8 +165,8 @@ message DaemonSetStatus { // More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/ optional int32 desiredNumberScheduled = 3; - // The number of nodes that should be running the daemon pod and have one - // or more of the daemon pod running and ready. + // numberReady is the number of nodes that should be running the daemon pod and have one + // or more of the daemon pod running with a Ready Condition. optional int32 numberReady = 4; // The most recent generation observed by the daemon set controller. @@ -219,7 +219,8 @@ message DaemonSetUpdateStrategy { // Deployment enables declarative updates for Pods and ReplicaSets. message Deployment { - // Standard object metadata. + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; @@ -321,7 +322,7 @@ message DeploymentStatus { // +optional optional int32 updatedReplicas = 3; - // Total number of ready pods targeted by this deployment. + // readyReplicas is the number of pods targeted by this Deployment with a Ready Condition. // +optional optional int32 readyReplicas = 7; @@ -366,7 +367,8 @@ message DeploymentStrategy { message ReplicaSet { // If the Labels of a ReplicaSet are empty, they are defaulted to // be the same as the Pod(s) that the ReplicaSet manages. - // Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // Standard object's metadata. 
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; @@ -455,7 +457,7 @@ message ReplicaSetStatus { // +optional optional int32 fullyLabeledReplicas = 2; - // The number of ready replicas for this replica set. + // readyReplicas is the number of pods targeted by this ReplicaSet with a Ready Condition. // +optional optional int32 readyReplicas = 4; @@ -479,7 +481,7 @@ message RollingUpdateDaemonSet { // The maximum number of DaemonSet pods that can be unavailable during the // update. Value can be an absolute number (ex: 5) or a percentage of total // number of DaemonSet pods at the start of the update (ex: 10%). Absolute - // number is calculated from percentage by rounding down to a minimum of one. + // number is calculated from percentage by rounding up. // This cannot be 0 if MaxSurge is 0 // Default value is 1. // Example: when this is set to 30%, at most 30% of the total number of nodes @@ -511,7 +513,7 @@ message RollingUpdateDaemonSet { // daemonset on any given node can double if the readiness check fails, and // so resource intensive daemonsets should take into account that they may // cause evictions during disruption. - // This is an alpha field and requires enabling DaemonSetUpdateSurge feature gate. + // This is beta field and enabled/disabled by DaemonSetUpdateSurge feature gate. // +optional optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxSurge = 2; } @@ -562,6 +564,8 @@ message RollingUpdateStatefulSetStrategy { // The StatefulSet guarantees that a given network identity will always // map to the same storage identity. message StatefulSet { + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; @@ -598,12 +602,32 @@ message StatefulSetCondition { // StatefulSetList is a collection of StatefulSets. message StatefulSetList { + // Standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + // Items is the list of stateful sets. repeated StatefulSet items = 2; } +// StatefulSetPersistentVolumeClaimRetentionPolicy describes the policy used for PVCs +// created from the StatefulSet VolumeClaimTemplates. +message StatefulSetPersistentVolumeClaimRetentionPolicy { + // WhenDeleted specifies what happens to PVCs created from StatefulSet + // VolumeClaimTemplates when the StatefulSet is deleted. The default policy + // of `Retain` causes PVCs to not be affected by StatefulSet deletion. The + // `Delete` policy causes those PVCs to be deleted. + optional string whenDeleted = 1; + + // WhenScaled specifies what happens to PVCs created from StatefulSet + // VolumeClaimTemplates when the StatefulSet is scaled down. The default + // policy of `Retain` causes PVCs to not be affected by a scaledown. The + // `Delete` policy causes the associated PVCs for any excess pods above + // the replica count to be deleted. + optional string whenScaled = 2; +} + // A StatefulSetSpec is the specification of a StatefulSet. message StatefulSetSpec { // replicas is the desired number of replicas of the given Template. 
@@ -663,6 +687,22 @@ message StatefulSetSpec { // consists of all revisions not represented by a currently applied // StatefulSetSpec version. The default value is 10. optional int32 revisionHistoryLimit = 8; + + // Minimum number of seconds for which a newly created pod should be ready + // without any of its container crashing for it to be considered available. + // Defaults to 0 (pod will be considered available as soon as it is ready) + // This is an alpha field and requires enabling StatefulSetMinReadySeconds feature gate. + // +optional + optional int32 minReadySeconds = 9; + + // persistentVolumeClaimRetentionPolicy describes the lifecycle of persistent + // volume claims created from volumeClaimTemplates. By default, all persistent + // volume claims are created as needed and retained until manually deleted. This + // policy allows the lifecycle to be altered, for example by deleting persistent + // volume claims when their stateful set is deleted, or when their pod is scaled + // down. This requires the StatefulSetAutoDeletePVC feature gate to be enabled, + // which is alpha. +optional + optional StatefulSetPersistentVolumeClaimRetentionPolicy persistentVolumeClaimRetentionPolicy = 10; } // StatefulSetStatus represents the current state of a StatefulSet. @@ -675,7 +715,7 @@ message StatefulSetStatus { // replicas is the number of Pods created by the StatefulSet controller. optional int32 replicas = 2; - // readyReplicas is the number of Pods created by the StatefulSet controller that have a Ready Condition. + // readyReplicas is the number of pods created for this StatefulSet with a Ready Condition. optional int32 readyReplicas = 3; // currentReplicas is the number of Pods created by the StatefulSet controller from the StatefulSet version @@ -705,6 +745,10 @@ message StatefulSetStatus { // +patchMergeKey=type // +patchStrategy=merge repeated StatefulSetCondition conditions = 10; + + // Total number of available pods (ready for at least minReadySeconds) targeted by this statefulset. + // This is a beta field and enabled/disabled by StatefulSetMinReadySeconds feature gate. + optional int32 availableReplicas = 11; } // StatefulSetUpdateStrategy indicates the strategy that the StatefulSet diff --git a/vendor/k8s.io/api/apps/v1/types.go b/vendor/k8s.io/api/apps/v1/types.go index 48299f18ecbb1..469b47297f640 100644 --- a/vendor/k8s.io/api/apps/v1/types.go +++ b/vendor/k8s.io/api/apps/v1/types.go @@ -34,6 +34,7 @@ const ( // +genclient // +genclient:method=GetScale,verb=get,subresource=scale,result=k8s.io/api/autoscaling/v1.Scale // +genclient:method=UpdateScale,verb=update,subresource=scale,input=k8s.io/api/autoscaling/v1.Scale,result=k8s.io/api/autoscaling/v1.Scale +// +genclient:method=ApplyScale,verb=apply,subresource=scale,input=k8s.io/api/autoscaling/v1.Scale,result=k8s.io/api/autoscaling/v1.Scale // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // StatefulSet represents a set of pods with consistent identities. @@ -44,6 +45,8 @@ const ( // map to the same storage identity. type StatefulSet struct { metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -58,6 +61,7 @@ type StatefulSet struct { } // PodManagementPolicyType defines the policy for creating pods under a stateful set. 
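As a cross-check on the generated marshal/unmarshal hunks earlier in this patch: the hard-coded key bytes there (0xa and 0x12 for the retention policy's two strings, 0x48 for minReadySeconds, 0x52 for persistentVolumeClaimRetentionPolicy, 0x58 for availableReplicas) follow directly from the proto field numbers above, via (field number << 3) | wire type. A minimal sketch of that arithmetic; protoTag is an illustrative helper of ours, not part of the generated code:

package main

import "fmt"

// protoTag builds the one-byte field key that precedes each field on the
// wire: (field number << 3) | wire type. Wire type 0 is varint, 2 is
// length-delimited (strings and embedded messages).
func protoTag(fieldNum, wireType int) byte {
	return byte(fieldNum<<3 | wireType)
}

func main() {
	fmt.Printf("whenDeleted        (1, bytes):   %#x\n", protoTag(1, 2))  // 0xa
	fmt.Printf("whenScaled         (2, bytes):   %#x\n", protoTag(2, 2))  // 0x12
	fmt.Printf("minReadySeconds    (9, varint):  %#x\n", protoTag(9, 0))  // 0x48
	fmt.Printf("pvcRetentionPolicy (10, bytes):  %#x\n", protoTag(10, 2)) // 0x52
	fmt.Printf("availableReplicas  (11, varint): %#x\n", protoTag(11, 0)) // 0x58
}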
+// +enum type PodManagementPolicyType string const ( @@ -87,6 +91,7 @@ type StatefulSetUpdateStrategy struct { // StatefulSetUpdateStrategyType is a string enumeration type that enumerates // all possible update strategies for the StatefulSet controller. +// +enum type StatefulSetUpdateStrategyType string const ( @@ -113,6 +118,40 @@ type RollingUpdateStatefulSetStrategy struct { Partition *int32 `json:"partition,omitempty" protobuf:"varint,1,opt,name=partition"` } +// PersistentVolumeClaimRetentionPolicyType is a string enumeration of the policies that will determine +// when volumes from the VolumeClaimTemplates will be deleted when the controlling StatefulSet is +// deleted or scaled down. +type PersistentVolumeClaimRetentionPolicyType string + +const ( + // RetainPersistentVolumeClaimRetentionPolicyType is the default + // PersistentVolumeClaimRetentionPolicy and specifies that + // PersistentVolumeClaims associated with StatefulSet VolumeClaimTemplates + // will not be deleted. + RetainPersistentVolumeClaimRetentionPolicyType PersistentVolumeClaimRetentionPolicyType = "Retain" + // DeletePersistentVolumeClaimRetentionPolicyType specifies that + // PersistentVolumeClaims associated with StatefulSet VolumeClaimTemplates + // will be deleted in the scenario specified in + // StatefulSetPersistentVolumeClaimRetentionPolicy. + DeletePersistentVolumeClaimRetentionPolicyType PersistentVolumeClaimRetentionPolicyType = "Delete" +) + +// StatefulSetPersistentVolumeClaimRetentionPolicy describes the policy used for PVCs +// created from the StatefulSet VolumeClaimTemplates. +type StatefulSetPersistentVolumeClaimRetentionPolicy struct { + // WhenDeleted specifies what happens to PVCs created from StatefulSet + // VolumeClaimTemplates when the StatefulSet is deleted. The default policy + // of `Retain` causes PVCs to not be affected by StatefulSet deletion. The + // `Delete` policy causes those PVCs to be deleted. + WhenDeleted PersistentVolumeClaimRetentionPolicyType `json:"whenDeleted,omitempty" protobuf:"bytes,1,opt,name=whenDeleted,casttype=PersistentVolumeClaimRetentionPolicyType"` + // WhenScaled specifies what happens to PVCs created from StatefulSet + // VolumeClaimTemplates when the StatefulSet is scaled down. The default + // policy of `Retain` causes PVCs to not be affected by a scaledown. The + // `Delete` policy causes the associated PVCs for any excess pods above + // the replica count to be deleted. + WhenScaled PersistentVolumeClaimRetentionPolicyType `json:"whenScaled,omitempty" protobuf:"bytes,2,opt,name=whenScaled,casttype=PersistentVolumeClaimRetentionPolicyType"` +} + // A StatefulSetSpec is the specification of a StatefulSet. type StatefulSetSpec struct { // replicas is the desired number of replicas of the given Template. @@ -172,6 +211,22 @@ type StatefulSetSpec struct { // consists of all revisions not represented by a currently applied // StatefulSetSpec version. The default value is 10. RevisionHistoryLimit *int32 `json:"revisionHistoryLimit,omitempty" protobuf:"varint,8,opt,name=revisionHistoryLimit"` + + // Minimum number of seconds for which a newly created pod should be ready + // without any of its container crashing for it to be considered available. + // Defaults to 0 (pod will be considered available as soon as it is ready) + // This is an alpha field and requires enabling StatefulSetMinReadySeconds feature gate.
+ // +optional + MinReadySeconds int32 `json:"minReadySeconds,omitempty" protobuf:"varint,9,opt,name=minReadySeconds"` + + // persistentVolumeClaimRetentionPolicy describes the lifecycle of persistent + // volume claims created from volumeClaimTemplates. By default, all persistent + // volume claims are created as needed and retained until manually deleted. This + // policy allows the lifecycle to be altered, for example by deleting persistent + // volume claims when their stateful set is deleted, or when their pod is scaled + // down. This requires the StatefulSetAutoDeletePVC feature gate to be enabled, + // which is alpha. +optional + PersistentVolumeClaimRetentionPolicy *StatefulSetPersistentVolumeClaimRetentionPolicy `json:"persistentVolumeClaimRetentionPolicy,omitempty" protobuf:"bytes,10,opt,name=persistentVolumeClaimRetentionPolicy"` } // StatefulSetStatus represents the current state of a StatefulSet. @@ -184,7 +239,7 @@ type StatefulSetStatus struct { // replicas is the number of Pods created by the StatefulSet controller. Replicas int32 `json:"replicas" protobuf:"varint,2,opt,name=replicas"` - // readyReplicas is the number of Pods created by the StatefulSet controller that have a Ready Condition. + // readyReplicas is the number of pods created for this StatefulSet with a Ready Condition. ReadyReplicas int32 `json:"readyReplicas,omitempty" protobuf:"varint,3,opt,name=readyReplicas"` // currentReplicas is the number of Pods created by the StatefulSet controller from the StatefulSet version @@ -214,6 +269,10 @@ type StatefulSetStatus struct { // +patchMergeKey=type // +patchStrategy=merge Conditions []StatefulSetCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,10,rep,name=conditions"` + + // Total number of available pods (ready for at least minReadySeconds) targeted by this statefulset. + // This is a beta field and enabled/disabled by StatefulSetMinReadySeconds feature gate. + AvailableReplicas int32 `json:"availableReplicas" protobuf:"varint,11,opt,name=availableReplicas"` } type StatefulSetConditionType string @@ -240,20 +299,26 @@ type StatefulSetCondition struct { // StatefulSetList is a collection of StatefulSets. type StatefulSetList struct { metav1.TypeMeta `json:",inline"` + // Standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - Items []StatefulSet `json:"items" protobuf:"bytes,2,rep,name=items"` + + // Items is the list of stateful sets. + Items []StatefulSet `json:"items" protobuf:"bytes,2,rep,name=items"` } // +genclient // +genclient:method=GetScale,verb=get,subresource=scale,result=k8s.io/api/autoscaling/v1.Scale // +genclient:method=UpdateScale,verb=update,subresource=scale,input=k8s.io/api/autoscaling/v1.Scale,result=k8s.io/api/autoscaling/v1.Scale +// +genclient:method=ApplyScale,verb=apply,subresource=scale,input=k8s.io/api/autoscaling/v1.Scale,result=k8s.io/api/autoscaling/v1.Scale // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // Deployment enables declarative updates for Pods and ReplicaSets. type Deployment struct { metav1.TypeMeta `json:",inline"` - // Standard object metadata. + // Standard object's metadata. 
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -332,6 +397,7 @@ type DeploymentStrategy struct { RollingUpdate *RollingUpdateDeployment `json:"rollingUpdate,omitempty" protobuf:"bytes,2,opt,name=rollingUpdate"` } +// +enum type DeploymentStrategyType string const ( @@ -386,7 +452,7 @@ type DeploymentStatus struct { // +optional UpdatedReplicas int32 `json:"updatedReplicas,omitempty" protobuf:"varint,3,opt,name=updatedReplicas"` - // Total number of ready pods targeted by this deployment. + // readyReplicas is the number of pods targeted by this Deployment with a Ready Condition. // +optional ReadyReplicas int32 `json:"readyReplicas,omitempty" protobuf:"varint,7,opt,name=readyReplicas"` @@ -473,6 +539,7 @@ type DaemonSetUpdateStrategy struct { RollingUpdate *RollingUpdateDaemonSet `json:"rollingUpdate,omitempty" protobuf:"bytes,2,opt,name=rollingUpdate"` } +// +enum type DaemonSetUpdateStrategyType string const ( @@ -488,7 +555,7 @@ type RollingUpdateDaemonSet struct { // The maximum number of DaemonSet pods that can be unavailable during the // update. Value can be an absolute number (ex: 5) or a percentage of total // number of DaemonSet pods at the start of the update (ex: 10%). Absolute - // number is calculated from percentage by rounding down to a minimum of one. + // number is calculated from percentage by rounding up. // This cannot be 0 if MaxSurge is 0 // Default value is 1. // Example: when this is set to 30%, at most 30% of the total number of nodes @@ -520,7 +587,7 @@ type RollingUpdateDaemonSet struct { // daemonset on any given node can double if the readiness check fails, and // so resource intensive daemonsets should take into account that they may // cause evictions during disruption. - // This is an alpha field and requires enabling DaemonSetUpdateSurge feature gate. + // This is beta field and enabled/disabled by DaemonSetUpdateSurge feature gate. // +optional MaxSurge *intstr.IntOrString `json:"maxSurge,omitempty" protobuf:"bytes,2,opt,name=maxSurge"` } @@ -575,8 +642,8 @@ type DaemonSetStatus struct { // More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/ DesiredNumberScheduled int32 `json:"desiredNumberScheduled" protobuf:"varint,3,opt,name=desiredNumberScheduled"` - // The number of nodes that should be running the daemon pod and have one - // or more of the daemon pod running and ready. + // numberReady is the number of nodes that should be running the daemon pod and have one + // or more of the daemon pod running with a Ready Condition. NumberReady int32 `json:"numberReady" protobuf:"varint,4,opt,name=numberReady"` // The most recent generation observed by the daemon set controller. @@ -682,6 +749,7 @@ type DaemonSetList struct { // +genclient // +genclient:method=GetScale,verb=get,subresource=scale,result=k8s.io/api/autoscaling/v1.Scale // +genclient:method=UpdateScale,verb=update,subresource=scale,input=k8s.io/api/autoscaling/v1.Scale,result=k8s.io/api/autoscaling/v1.Scale +// +genclient:method=ApplyScale,verb=apply,subresource=scale,input=k8s.io/api/autoscaling/v1.Scale,result=k8s.io/api/autoscaling/v1.Scale // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // ReplicaSet ensures that a specified number of pod replicas are running at any given time. 
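Taken together, the apps/v1 hunks above add two spec knobs (minReadySeconds, persistentVolumeClaimRetentionPolicy) and one status counter (availableReplicas). A minimal usage sketch, ours rather than anything in this patch, assuming a cluster with the StatefulSetMinReadySeconds and StatefulSetAutoDeletePVC feature gates enabled; it uses only identifiers introduced in this diff:

package main

import (
	"fmt"

	appsv1 "k8s.io/api/apps/v1"
)

func main() {
	// Delete PVCs when the StatefulSet is deleted, but keep them on
	// scale-down; the default for both triggers is Retain.
	spec := appsv1.StatefulSetSpec{
		MinReadySeconds: 10, // a pod must stay Ready 10s before it counts as available
		PersistentVolumeClaimRetentionPolicy: &appsv1.StatefulSetPersistentVolumeClaimRetentionPolicy{
			WhenDeleted: appsv1.DeletePersistentVolumeClaimRetentionPolicyType,
			WhenScaled:  appsv1.RetainPersistentVolumeClaimRetentionPolicyType,
		},
	}
	fmt.Println(spec.MinReadySeconds, spec.PersistentVolumeClaimRetentionPolicy.WhenDeleted)
}

On an API server without those gates enabled the new fields are dropped or rejected, which is why the doc comments above call the gates out explicitly.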
@@ -690,7 +758,8 @@ type ReplicaSet struct { // If the Labels of a ReplicaSet are empty, they are defaulted to // be the same as the Pod(s) that the ReplicaSet manages. - // Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -761,7 +830,7 @@ type ReplicaSetStatus struct { // +optional FullyLabeledReplicas int32 `json:"fullyLabeledReplicas,omitempty" protobuf:"varint,2,opt,name=fullyLabeledReplicas"` - // The number of ready replicas for this replica set. + // readyReplicas is the number of pods targeted by this ReplicaSet with a Ready Condition. // +optional ReadyReplicas int32 `json:"readyReplicas,omitempty" protobuf:"varint,4,opt,name=readyReplicas"` diff --git a/vendor/k8s.io/api/apps/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/apps/v1/types_swagger_doc_generated.go index b9783ad20ed15..f640f9cdd661d 100644 --- a/vendor/k8s.io/api/apps/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/apps/v1/types_swagger_doc_generated.go @@ -100,7 +100,7 @@ var map_DaemonSetStatus = map[string]string{ "currentNumberScheduled": "The number of nodes that are running at least 1 daemon pod and are supposed to run the daemon pod. More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/", "numberMisscheduled": "The number of nodes that are running the daemon pod, but are not supposed to run the daemon pod. More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/", "desiredNumberScheduled": "The total number of nodes that should be running the daemon pod (including nodes correctly running the daemon pod). More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/", - "numberReady": "The number of nodes that should be running the daemon pod and have one or more of the daemon pod running and ready.", + "numberReady": "numberReady is the number of nodes that should be running the daemon pod and have one or more of the daemon pod running with a Ready Condition.", "observedGeneration": "The most recent generation observed by the daemon set controller.", "updatedNumberScheduled": "The total number of nodes that are running updated daemon pod", "numberAvailable": "The number of nodes that should be running the daemon pod and have one or more of the daemon pod running and available (ready for at least spec.minReadySeconds)", @@ -125,7 +125,7 @@ func (DaemonSetUpdateStrategy) SwaggerDoc() map[string]string { var map_Deployment = map[string]string{ "": "Deployment enables declarative updates for Pods and ReplicaSets.", - "metadata": "Standard object metadata.", + "metadata": "Standard object's metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "spec": "Specification of the desired behavior of the Deployment.", "status": "Most recently observed status of the Deployment.", } @@ -179,7 +179,7 @@ var map_DeploymentStatus = map[string]string{ "observedGeneration": "The generation observed by the deployment controller.", "replicas": "Total number of non-terminated pods targeted by this deployment (their labels match the selector).", "updatedReplicas": "Total number of non-terminated pods targeted by this deployment that have the desired template spec.", - "readyReplicas": "Total number of ready pods targeted by this deployment.", + "readyReplicas": "readyReplicas is the number of pods targeted by this Deployment with a Ready Condition.", "availableReplicas": "Total number of available pods (ready for at least minReadySeconds) targeted by this deployment.", "unavailableReplicas": "Total number of unavailable pods targeted by this deployment. This is the total number of pods that are still required for the deployment to have 100% available capacity. They may either be pods that are running but not yet available or pods that still have not been created.", "conditions": "Represents the latest available observations of a deployment's current state.", @@ -250,7 +250,7 @@ var map_ReplicaSetStatus = map[string]string{ "": "ReplicaSetStatus represents the current status of a ReplicaSet.", "replicas": "Replicas is the most recently oberved number of replicas. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller", "fullyLabeledReplicas": "The number of pods that have labels matching the labels of the pod template of the replicaset.", - "readyReplicas": "The number of ready replicas for this replica set.", + "readyReplicas": "readyReplicas is the number of pods targeted by this ReplicaSet with a Ready Condition.", "availableReplicas": "The number of available replicas (ready for at least minReadySeconds) for this replica set.", "observedGeneration": "ObservedGeneration reflects the generation of the most recently observed ReplicaSet.", "conditions": "Represents the latest available observations of a replica set's current state.", @@ -262,8 +262,8 @@ func (ReplicaSetStatus) SwaggerDoc() map[string]string { var map_RollingUpdateDaemonSet = map[string]string{ "": "Spec to control the desired behavior of daemon set rolling update.", - "maxUnavailable": "The maximum number of DaemonSet pods that can be unavailable during the update. Value can be an absolute number (ex: 5) or a percentage of total number of DaemonSet pods at the start of the update (ex: 10%). Absolute number is calculated from percentage by rounding down to a minimum of one. This cannot be 0 if MaxSurge is 0 Default value is 1. Example: when this is set to 30%, at most 30% of the total number of nodes that should be running the daemon pod (i.e. status.desiredNumberScheduled) can have their pods stopped for an update at any given time. The update starts by stopping at most 30% of those DaemonSet pods and then brings up new DaemonSet pods in their place. Once the new pods are available, it then proceeds onto other DaemonSet pods, thus ensuring that at least 70% of original number of DaemonSet pods are available at all times during the update.", - "maxSurge": "The maximum number of nodes with an existing available DaemonSet pod that can have an updated DaemonSet pod during during an update. 
Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). This can not be 0 if MaxUnavailable is 0. Absolute number is calculated from percentage by rounding up to a minimum of 1. Default value is 0. Example: when this is set to 30%, at most 30% of the total number of nodes that should be running the daemon pod (i.e. status.desiredNumberScheduled) can have their a new pod created before the old pod is marked as deleted. The update starts by launching new pods on 30% of nodes. Once an updated pod is available (Ready for at least minReadySeconds) the old DaemonSet pod on that node is marked deleted. If the old pod becomes unavailable for any reason (Ready transitions to false, is evicted, or is drained) an updated pod is immediatedly created on that node without considering surge limits. Allowing surge implies the possibility that the resources consumed by the daemonset on any given node can double if the readiness check fails, and so resource intensive daemonsets should take into account that they may cause evictions during disruption. This is an alpha field and requires enabling DaemonSetUpdateSurge feature gate.", + "maxUnavailable": "The maximum number of DaemonSet pods that can be unavailable during the update. Value can be an absolute number (ex: 5) or a percentage of total number of DaemonSet pods at the start of the update (ex: 10%). Absolute number is calculated from percentage by rounding up. This cannot be 0 if MaxSurge is 0 Default value is 1. Example: when this is set to 30%, at most 30% of the total number of nodes that should be running the daemon pod (i.e. status.desiredNumberScheduled) can have their pods stopped for an update at any given time. The update starts by stopping at most 30% of those DaemonSet pods and then brings up new DaemonSet pods in their place. Once the new pods are available, it then proceeds onto other DaemonSet pods, thus ensuring that at least 70% of original number of DaemonSet pods are available at all times during the update.", + "maxSurge": "The maximum number of nodes with an existing available DaemonSet pod that can have an updated DaemonSet pod during an update. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). This can not be 0 if MaxUnavailable is 0. Absolute number is calculated from percentage by rounding up to a minimum of 1. Default value is 0. Example: when this is set to 30%, at most 30% of the total number of nodes that should be running the daemon pod (i.e. status.desiredNumberScheduled) can have a new pod created before the old pod is marked as deleted. The update starts by launching new pods on 30% of nodes. Once an updated pod is available (Ready for at least minReadySeconds) the old DaemonSet pod on that node is marked deleted. If the old pod becomes unavailable for any reason (Ready transitions to false, is evicted, or is drained) an updated pod is immediately created on that node without considering surge limits. Allowing surge implies the possibility that the resources consumed by the daemonset on any given node can double if the readiness check fails, and so resource intensive daemonsets should take into account that they may cause evictions during disruption.
This is beta field and enabled/disabled by DaemonSetUpdateSurge feature gate.", } func (RollingUpdateDaemonSet) SwaggerDoc() map[string]string { @@ -290,9 +290,10 @@ func (RollingUpdateStatefulSetStrategy) SwaggerDoc() map[string]string { } var map_StatefulSet = map[string]string{ - "": "StatefulSet represents a set of pods with consistent identities. Identities are defined as:\n - Network: A single stable DNS and hostname.\n - Storage: As many VolumeClaims as requested.\nThe StatefulSet guarantees that a given network identity will always map to the same storage identity.", - "spec": "Spec defines the desired identities of pods in this set.", - "status": "Status is the current status of Pods in this StatefulSet. This data may be out of date by some window of time.", + "": "StatefulSet represents a set of pods with consistent identities. Identities are defined as:\n - Network: A single stable DNS and hostname.\n - Storage: As many VolumeClaims as requested.\nThe StatefulSet guarantees that a given network identity will always map to the same storage identity.", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "Spec defines the desired identities of pods in this set.", + "status": "Status is the current status of Pods in this StatefulSet. This data may be out of date by some window of time.", } func (StatefulSet) SwaggerDoc() map[string]string { @@ -313,23 +314,37 @@ func (StatefulSetCondition) SwaggerDoc() map[string]string { } var map_StatefulSetList = map[string]string{ - "": "StatefulSetList is a collection of StatefulSets.", + "": "StatefulSetList is a collection of StatefulSets.", + "metadata": "Standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "items": "Items is the list of stateful sets.", } func (StatefulSetList) SwaggerDoc() map[string]string { return map_StatefulSetList } +var map_StatefulSetPersistentVolumeClaimRetentionPolicy = map[string]string{ + "": "StatefulSetPersistentVolumeClaimRetentionPolicy describes the policy used for PVCs created from the StatefulSet VolumeClaimTemplates.", + "whenDeleted": "WhenDeleted specifies what happens to PVCs created from StatefulSet VolumeClaimTemplates when the StatefulSet is deleted. The default policy of `Retain` causes PVCs to not be affected by StatefulSet deletion. The `Delete` policy causes those PVCs to be deleted.", + "whenScaled": "WhenScaled specifies what happens to PVCs created from StatefulSet VolumeClaimTemplates when the StatefulSet is scaled down. The default policy of `Retain` causes PVCs to not be affected by a scaledown. The `Delete` policy causes the associated PVCs for any excess pods above the replica count to be deleted.", +} + +func (StatefulSetPersistentVolumeClaimRetentionPolicy) SwaggerDoc() map[string]string { + return map_StatefulSetPersistentVolumeClaimRetentionPolicy +} + var map_StatefulSetSpec = map[string]string{ - "": "A StatefulSetSpec is the specification of a StatefulSet.", - "replicas": "replicas is the desired number of replicas of the given Template. These are replicas in the sense that they are instantiations of the same Template, but individual replicas also have a consistent identity. If unspecified, defaults to 1.", - "selector": "selector is a label query over pods that should match the replica count. It must match the pod template's labels. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors", - "template": "template is the object that describes the pod that will be created if insufficient replicas are detected. Each pod stamped out by the StatefulSet will fulfill this Template, but have a unique identity from the rest of the StatefulSet.", - "volumeClaimTemplates": "volumeClaimTemplates is a list of claims that pods are allowed to reference. The StatefulSet controller is responsible for mapping network identities to claims in a way that maintains the identity of a pod. Every claim in this list must have at least one matching (by name) volumeMount in one container in the template. A claim in this list takes precedence over any volumes in the template, with the same name.", - "serviceName": "serviceName is the name of the service that governs this StatefulSet. This service must exist before the StatefulSet, and is responsible for the network identity of the set. Pods get DNS/hostnames that follow the pattern: pod-specific-string.serviceName.default.svc.cluster.local where \"pod-specific-string\" is managed by the StatefulSet controller.", - "podManagementPolicy": "podManagementPolicy controls how pods are created during initial scale up, when replacing pods on nodes, or when scaling down. The default policy is `OrderedReady`, where pods are created in increasing order (pod-0, then pod-1, etc) and the controller will wait until each pod is ready before continuing. When scaling down, the pods are removed in the opposite order. The alternative policy is `Parallel` which will create pods in parallel to match the desired scale without waiting, and on scale down will delete all pods at once.", - "updateStrategy": "updateStrategy indicates the StatefulSetUpdateStrategy that will be employed to update Pods in the StatefulSet when a revision is made to Template.", - "revisionHistoryLimit": "revisionHistoryLimit is the maximum number of revisions that will be maintained in the StatefulSet's revision history. The revision history consists of all revisions not represented by a currently applied StatefulSetSpec version. The default value is 10.", + "": "A StatefulSetSpec is the specification of a StatefulSet.", + "replicas": "replicas is the desired number of replicas of the given Template. These are replicas in the sense that they are instantiations of the same Template, but individual replicas also have a consistent identity. If unspecified, defaults to 1.", + "selector": "selector is a label query over pods that should match the replica count. It must match the pod template's labels. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors", + "template": "template is the object that describes the pod that will be created if insufficient replicas are detected. Each pod stamped out by the StatefulSet will fulfill this Template, but have a unique identity from the rest of the StatefulSet.", + "volumeClaimTemplates": "volumeClaimTemplates is a list of claims that pods are allowed to reference. The StatefulSet controller is responsible for mapping network identities to claims in a way that maintains the identity of a pod. Every claim in this list must have at least one matching (by name) volumeMount in one container in the template. A claim in this list takes precedence over any volumes in the template, with the same name.", + "serviceName": "serviceName is the name of the service that governs this StatefulSet. 
This service must exist before the StatefulSet, and is responsible for the network identity of the set. Pods get DNS/hostnames that follow the pattern: pod-specific-string.serviceName.default.svc.cluster.local where \"pod-specific-string\" is managed by the StatefulSet controller.", + "podManagementPolicy": "podManagementPolicy controls how pods are created during initial scale up, when replacing pods on nodes, or when scaling down. The default policy is `OrderedReady`, where pods are created in increasing order (pod-0, then pod-1, etc) and the controller will wait until each pod is ready before continuing. When scaling down, the pods are removed in the opposite order. The alternative policy is `Parallel` which will create pods in parallel to match the desired scale without waiting, and on scale down will delete all pods at once.", + "updateStrategy": "updateStrategy indicates the StatefulSetUpdateStrategy that will be employed to update Pods in the StatefulSet when a revision is made to Template.", + "revisionHistoryLimit": "revisionHistoryLimit is the maximum number of revisions that will be maintained in the StatefulSet's revision history. The revision history consists of all revisions not represented by a currently applied StatefulSetSpec version. The default value is 10.", + "minReadySeconds": "Minimum number of seconds for which a newly created pod should be ready without any of its container crashing for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready) This is an alpha field and requires enabling StatefulSetMinReadySeconds feature gate.", + "persistentVolumeClaimRetentionPolicy": "persistentVolumeClaimRetentionPolicy describes the lifecycle of persistent volume claims created from volumeClaimTemplates. By default, all persistent volume claims are created as needed and retained until manually deleted. This policy allows the lifecycle to be altered, for example by deleting persistent volume claims when their stateful set is deleted, or when their pod is scaled down. This requires the StatefulSetAutoDeletePVC feature gate to be enabled, which is alpha. +optional", } func (StatefulSetSpec) SwaggerDoc() map[string]string { @@ -340,13 +355,14 @@ var map_StatefulSetStatus = map[string]string{ "": "StatefulSetStatus represents the current state of a StatefulSet.", "observedGeneration": "observedGeneration is the most recent generation observed for this StatefulSet. 
It corresponds to the StatefulSet's generation, which is updated on mutation by the API Server.", "replicas": "replicas is the number of Pods created by the StatefulSet controller.", - "readyReplicas": "readyReplicas is the number of Pods created by the StatefulSet controller that have a Ready Condition.", + "readyReplicas": "readyReplicas is the number of pods created for this StatefulSet with a Ready Condition.", "currentReplicas": "currentReplicas is the number of Pods created by the StatefulSet controller from the StatefulSet version indicated by currentRevision.", "updatedReplicas": "updatedReplicas is the number of Pods created by the StatefulSet controller from the StatefulSet version indicated by updateRevision.", "currentRevision": "currentRevision, if not empty, indicates the version of the StatefulSet used to generate Pods in the sequence [0,currentReplicas).", "updateRevision": "updateRevision, if not empty, indicates the version of the StatefulSet used to generate Pods in the sequence [replicas-updatedReplicas,replicas)", "collisionCount": "collisionCount is the count of hash collisions for the StatefulSet. The StatefulSet controller uses this field as a collision avoidance mechanism when it needs to create the name for the newest ControllerRevision.", "conditions": "Represents the latest available observations of a statefulset's current state.", + "availableReplicas": "Total number of available pods (ready for at least minReadySeconds) targeted by this statefulset. This is a beta field and enabled/disabled by StatefulSetMinReadySeconds feature gate.", } func (StatefulSetStatus) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/apps/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/apps/v1/zz_generated.deepcopy.go index 0c80548521d53..8e4d4261a2905 100644 --- a/vendor/k8s.io/api/apps/v1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/apps/v1/zz_generated.deepcopy.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* @@ -687,6 +688,22 @@ func (in *StatefulSetList) DeepCopyObject() runtime.Object { return nil } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StatefulSetPersistentVolumeClaimRetentionPolicy) DeepCopyInto(out *StatefulSetPersistentVolumeClaimRetentionPolicy) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatefulSetPersistentVolumeClaimRetentionPolicy. +func (in *StatefulSetPersistentVolumeClaimRetentionPolicy) DeepCopy() *StatefulSetPersistentVolumeClaimRetentionPolicy { + if in == nil { + return nil + } + out := new(StatefulSetPersistentVolumeClaimRetentionPolicy) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *StatefulSetSpec) DeepCopyInto(out *StatefulSetSpec) { *out = *in @@ -714,6 +731,11 @@ func (in *StatefulSetSpec) DeepCopyInto(out *StatefulSetSpec) { *out = new(int32) **out = **in } + if in.PersistentVolumeClaimRetentionPolicy != nil { + in, out := &in.PersistentVolumeClaimRetentionPolicy, &out.PersistentVolumeClaimRetentionPolicy + *out = new(StatefulSetPersistentVolumeClaimRetentionPolicy) + **out = **in + } return } diff --git a/vendor/k8s.io/api/apps/v1beta1/generated.pb.go b/vendor/k8s.io/api/apps/v1beta1/generated.pb.go index 6bc56e382aa3d..74584223c9929 100644 --- a/vendor/k8s.io/api/apps/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/apps/v1beta1/generated.pb.go @@ -553,10 +553,40 @@ func (m *StatefulSetList) XXX_DiscardUnknown() { var xxx_messageInfo_StatefulSetList proto.InternalMessageInfo +func (m *StatefulSetPersistentVolumeClaimRetentionPolicy) Reset() { + *m = StatefulSetPersistentVolumeClaimRetentionPolicy{} +} +func (*StatefulSetPersistentVolumeClaimRetentionPolicy) ProtoMessage() {} +func (*StatefulSetPersistentVolumeClaimRetentionPolicy) Descriptor() ([]byte, []int) { + return fileDescriptor_2a07313e8f66e805, []int{18} +} +func (m *StatefulSetPersistentVolumeClaimRetentionPolicy) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StatefulSetPersistentVolumeClaimRetentionPolicy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *StatefulSetPersistentVolumeClaimRetentionPolicy) XXX_Merge(src proto.Message) { + xxx_messageInfo_StatefulSetPersistentVolumeClaimRetentionPolicy.Merge(m, src) +} +func (m *StatefulSetPersistentVolumeClaimRetentionPolicy) XXX_Size() int { + return m.Size() +} +func (m *StatefulSetPersistentVolumeClaimRetentionPolicy) XXX_DiscardUnknown() { + xxx_messageInfo_StatefulSetPersistentVolumeClaimRetentionPolicy.DiscardUnknown(m) +} + +var xxx_messageInfo_StatefulSetPersistentVolumeClaimRetentionPolicy proto.InternalMessageInfo + func (m *StatefulSetSpec) Reset() { *m = StatefulSetSpec{} } func (*StatefulSetSpec) ProtoMessage() {} func (*StatefulSetSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_2a07313e8f66e805, []int{18} + return fileDescriptor_2a07313e8f66e805, []int{19} } func (m *StatefulSetSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -584,7 +614,7 @@ var xxx_messageInfo_StatefulSetSpec proto.InternalMessageInfo func (m *StatefulSetStatus) Reset() { *m = StatefulSetStatus{} } func (*StatefulSetStatus) ProtoMessage() {} func (*StatefulSetStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_2a07313e8f66e805, []int{19} + return fileDescriptor_2a07313e8f66e805, []int{20} } func (m *StatefulSetStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -612,7 +642,7 @@ var xxx_messageInfo_StatefulSetStatus proto.InternalMessageInfo func (m *StatefulSetUpdateStrategy) Reset() { *m = StatefulSetUpdateStrategy{} } func (*StatefulSetUpdateStrategy) ProtoMessage() {} func (*StatefulSetUpdateStrategy) Descriptor() ([]byte, []int) { - return fileDescriptor_2a07313e8f66e805, []int{20} + return fileDescriptor_2a07313e8f66e805, []int{21} } func (m *StatefulSetUpdateStrategy) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -658,6 +688,7 @@ func init() { proto.RegisterType((*StatefulSet)(nil), "k8s.io.api.apps.v1beta1.StatefulSet") proto.RegisterType((*StatefulSetCondition)(nil), "k8s.io.api.apps.v1beta1.StatefulSetCondition") 
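Note on the deepcopy hunks just above: StatefulSetPersistentVolumeClaimRetentionPolicy holds only string-typed fields, so its generated DeepCopyInto is a plain value copy, and StatefulSetSpec's DeepCopyInto allocates a fresh policy struct before copying into it. A small sketch of ours (assuming the usual generated DeepCopy wrapper around DeepCopyInto) showing that a copy does not alias the original:

package main

import (
	"fmt"

	appsv1 "k8s.io/api/apps/v1"
)

func main() {
	orig := appsv1.StatefulSetSpec{
		PersistentVolumeClaimRetentionPolicy: &appsv1.StatefulSetPersistentVolumeClaimRetentionPolicy{
			WhenDeleted: appsv1.RetainPersistentVolumeClaimRetentionPolicyType,
		},
	}
	cp := orig.DeepCopy()
	cp.PersistentVolumeClaimRetentionPolicy.WhenDeleted = appsv1.DeletePersistentVolumeClaimRetentionPolicyType
	// The pointer field was re-allocated by the copy, so the original is untouched.
	fmt.Println(orig.PersistentVolumeClaimRetentionPolicy.WhenDeleted) // Retain
	fmt.Println(cp.PersistentVolumeClaimRetentionPolicy.WhenDeleted)   // Delete
}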
proto.RegisterType((*StatefulSetList)(nil), "k8s.io.api.apps.v1beta1.StatefulSetList") + proto.RegisterType((*StatefulSetPersistentVolumeClaimRetentionPolicy)(nil), "k8s.io.api.apps.v1beta1.StatefulSetPersistentVolumeClaimRetentionPolicy") proto.RegisterType((*StatefulSetSpec)(nil), "k8s.io.api.apps.v1beta1.StatefulSetSpec") proto.RegisterType((*StatefulSetStatus)(nil), "k8s.io.api.apps.v1beta1.StatefulSetStatus") proto.RegisterType((*StatefulSetUpdateStrategy)(nil), "k8s.io.api.apps.v1beta1.StatefulSetUpdateStrategy") @@ -668,123 +699,130 @@ func init() { } var fileDescriptor_2a07313e8f66e805 = []byte{ - // 1855 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xdc, 0x58, 0xcd, 0x6f, 0x24, 0x47, - 0x15, 0x77, 0x8f, 0x67, 0xec, 0xf1, 0x73, 0x3c, 0xde, 0x2d, 0x9b, 0xf5, 0xc4, 0x81, 0xb1, 0x35, - 0x44, 0x89, 0xf3, 0xe1, 0x9e, 0xac, 0x13, 0xa2, 0x64, 0x17, 0x45, 0x78, 0xbc, 0x4b, 0xb2, 0x91, - 0x8d, 0x9d, 0xb2, 0x1d, 0x44, 0x00, 0x29, 0x35, 0x3d, 0xb5, 0xb3, 0x1d, 0xf7, 0x97, 0xba, 0xab, - 0x87, 0x1d, 0x71, 0xe1, 0x0f, 0x40, 0x0a, 0x67, 0xfe, 0x0a, 0x8e, 0x08, 0x6e, 0x9c, 0xf6, 0x82, - 0x14, 0x71, 0x21, 0x27, 0x8b, 0x9d, 0x5c, 0x81, 0x1b, 0x97, 0x95, 0x90, 0x50, 0x55, 0x57, 0x7f, - 0x77, 0xdb, 0x6d, 0xa4, 0xb5, 0x04, 0xb7, 0xe9, 0x7a, 0xef, 0xfd, 0x5e, 0x7d, 0xbc, 0xaf, 0xdf, - 0xc0, 0x0f, 0xce, 0xde, 0xf3, 0x54, 0xdd, 0xee, 0x9d, 0xf9, 0x03, 0xea, 0x5a, 0x94, 0x51, 0xaf, - 0x37, 0xa6, 0xd6, 0xd0, 0x76, 0x7b, 0x52, 0x40, 0x1c, 0xbd, 0x47, 0x1c, 0xc7, 0xeb, 0x8d, 0x6f, - 0x0f, 0x28, 0x23, 0xb7, 0x7b, 0x23, 0x6a, 0x51, 0x97, 0x30, 0x3a, 0x54, 0x1d, 0xd7, 0x66, 0x36, - 0x5a, 0x0b, 0x14, 0x55, 0xe2, 0xe8, 0x2a, 0x57, 0x54, 0xa5, 0xe2, 0xfa, 0xf6, 0x48, 0x67, 0x8f, - 0xfc, 0x81, 0xaa, 0xd9, 0x66, 0x6f, 0x64, 0x8f, 0xec, 0x9e, 0xd0, 0x1f, 0xf8, 0x0f, 0xc5, 0x97, - 0xf8, 0x10, 0xbf, 0x02, 0x9c, 0xf5, 0x6e, 0xc2, 0xa1, 0x66, 0xbb, 0xb4, 0x37, 0xce, 0xf9, 0x5a, - 0x7f, 0x27, 0xd6, 0x31, 0x89, 0xf6, 0x48, 0xb7, 0xa8, 0x3b, 0xe9, 0x39, 0x67, 0x23, 0xbe, 0xe0, - 0xf5, 0x4c, 0xca, 0x48, 0x91, 0x55, 0xaf, 0xcc, 0xca, 0xf5, 0x2d, 0xa6, 0x9b, 0x34, 0x67, 0xf0, - 0xee, 0x65, 0x06, 0x9e, 0xf6, 0x88, 0x9a, 0x24, 0x67, 0xf7, 0x76, 0x99, 0x9d, 0xcf, 0x74, 0xa3, - 0xa7, 0x5b, 0xcc, 0x63, 0x6e, 0xd6, 0xa8, 0xfb, 0x2f, 0x05, 0xd0, 0x9e, 0x6d, 0x31, 0xd7, 0x36, - 0x0c, 0xea, 0x62, 0x3a, 0xd6, 0x3d, 0xdd, 0xb6, 0xd0, 0xe7, 0xd0, 0xe4, 0xe7, 0x19, 0x12, 0x46, - 0xda, 0xca, 0xa6, 0xb2, 0xb5, 0xb8, 0xf3, 0x96, 0x1a, 0xdf, 0x74, 0x04, 0xaf, 0x3a, 0x67, 0x23, - 0xbe, 0xe0, 0xa9, 0x5c, 0x5b, 0x1d, 0xdf, 0x56, 0x0f, 0x07, 0x5f, 0x50, 0x8d, 0x1d, 0x50, 0x46, - 0xfa, 0xe8, 0xc9, 0xf9, 0xc6, 0xcc, 0xf4, 0x7c, 0x03, 0xe2, 0x35, 0x1c, 0xa1, 0xa2, 0x43, 0xa8, - 0x0b, 0xf4, 0x9a, 0x40, 0xdf, 0x2e, 0x45, 0x97, 0x87, 0x56, 0x31, 0xf9, 0xc5, 0xfd, 0xc7, 0x8c, - 0x5a, 0x7c, 0x7b, 0xfd, 0x17, 0x24, 0x74, 0xfd, 0x1e, 0x61, 0x04, 0x0b, 0x20, 0xf4, 0x26, 0x34, - 0x5d, 0xb9, 0xfd, 0xf6, 0xec, 0xa6, 0xb2, 0x35, 0xdb, 0xbf, 0x21, 0xb5, 0x9a, 0xe1, 0xb1, 0x70, - 0xa4, 0xd1, 0x7d, 0xa2, 0xc0, 0xad, 0xfc, 0xb9, 0xf7, 0x75, 0x8f, 0xa1, 0x9f, 0xe5, 0xce, 0xae, - 0x56, 0x3b, 0x3b, 0xb7, 0x16, 0x27, 0x8f, 0x1c, 0x87, 0x2b, 0x89, 0x73, 0x1f, 0x41, 0x43, 0x67, - 0xd4, 0xf4, 0xda, 0xb5, 0xcd, 0xd9, 0xad, 0xc5, 0x9d, 0x37, 0xd4, 0x92, 0x00, 0x56, 0xf3, 0xbb, - 0xeb, 0x2f, 0x49, 0xdc, 0xc6, 0x03, 0x8e, 0x80, 0x03, 0xa0, 0xee, 0xaf, 0x6b, 0x00, 0xf7, 0xa8, - 0x63, 0xd8, 0x13, 0x93, 0x5a, 0xec, 0x1a, 0x9e, 0xee, 0x01, 0xd4, 0x3d, 0x87, 0x6a, 0xf2, 0xe9, - 0x5e, 0x2d, 0x3d, 0x41, 0xbc, 0xa9, 0x63, 0x87, 
0x6a, 0xf1, 0xa3, 0xf1, 0x2f, 0x2c, 0x20, 0xd0, - 0x27, 0x30, 0xe7, 0x31, 0xc2, 0x7c, 0x4f, 0x3c, 0xd9, 0xe2, 0xce, 0x6b, 0x55, 0xc0, 0x84, 0x41, - 0xbf, 0x25, 0xe1, 0xe6, 0x82, 0x6f, 0x2c, 0x81, 0xba, 0x7f, 0x9d, 0x85, 0x95, 0x58, 0x79, 0xcf, - 0xb6, 0x86, 0x3a, 0xe3, 0x21, 0x7d, 0x17, 0xea, 0x6c, 0xe2, 0x50, 0x71, 0x27, 0x0b, 0xfd, 0x57, - 0xc3, 0xcd, 0x9c, 0x4c, 0x1c, 0xfa, 0xec, 0x7c, 0x63, 0xad, 0xc0, 0x84, 0x8b, 0xb0, 0x30, 0x42, - 0xfb, 0xd1, 0x3e, 0x6b, 0xc2, 0xfc, 0x9d, 0xb4, 0xf3, 0x67, 0xe7, 0x1b, 0x05, 0x05, 0x44, 0x8d, - 0x90, 0xd2, 0x5b, 0x44, 0x5f, 0x40, 0xcb, 0x20, 0x1e, 0x3b, 0x75, 0x86, 0x84, 0xd1, 0x13, 0xdd, - 0xa4, 0xed, 0x39, 0x71, 0xfa, 0xd7, 0xab, 0x3d, 0x14, 0xb7, 0xe8, 0xdf, 0x92, 0x3b, 0x68, 0xed, - 0xa7, 0x90, 0x70, 0x06, 0x19, 0x8d, 0x01, 0xf1, 0x95, 0x13, 0x97, 0x58, 0x5e, 0x70, 0x2a, 0xee, - 0x6f, 0xfe, 0xca, 0xfe, 0xd6, 0xa5, 0x3f, 0xb4, 0x9f, 0x43, 0xc3, 0x05, 0x1e, 0xd0, 0x2b, 0x30, - 0xe7, 0x52, 0xe2, 0xd9, 0x56, 0xbb, 0x2e, 0x6e, 0x2c, 0x7a, 0x2e, 0x2c, 0x56, 0xb1, 0x94, 0xa2, - 0xd7, 0x60, 0xde, 0xa4, 0x9e, 0x47, 0x46, 0xb4, 0xdd, 0x10, 0x8a, 0xcb, 0x52, 0x71, 0xfe, 0x20, - 0x58, 0xc6, 0xa1, 0xbc, 0xfb, 0x7b, 0x05, 0x5a, 0xf1, 0x33, 0x5d, 0x43, 0xae, 0x7e, 0x94, 0xce, - 0xd5, 0xef, 0x56, 0x08, 0xce, 0x92, 0x1c, 0xfd, 0x7b, 0x0d, 0x50, 0xac, 0x84, 0x6d, 0xc3, 0x18, - 0x10, 0xed, 0x0c, 0x6d, 0x42, 0xdd, 0x22, 0x66, 0x18, 0x93, 0x51, 0x82, 0xfc, 0x88, 0x98, 0x14, - 0x0b, 0x09, 0xfa, 0x52, 0x01, 0xe4, 0x8b, 0xd7, 0x1c, 0xee, 0x5a, 0x96, 0xcd, 0x08, 0xbf, 0xe0, - 0x70, 0x43, 0x7b, 0x15, 0x36, 0x14, 0xfa, 0x52, 0x4f, 0x73, 0x28, 0xf7, 0x2d, 0xe6, 0x4e, 0xe2, - 0x87, 0xcd, 0x2b, 0xe0, 0x02, 0xd7, 0xe8, 0xa7, 0x00, 0xae, 0xc4, 0x3c, 0xb1, 0x65, 0xda, 0x96, - 0xd7, 0x80, 0xd0, 0xfd, 0x9e, 0x6d, 0x3d, 0xd4, 0x47, 0x71, 0x61, 0xc1, 0x11, 0x04, 0x4e, 0xc0, - 0xad, 0xdf, 0x87, 0xb5, 0x92, 0x7d, 0xa2, 0x1b, 0x30, 0x7b, 0x46, 0x27, 0xc1, 0x55, 0x61, 0xfe, - 0x13, 0xad, 0x42, 0x63, 0x4c, 0x0c, 0x9f, 0x06, 0x39, 0x89, 0x83, 0x8f, 0x3b, 0xb5, 0xf7, 0x94, - 0xee, 0xef, 0x1a, 0xc9, 0x48, 0xe1, 0xf5, 0x06, 0x6d, 0xf1, 0xf6, 0xe0, 0x18, 0xba, 0x46, 0x3c, - 0x81, 0xd1, 0xe8, 0xbf, 0x10, 0xb4, 0x86, 0x60, 0x0d, 0x47, 0x52, 0xf4, 0x73, 0x68, 0x7a, 0xd4, - 0xa0, 0x1a, 0xb3, 0x5d, 0x59, 0xe2, 0xde, 0xae, 0x18, 0x53, 0x64, 0x40, 0x8d, 0x63, 0x69, 0x1a, - 0xc0, 0x87, 0x5f, 0x38, 0x82, 0x44, 0x9f, 0x40, 0x93, 0x51, 0xd3, 0x31, 0x08, 0xa3, 0xf2, 0xf6, - 0x52, 0x71, 0xc5, 0x6b, 0x07, 0x07, 0x3b, 0xb2, 0x87, 0x27, 0x52, 0x4d, 0x54, 0xcf, 0x28, 0x4e, - 0xc3, 0x55, 0x1c, 0xc1, 0xa0, 0x9f, 0x40, 0xd3, 0x63, 0xbc, 0xab, 0x8f, 0x26, 0x22, 0xdb, 0x2e, - 0x6a, 0x2b, 0xc9, 0x3a, 0x1a, 0x98, 0xc4, 0xd0, 0xe1, 0x0a, 0x8e, 0xe0, 0xd0, 0x2e, 0x2c, 0x9b, - 0xba, 0x85, 0x29, 0x19, 0x4e, 0x8e, 0xa9, 0x66, 0x5b, 0x43, 0x4f, 0xa4, 0x69, 0xa3, 0xbf, 0x26, - 0x8d, 0x96, 0x0f, 0xd2, 0x62, 0x9c, 0xd5, 0x47, 0xfb, 0xb0, 0x1a, 0xb6, 0xdd, 0x8f, 0x74, 0x8f, - 0xd9, 0xee, 0x64, 0x5f, 0x37, 0x75, 0x26, 0x6a, 0x5e, 0xa3, 0xdf, 0x9e, 0x9e, 0x6f, 0xac, 0xe2, - 0x02, 0x39, 0x2e, 0xb4, 0xe2, 0x75, 0xc5, 0x21, 0xbe, 0x47, 0x87, 0xa2, 0x86, 0x35, 0xe3, 0xba, - 0x72, 0x24, 0x56, 0xb1, 0x94, 0xa2, 0x1f, 0xa7, 0xc2, 0xb4, 0x79, 0xb5, 0x30, 0x6d, 0x95, 0x87, - 0x28, 0x3a, 0x85, 0x35, 0xc7, 0xb5, 0x47, 0x2e, 0xf5, 0xbc, 0x7b, 0x94, 0x0c, 0x0d, 0xdd, 0xa2, - 0xe1, 0xcd, 0x2c, 0x88, 0x13, 0xbd, 0x34, 0x3d, 0xdf, 0x58, 0x3b, 0x2a, 0x56, 0xc1, 0x65, 0xb6, - 0xdd, 0x3f, 0xd5, 0xe1, 0x46, 0xb6, 0xc7, 0xa1, 0x8f, 0x01, 0xd9, 0x03, 0x8f, 0xba, 0x63, 0x3a, - 0xfc, 0x30, 0x18, 0xdc, 0xf8, 0x74, 0xa3, 0x88, 0xe9, 0x26, 0xca, 0xdb, 
0xc3, 0x9c, 0x06, 0x2e,
-	// [... remainder of the old gzipped FileDescriptorProto bytes elided ...]
+	// 1968 bytes of a gzipped FileDescriptorProto
+	// [... 1968 bytes of gzipped descriptor data elided ...]
}

func (m *ControllerRevision) Marshal() (dAtA []byte, err error) {
@@ -1686,6 +1724,39 @@ func (m *StatefulSetList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
 	return len(dAtA) - i, nil
 }
 
+func (m *StatefulSetPersistentVolumeClaimRetentionPolicy) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *StatefulSetPersistentVolumeClaimRetentionPolicy) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *StatefulSetPersistentVolumeClaimRetentionPolicy) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
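+	// MarshalToSizedBuffer fills dAtA from the end toward the front, so fields
+	// are emitted in descending field-number order: WhenScaled first (tag 0x12,
+	// i.e. field 2 with wire type 2), then WhenDeleted (tag 0xa, field 1),
+	// leaving the buffer in ascending field order. encodeVarintGenerated writes
+	// each string's length prefix.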
+ _ = i + var l int + _ = l + i -= len(m.WhenScaled) + copy(dAtA[i:], m.WhenScaled) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.WhenScaled))) + i-- + dAtA[i] = 0x12 + i -= len(m.WhenDeleted) + copy(dAtA[i:], m.WhenDeleted) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.WhenDeleted))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + func (m *StatefulSetSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -1706,6 +1777,21 @@ func (m *StatefulSetSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.PersistentVolumeClaimRetentionPolicy != nil { + { + size, err := m.PersistentVolumeClaimRetentionPolicy.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x52 + } + i = encodeVarintGenerated(dAtA, i, uint64(m.MinReadySeconds)) + i-- + dAtA[i] = 0x48 if m.RevisionHistoryLimit != nil { i = encodeVarintGenerated(dAtA, i, uint64(*m.RevisionHistoryLimit)) i-- @@ -1795,6 +1881,9 @@ func (m *StatefulSetStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + i = encodeVarintGenerated(dAtA, i, uint64(m.AvailableReplicas)) + i-- + dAtA[i] = 0x58 if len(m.Conditions) > 0 { for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { { @@ -2206,6 +2295,19 @@ func (m *StatefulSetList) Size() (n int) { return n } +func (m *StatefulSetPersistentVolumeClaimRetentionPolicy) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.WhenDeleted) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.WhenScaled) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + func (m *StatefulSetSpec) Size() (n int) { if m == nil { return 0 @@ -2236,6 +2338,11 @@ func (m *StatefulSetSpec) Size() (n int) { if m.RevisionHistoryLimit != nil { n += 1 + sovGenerated(uint64(*m.RevisionHistoryLimit)) } + n += 1 + sovGenerated(uint64(m.MinReadySeconds)) + if m.PersistentVolumeClaimRetentionPolicy != nil { + l = m.PersistentVolumeClaimRetentionPolicy.Size() + n += 1 + l + sovGenerated(uint64(l)) + } return n } @@ -2265,6 +2372,7 @@ func (m *StatefulSetStatus) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) } } + n += 1 + sovGenerated(uint64(m.AvailableReplicas)) return n } @@ -2550,6 +2658,17 @@ func (this *StatefulSetList) String() string { }, "") return s } +func (this *StatefulSetPersistentVolumeClaimRetentionPolicy) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&StatefulSetPersistentVolumeClaimRetentionPolicy{`, + `WhenDeleted:` + fmt.Sprintf("%v", this.WhenDeleted) + `,`, + `WhenScaled:` + fmt.Sprintf("%v", this.WhenScaled) + `,`, + `}`, + }, "") + return s +} func (this *StatefulSetSpec) String() string { if this == nil { return "nil" @@ -2568,6 +2687,8 @@ func (this *StatefulSetSpec) String() string { `PodManagementPolicy:` + fmt.Sprintf("%v", this.PodManagementPolicy) + `,`, `UpdateStrategy:` + strings.Replace(strings.Replace(this.UpdateStrategy.String(), "StatefulSetUpdateStrategy", "StatefulSetUpdateStrategy", 1), `&`, ``, 1) + `,`, `RevisionHistoryLimit:` + valueToStringGenerated(this.RevisionHistoryLimit) + `,`, + `MinReadySeconds:` + fmt.Sprintf("%v", this.MinReadySeconds) + `,`, + `PersistentVolumeClaimRetentionPolicy:` + strings.Replace(this.PersistentVolumeClaimRetentionPolicy.String(), "StatefulSetPersistentVolumeClaimRetentionPolicy", "StatefulSetPersistentVolumeClaimRetentionPolicy", 1) + `,`, `}`, }, "") return s @@ -2591,6 +2712,7 @@ func (this 
*StatefulSetStatus) String() string { `UpdateRevision:` + fmt.Sprintf("%v", this.UpdateRevision) + `,`, `CollisionCount:` + valueToStringGenerated(this.CollisionCount) + `,`, `Conditions:` + repeatedStringForConditions + `,`, + `AvailableReplicas:` + fmt.Sprintf("%v", this.AvailableReplicas) + `,`, `}`, }, "") return s @@ -5425,6 +5547,120 @@ func (m *StatefulSetList) Unmarshal(dAtA []byte) error { } return nil } +func (m *StatefulSetPersistentVolumeClaimRetentionPolicy) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StatefulSetPersistentVolumeClaimRetentionPolicy: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StatefulSetPersistentVolumeClaimRetentionPolicy: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field WhenDeleted", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.WhenDeleted = PersistentVolumeClaimRetentionPolicyType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field WhenScaled", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.WhenScaled = PersistentVolumeClaimRetentionPolicyType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -5694,6 +5930,61 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { } } m.RevisionHistoryLimit = &v + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MinReadySeconds", wireType) + } + m.MinReadySeconds = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MinReadySeconds |= 
int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PersistentVolumeClaimRetentionPolicy", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.PersistentVolumeClaimRetentionPolicy == nil { + m.PersistentVolumeClaimRetentionPolicy = &StatefulSetPersistentVolumeClaimRetentionPolicy{} + } + if err := m.PersistentVolumeClaimRetentionPolicy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -5958,6 +6249,25 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 11: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AvailableReplicas", wireType) + } + m.AvailableReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.AvailableReplicas |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) diff --git a/vendor/k8s.io/api/apps/v1beta1/generated.proto b/vendor/k8s.io/api/apps/v1beta1/generated.proto index 888f3e79e17e5..9f9df98fce6f4 100644 --- a/vendor/k8s.io/api/apps/v1beta1/generated.proto +++ b/vendor/k8s.io/api/apps/v1beta1/generated.proto @@ -190,7 +190,7 @@ message DeploymentStatus { // +optional optional int32 updatedReplicas = 3; - // Total number of ready pods targeted by this deployment. + // readyReplicas is the number of pods targeted by this Deployment controller with a Ready Condition. // +optional optional int32 readyReplicas = 7; @@ -367,6 +367,23 @@ message StatefulSetList { repeated StatefulSet items = 2; } +// StatefulSetPersistentVolumeClaimRetentionPolicy describes the policy used for PVCs +// created from the StatefulSet VolumeClaimTemplates. +message StatefulSetPersistentVolumeClaimRetentionPolicy { + // WhenDeleted specifies what happens to PVCs created from StatefulSet + // VolumeClaimTemplates when the StatefulSet is deleted. The default policy + // of `Retain` causes PVCs to not be affected by StatefulSet deletion. The + // `Delete` policy causes those PVCs to be deleted. + optional string whenDeleted = 1; + + // WhenScaled specifies what happens to PVCs created from StatefulSet + // VolumeClaimTemplates when the StatefulSet is scaled down. The default + // policy of `Retain` causes PVCs to not be affected by a scaledown. The + // `Delete` policy causes the associated PVCs for any excess pods above + // the replica count to be deleted. + optional string whenScaled = 2; +} + // A StatefulSetSpec is the specification of a StatefulSet. message StatefulSetSpec { // replicas is the desired number of replicas of the given Template. @@ -427,6 +444,19 @@ message StatefulSetSpec { // consists of all revisions not represented by a currently applied // StatefulSetSpec version. The default value is 10. 
optional int32 revisionHistoryLimit = 8; + + // Minimum number of seconds for which a newly created pod should be ready + // without any of its container crashing for it to be considered available. + // Defaults to 0 (pod will be considered available as soon as it is ready) + // This is an alpha field and requires enabling StatefulSetMinReadySeconds feature gate. + // +optional + optional int32 minReadySeconds = 9; + + // PersistentVolumeClaimRetentionPolicy describes the policy used for PVCs created from + // the StatefulSet VolumeClaimTemplates. This requires the + // StatefulSetAutoDeletePVC feature gate to be enabled, which is alpha. + // +optional + optional StatefulSetPersistentVolumeClaimRetentionPolicy persistentVolumeClaimRetentionPolicy = 10; } // StatefulSetStatus represents the current state of a StatefulSet. @@ -439,7 +469,7 @@ message StatefulSetStatus { // replicas is the number of Pods created by the StatefulSet controller. optional int32 replicas = 2; - // readyReplicas is the number of Pods created by the StatefulSet controller that have a Ready Condition. + // readyReplicas is the number of pods created by this StatefulSet controller with a Ready Condition. optional int32 readyReplicas = 3; // currentReplicas is the number of Pods created by the StatefulSet controller from the StatefulSet version @@ -469,6 +499,10 @@ message StatefulSetStatus { // +patchMergeKey=type // +patchStrategy=merge repeated StatefulSetCondition conditions = 10; + + // Total number of available pods (ready for at least minReadySeconds) targeted by this StatefulSet. + // This is a beta field and enabled/disabled by StatefulSetMinReadySeconds feature gate. + optional int32 availableReplicas = 11; } // StatefulSetUpdateStrategy indicates the strategy that the StatefulSet diff --git a/vendor/k8s.io/api/apps/v1beta1/types.go b/vendor/k8s.io/api/apps/v1beta1/types.go index 9f822faee171f..832ef34f45f69 100644 --- a/vendor/k8s.io/api/apps/v1beta1/types.go +++ b/vendor/k8s.io/api/apps/v1beta1/types.go @@ -158,6 +158,40 @@ type RollingUpdateStatefulSetStrategy struct { Partition *int32 `json:"partition,omitempty" protobuf:"varint,1,opt,name=partition"` } +// PersistentVolumeClaimRetentionPolicyType is a string enumeration of the policies that will determine +// when volumes from the VolumeClaimTemplates will be deleted when the controlling StatefulSet is +// deleted or scaled down. +type PersistentVolumeClaimRetentionPolicyType string + +const ( + // RetainPersistentVolumeClaimRetentionPolicyType is the default + // PersistentVolumeClaimRetentionPolicy and specifies that + // PersistentVolumeClaims associated with StatefulSet VolumeClaimTemplates + // will not be deleted. + RetainPersistentVolumeClaimRetentionPolicyType PersistentVolumeClaimRetentionPolicyType = "Retain" + // RetentionPersistentVolumeClaimRetentionPolicyType specifies that + // PersistentVolumeClaims associated with StatefulSet VolumeClaimTemplates + // will be deleted in the scenario specified in + // StatefulSetPersistentVolumeClaimRetentionPolicy. + RetentionPersistentVolumeClaimRetentionPolicyType PersistentVolumeClaimRetentionPolicyType = "Delete" +) + +// StatefulSetPersistentVolumeClaimRetentionPolicy describes the policy used for PVCs +// created from the StatefulSet VolumeClaimTemplates. +type StatefulSetPersistentVolumeClaimRetentionPolicy struct { + // WhenDeleted specifies what happens to PVCs created from StatefulSet + // VolumeClaimTemplates when the StatefulSet is deleted. 
The default policy + // of `Retain` causes PVCs to not be affected by StatefulSet deletion. The + // `Delete` policy causes those PVCs to be deleted. + WhenDeleted PersistentVolumeClaimRetentionPolicyType `json:"whenDeleted,omitempty" protobuf:"bytes,1,opt,name=whenDeleted,casttype=PersistentVolumeClaimRetentionPolicyType"` + // WhenScaled specifies what happens to PVCs created from StatefulSet + // VolumeClaimTemplates when the StatefulSet is scaled down. The default + // policy of `Retain` causes PVCs to not be affected by a scaledown. The + // `Delete` policy causes the associated PVCs for any excess pods above + // the replica count to be deleted. + WhenScaled PersistentVolumeClaimRetentionPolicyType `json:"whenScaled,omitempty" protobuf:"bytes,2,opt,name=whenScaled,casttype=PersistentVolumeClaimRetentionPolicyType"` +} + // A StatefulSetSpec is the specification of a StatefulSet. type StatefulSetSpec struct { // replicas is the desired number of replicas of the given Template. @@ -218,6 +252,19 @@ type StatefulSetSpec struct { // consists of all revisions not represented by a currently applied // StatefulSetSpec version. The default value is 10. RevisionHistoryLimit *int32 `json:"revisionHistoryLimit,omitempty" protobuf:"varint,8,opt,name=revisionHistoryLimit"` + + // Minimum number of seconds for which a newly created pod should be ready + // without any of its container crashing for it to be considered available. + // Defaults to 0 (pod will be considered available as soon as it is ready) + // This is an alpha field and requires enabling StatefulSetMinReadySeconds feature gate. + // +optional + MinReadySeconds int32 `json:"minReadySeconds,omitempty" protobuf:"varint,9,opt,name=minReadySeconds"` + + // PersistentVolumeClaimRetentionPolicy describes the policy used for PVCs created from + // the StatefulSet VolumeClaimTemplates. This requires the + // StatefulSetAutoDeletePVC feature gate to be enabled, which is alpha. + // +optional + PersistentVolumeClaimRetentionPolicy *StatefulSetPersistentVolumeClaimRetentionPolicy `json:"persistentVolumeClaimRetentionPolicy,omitempty" protobuf:"bytes,10,opt,name=persistentVolumeClaimRetentionPolicy"` } // StatefulSetStatus represents the current state of a StatefulSet. @@ -230,7 +277,7 @@ type StatefulSetStatus struct { // replicas is the number of Pods created by the StatefulSet controller. Replicas int32 `json:"replicas" protobuf:"varint,2,opt,name=replicas"` - // readyReplicas is the number of Pods created by the StatefulSet controller that have a Ready Condition. + // readyReplicas is the number of pods created by this StatefulSet controller with a Ready Condition. ReadyReplicas int32 `json:"readyReplicas,omitempty" protobuf:"varint,3,opt,name=readyReplicas"` // currentReplicas is the number of Pods created by the StatefulSet controller from the StatefulSet version @@ -260,6 +307,10 @@ type StatefulSetStatus struct { // +patchMergeKey=type // +patchStrategy=merge Conditions []StatefulSetCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,10,rep,name=conditions"` + + // Total number of available pods (ready for at least minReadySeconds) targeted by this StatefulSet. + // This is a beta field and enabled/disabled by StatefulSetMinReadySeconds feature gate. 
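+	// availableReplicas is carried as protobuf field 11 (varint), matching the
+	// "case 11" branch added to StatefulSetStatus.Unmarshal in generated.pb.go.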
+ AvailableReplicas int32 `json:"availableReplicas" protobuf:"varint,11,opt,name=availableReplicas"` } type StatefulSetConditionType string @@ -472,7 +523,7 @@ type DeploymentStatus struct { // +optional UpdatedReplicas int32 `json:"updatedReplicas,omitempty" protobuf:"varint,3,opt,name=updatedReplicas"` - // Total number of ready pods targeted by this deployment. + // readyReplicas is the number of pods targeted by this Deployment controller with a Ready Condition. // +optional ReadyReplicas int32 `json:"readyReplicas,omitempty" protobuf:"varint,7,opt,name=readyReplicas"` diff --git a/vendor/k8s.io/api/apps/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/apps/v1beta1/types_swagger_doc_generated.go index 504b858639ea7..e92881a35da15 100644 --- a/vendor/k8s.io/api/apps/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/apps/v1beta1/types_swagger_doc_generated.go @@ -116,7 +116,7 @@ var map_DeploymentStatus = map[string]string{ "observedGeneration": "The generation observed by the deployment controller.", "replicas": "Total number of non-terminated pods targeted by this deployment (their labels match the selector).", "updatedReplicas": "Total number of non-terminated pods targeted by this deployment that have the desired template spec.", - "readyReplicas": "Total number of ready pods targeted by this deployment.", + "readyReplicas": "readyReplicas is the number of pods targeted by this Deployment controller with a Ready Condition.", "availableReplicas": "Total number of available pods (ready for at least minReadySeconds) targeted by this deployment.", "unavailableReplicas": "Total number of unavailable pods targeted by this deployment. This is the total number of pods that are still required for the deployment to have 100% available capacity. They may either be pods that are running but not yet available or pods that still have not been created.", "conditions": "Represents the latest available observations of a deployment's current state.", @@ -227,16 +227,28 @@ func (StatefulSetList) SwaggerDoc() map[string]string { return map_StatefulSetList } +var map_StatefulSetPersistentVolumeClaimRetentionPolicy = map[string]string{ + "": "StatefulSetPersistentVolumeClaimRetentionPolicy describes the policy used for PVCs created from the StatefulSet VolumeClaimTemplates.", + "whenDeleted": "WhenDeleted specifies what happens to PVCs created from StatefulSet VolumeClaimTemplates when the StatefulSet is deleted. The default policy of `Retain` causes PVCs to not be affected by StatefulSet deletion. The `Delete` policy causes those PVCs to be deleted.", + "whenScaled": "WhenScaled specifies what happens to PVCs created from StatefulSet VolumeClaimTemplates when the StatefulSet is scaled down. The default policy of `Retain` causes PVCs to not be affected by a scaledown. The `Delete` policy causes the associated PVCs for any excess pods above the replica count to be deleted.", +} + +func (StatefulSetPersistentVolumeClaimRetentionPolicy) SwaggerDoc() map[string]string { + return map_StatefulSetPersistentVolumeClaimRetentionPolicy +} + var map_StatefulSetSpec = map[string]string{ - "": "A StatefulSetSpec is the specification of a StatefulSet.", - "replicas": "replicas is the desired number of replicas of the given Template. These are replicas in the sense that they are instantiations of the same Template, but individual replicas also have a consistent identity. If unspecified, defaults to 1.", - "selector": "selector is a label query over pods that should match the replica count. 
If empty, defaulted to labels on the pod template. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors", - "template": "template is the object that describes the pod that will be created if insufficient replicas are detected. Each pod stamped out by the StatefulSet will fulfill this Template, but have a unique identity from the rest of the StatefulSet.", - "volumeClaimTemplates": "volumeClaimTemplates is a list of claims that pods are allowed to reference. The StatefulSet controller is responsible for mapping network identities to claims in a way that maintains the identity of a pod. Every claim in this list must have at least one matching (by name) volumeMount in one container in the template. A claim in this list takes precedence over any volumes in the template, with the same name.", - "serviceName": "serviceName is the name of the service that governs this StatefulSet. This service must exist before the StatefulSet, and is responsible for the network identity of the set. Pods get DNS/hostnames that follow the pattern: pod-specific-string.serviceName.default.svc.cluster.local where \"pod-specific-string\" is managed by the StatefulSet controller.", - "podManagementPolicy": "podManagementPolicy controls how pods are created during initial scale up, when replacing pods on nodes, or when scaling down. The default policy is `OrderedReady`, where pods are created in increasing order (pod-0, then pod-1, etc) and the controller will wait until each pod is ready before continuing. When scaling down, the pods are removed in the opposite order. The alternative policy is `Parallel` which will create pods in parallel to match the desired scale without waiting, and on scale down will delete all pods at once.", - "updateStrategy": "updateStrategy indicates the StatefulSetUpdateStrategy that will be employed to update Pods in the StatefulSet when a revision is made to Template.", - "revisionHistoryLimit": "revisionHistoryLimit is the maximum number of revisions that will be maintained in the StatefulSet's revision history. The revision history consists of all revisions not represented by a currently applied StatefulSetSpec version. The default value is 10.", + "": "A StatefulSetSpec is the specification of a StatefulSet.", + "replicas": "replicas is the desired number of replicas of the given Template. These are replicas in the sense that they are instantiations of the same Template, but individual replicas also have a consistent identity. If unspecified, defaults to 1.", + "selector": "selector is a label query over pods that should match the replica count. If empty, defaulted to labels on the pod template. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors", + "template": "template is the object that describes the pod that will be created if insufficient replicas are detected. Each pod stamped out by the StatefulSet will fulfill this Template, but have a unique identity from the rest of the StatefulSet.", + "volumeClaimTemplates": "volumeClaimTemplates is a list of claims that pods are allowed to reference. The StatefulSet controller is responsible for mapping network identities to claims in a way that maintains the identity of a pod. Every claim in this list must have at least one matching (by name) volumeMount in one container in the template. 
A claim in this list takes precedence over any volumes in the template, with the same name.", + "serviceName": "serviceName is the name of the service that governs this StatefulSet. This service must exist before the StatefulSet, and is responsible for the network identity of the set. Pods get DNS/hostnames that follow the pattern: pod-specific-string.serviceName.default.svc.cluster.local where \"pod-specific-string\" is managed by the StatefulSet controller.", + "podManagementPolicy": "podManagementPolicy controls how pods are created during initial scale up, when replacing pods on nodes, or when scaling down. The default policy is `OrderedReady`, where pods are created in increasing order (pod-0, then pod-1, etc) and the controller will wait until each pod is ready before continuing. When scaling down, the pods are removed in the opposite order. The alternative policy is `Parallel` which will create pods in parallel to match the desired scale without waiting, and on scale down will delete all pods at once.", + "updateStrategy": "updateStrategy indicates the StatefulSetUpdateStrategy that will be employed to update Pods in the StatefulSet when a revision is made to Template.", + "revisionHistoryLimit": "revisionHistoryLimit is the maximum number of revisions that will be maintained in the StatefulSet's revision history. The revision history consists of all revisions not represented by a currently applied StatefulSetSpec version. The default value is 10.", + "minReadySeconds": "Minimum number of seconds for which a newly created pod should be ready without any of its container crashing for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready) This is an alpha field and requires enabling StatefulSetMinReadySeconds feature gate.", + "persistentVolumeClaimRetentionPolicy": "PersistentVolumeClaimRetentionPolicy describes the policy used for PVCs created from the StatefulSet VolumeClaimTemplates. This requires the StatefulSetAutoDeletePVC feature gate to be enabled, which is alpha.", } func (StatefulSetSpec) SwaggerDoc() map[string]string { @@ -247,13 +259,14 @@ var map_StatefulSetStatus = map[string]string{ "": "StatefulSetStatus represents the current state of a StatefulSet.", "observedGeneration": "observedGeneration is the most recent generation observed for this StatefulSet. It corresponds to the StatefulSet's generation, which is updated on mutation by the API Server.", "replicas": "replicas is the number of Pods created by the StatefulSet controller.", - "readyReplicas": "readyReplicas is the number of Pods created by the StatefulSet controller that have a Ready Condition.", + "readyReplicas": "readyReplicas is the number of pods created by this StatefulSet controller with a Ready Condition.", "currentReplicas": "currentReplicas is the number of Pods created by the StatefulSet controller from the StatefulSet version indicated by currentRevision.", "updatedReplicas": "updatedReplicas is the number of Pods created by the StatefulSet controller from the StatefulSet version indicated by updateRevision.", "currentRevision": "currentRevision, if not empty, indicates the version of the StatefulSet used to generate Pods in the sequence [0,currentReplicas).", "updateRevision": "updateRevision, if not empty, indicates the version of the StatefulSet used to generate Pods in the sequence [replicas-updatedReplicas,replicas)", "collisionCount": "collisionCount is the count of hash collisions for the StatefulSet. 
The StatefulSet controller uses this field as a collision avoidance mechanism when it needs to create the name for the newest ControllerRevision.", "conditions": "Represents the latest available observations of a statefulset's current state.", + "availableReplicas": "Total number of available pods (ready for at least minReadySeconds) targeted by this StatefulSet. This is a beta field and enabled/disabled by StatefulSetMinReadySeconds feature gate.", } func (StatefulSetStatus) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/apps/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/apps/v1beta1/zz_generated.deepcopy.go index fb27612419e7a..be3fcc75b62cd 100644 --- a/vendor/k8s.io/api/apps/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/apps/v1beta1/zz_generated.deepcopy.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* @@ -499,6 +500,22 @@ func (in *StatefulSetList) DeepCopyObject() runtime.Object { return nil } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StatefulSetPersistentVolumeClaimRetentionPolicy) DeepCopyInto(out *StatefulSetPersistentVolumeClaimRetentionPolicy) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatefulSetPersistentVolumeClaimRetentionPolicy. +func (in *StatefulSetPersistentVolumeClaimRetentionPolicy) DeepCopy() *StatefulSetPersistentVolumeClaimRetentionPolicy { + if in == nil { + return nil + } + out := new(StatefulSetPersistentVolumeClaimRetentionPolicy) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *StatefulSetSpec) DeepCopyInto(out *StatefulSetSpec) { *out = *in @@ -526,6 +543,11 @@ func (in *StatefulSetSpec) DeepCopyInto(out *StatefulSetSpec) { *out = new(int32) **out = **in } + if in.PersistentVolumeClaimRetentionPolicy != nil { + in, out := &in.PersistentVolumeClaimRetentionPolicy, &out.PersistentVolumeClaimRetentionPolicy + *out = new(StatefulSetPersistentVolumeClaimRetentionPolicy) + **out = **in + } return } diff --git a/vendor/k8s.io/api/apps/v1beta1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/apps/v1beta1/zz_generated.prerelease-lifecycle.go index f3850fc90cf2a..1e2233e31791f 100644 --- a/vendor/k8s.io/api/apps/v1beta1/zz_generated.prerelease-lifecycle.go +++ b/vendor/k8s.io/api/apps/v1beta1/zz_generated.prerelease-lifecycle.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* diff --git a/vendor/k8s.io/api/apps/v1beta2/generated.pb.go b/vendor/k8s.io/api/apps/v1beta2/generated.pb.go index b2e5c2e972387..cd1a06e25f9d8 100644 --- a/vendor/k8s.io/api/apps/v1beta2/generated.pb.go +++ b/vendor/k8s.io/api/apps/v1beta2/generated.pb.go @@ -833,10 +833,40 @@ func (m *StatefulSetList) XXX_DiscardUnknown() { var xxx_messageInfo_StatefulSetList proto.InternalMessageInfo +func (m *StatefulSetPersistentVolumeClaimRetentionPolicy) Reset() { + *m = StatefulSetPersistentVolumeClaimRetentionPolicy{} +} +func (*StatefulSetPersistentVolumeClaimRetentionPolicy) ProtoMessage() {} +func (*StatefulSetPersistentVolumeClaimRetentionPolicy) Descriptor() ([]byte, []int) { + return fileDescriptor_42fe616264472f7e, []int{28} +} +func (m *StatefulSetPersistentVolumeClaimRetentionPolicy) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StatefulSetPersistentVolumeClaimRetentionPolicy) 
XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *StatefulSetPersistentVolumeClaimRetentionPolicy) XXX_Merge(src proto.Message) { + xxx_messageInfo_StatefulSetPersistentVolumeClaimRetentionPolicy.Merge(m, src) +} +func (m *StatefulSetPersistentVolumeClaimRetentionPolicy) XXX_Size() int { + return m.Size() +} +func (m *StatefulSetPersistentVolumeClaimRetentionPolicy) XXX_DiscardUnknown() { + xxx_messageInfo_StatefulSetPersistentVolumeClaimRetentionPolicy.DiscardUnknown(m) +} + +var xxx_messageInfo_StatefulSetPersistentVolumeClaimRetentionPolicy proto.InternalMessageInfo + func (m *StatefulSetSpec) Reset() { *m = StatefulSetSpec{} } func (*StatefulSetSpec) ProtoMessage() {} func (*StatefulSetSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_42fe616264472f7e, []int{28} + return fileDescriptor_42fe616264472f7e, []int{29} } func (m *StatefulSetSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -864,7 +894,7 @@ var xxx_messageInfo_StatefulSetSpec proto.InternalMessageInfo func (m *StatefulSetStatus) Reset() { *m = StatefulSetStatus{} } func (*StatefulSetStatus) ProtoMessage() {} func (*StatefulSetStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_42fe616264472f7e, []int{29} + return fileDescriptor_42fe616264472f7e, []int{30} } func (m *StatefulSetStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -892,7 +922,7 @@ var xxx_messageInfo_StatefulSetStatus proto.InternalMessageInfo func (m *StatefulSetUpdateStrategy) Reset() { *m = StatefulSetUpdateStrategy{} } func (*StatefulSetUpdateStrategy) ProtoMessage() {} func (*StatefulSetUpdateStrategy) Descriptor() ([]byte, []int) { - return fileDescriptor_42fe616264472f7e, []int{30} + return fileDescriptor_42fe616264472f7e, []int{31} } func (m *StatefulSetUpdateStrategy) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -947,6 +977,7 @@ func init() { proto.RegisterType((*StatefulSet)(nil), "k8s.io.api.apps.v1beta2.StatefulSet") proto.RegisterType((*StatefulSetCondition)(nil), "k8s.io.api.apps.v1beta2.StatefulSetCondition") proto.RegisterType((*StatefulSetList)(nil), "k8s.io.api.apps.v1beta2.StatefulSetList") + proto.RegisterType((*StatefulSetPersistentVolumeClaimRetentionPolicy)(nil), "k8s.io.api.apps.v1beta2.StatefulSetPersistentVolumeClaimRetentionPolicy") proto.RegisterType((*StatefulSetSpec)(nil), "k8s.io.api.apps.v1beta2.StatefulSetSpec") proto.RegisterType((*StatefulSetStatus)(nil), "k8s.io.api.apps.v1beta2.StatefulSetStatus") proto.RegisterType((*StatefulSetUpdateStrategy)(nil), "k8s.io.api.apps.v1beta2.StatefulSetUpdateStrategy") @@ -957,143 +988,150 @@ func init() { } var fileDescriptor_42fe616264472f7e = []byte{ - // 2169 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x5a, 0xcd, 0x6f, 0x1c, 0xb7, - 0xf9, 0xd6, 0xec, 0x87, 0xb4, 0xa2, 0x2c, 0xc9, 0xa6, 0xf4, 0x93, 0x36, 0xf2, 0xaf, 0x2b, 0x63, - 0x13, 0x38, 0x4a, 0x6c, 0xcd, 0xda, 0xca, 0x07, 0x12, 0xbb, 0x68, 0xab, 0x95, 0x52, 0xdb, 0x81, - 0xbe, 0x42, 0x59, 0x06, 0x1a, 0xb4, 0xa8, 0xa9, 0x5d, 0x7a, 0x35, 0xd1, 0x7c, 0x61, 0x86, 0xb3, - 0xf5, 0xa2, 0x97, 0x5e, 0x0b, 0x14, 0x68, 0x7b, 0xed, 0x3f, 0xd1, 0x5b, 0x51, 0xb4, 0xb7, 0x22, - 0x28, 0x7c, 0x29, 0x10, 0xf4, 0x92, 0x9c, 0x84, 0x7a, 0x73, 0x2a, 0x8a, 0x5e, 0x0a, 0xf4, 0x12, - 0xa0, 0x40, 0x41, 0x0e, 0xe7, 0x83, 0xf3, 0xe1, 0x1d, 0x29, 0x8e, 0xd2, 0x04, 0xb9, 0x69, 0xc9, - 0xe7, 0x7d, 0xf8, 
0xbe, 0xe4, 0x4b, 0xbe, 0x0f, 0x39, 0x02, 0xdf, 0x3b, 0x7e, 0xcb, 0x55, 0x35,
-	// [... remainder of the old gzipped FileDescriptorProto bytes elided ...]
+	// 2284 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x5a, 0xcf, 0x6f, 0x1b, 0xc7,
+	0xf5, 0xf7, 0xf2, 0x87, 0x44, 0x8e, 0x2c, 0xc9, 0x1e, 0xe9, 0x2b, 0x31, 0xf2, 0xb7, 0xa4, 0xb1,
+	0x31, 0x1c, 0x25, 0xb6, 0x48, 0x5b, 0xf9, 0x81, 0xc4, 0x2e, 0x92, 0x8a, 0x52, 0x6a, 0x3b, 0xd0,
+	0x0f, 0x66, 0x64, 0x39, 0x68, 0xd0, 0x1f, 0x1e, 0x91, 0x63, 0x6a, 0xa3, 0xe5, 0xee, 0x62, 0x77,
+	0x96, 0x31, 0xd1, 0x4b, 0xaf, 0x05, 0x0a, 0xb4, 0xbd, 0xf6, 0x9f, 0xe8, 0xad, 0x28, 0x9a, 0x5b,
+	0x11, 0x04, 0x3e, 0x06, 0xbd, 0x24, 0xbd, 0x10, 0x35, 0x73, 0x2a, 0x8a, 0xde, 0xda, 0x8b, 0x81,
+	0x02, 0xc5, 0xcc, 0xce, 0xfe, 0xde, 0x35, 0x97, 0x8a, 0xa3, 0x34, 0x41, 0x6e, 0xe2, 0xbc, 0xf7,
+	0x3e, 0xf3, 0xde, 0xcc, 0x7b, 0xf3, 0x3e, 0x33, 0x2b, 0xf0, 0x83, 0xe3, 0xd7, 0xad, 0xba, 0xa2,
+	0x37, 0x8e, 0xed, 0x43, 0x62, 0x6a, 0x84, 0x12, 0xab, 0xd1, 0x27, 0x5a, 0x47, 0x37, 0x1b, 0x42,
+	0x80, 0x0d, 0xa5, 0x81, 0x0d, 0xc3, 0x6a, 0xf4, 0xaf, 0x1f, 0x12, 0x8a, 0xd7, 0x1b, 0x5d, 0xa2,
+	0x11, 0x13, 0x53, 0xd2, 0xa9, 0x1b, 0xa6, 0x4e, 0x75, 0xbf, 0xec, 0x28, 0xd6, 0xb1, 0xa1, 0xd4,
+	0x99, 0x62, 0x5d, 0x28, 0xae, 0xac, 0x75, 0x15, 0x7a, 0x64, 0x1f, 0xd6, 0xdb, 0x7a, 0xaf, 0xd1,
+	0xd5, 0xbb, 0x7a, 0x83, 0xeb, 0x1f, 0xda, 0x0f, 0xf8, 0x2f, 0xfe, 0x83, 0xff, 0xe5, 0xe0, 0xac,
+	0xc8, 0x81, 0x09, 0xdb, 0xba, 0x49, 0x1a, 0xfd, 0xeb, 0xd1, 0xb9, 0x56, 0x5e, 0xf1, 0x75, 0x7a,
+	0xb8, 0x7d, 0xa4, 0x68, 0xc4, 0x1c, 0x34, 0x8c, 0xe3, 0x2e, 0x1b, 0xb0, 0x1a, 0x3d, 0x42, 0x71,
+	0x92, 0x55, 0x23, 0xcd, 0xca, 0xb4, 0x35, 0xaa, 0xf4, 0x48, 0xcc, 0xe0, 0xb5, 0x71, 0x06, 0x56,
+	0xfb, 0x88, 
0xf4, 0x70, 0xcc, 0xee, 0xe5, 0x34, 0x3b, 0x9b, 0x2a, 0x6a, 0x43, 0xd1, 0xa8, 0x45, + 0xcd, 0xa8, 0x91, 0xfc, 0x6f, 0x09, 0xc0, 0x4d, 0x5d, 0xa3, 0xa6, 0xae, 0xaa, 0xc4, 0x44, 0xa4, + 0xaf, 0x58, 0x8a, 0xae, 0xc1, 0xfb, 0xa0, 0xc4, 0xe2, 0xe9, 0x60, 0x8a, 0x2b, 0xd2, 0x45, 0x69, + 0x75, 0x66, 0xfd, 0x5a, 0xdd, 0x5f, 0x69, 0x0f, 0xbe, 0x6e, 0x1c, 0x77, 0xd9, 0x80, 0x55, 0x67, + 0xda, 0xf5, 0xfe, 0xf5, 0xfa, 0xde, 0xe1, 0x07, 0xa4, 0x4d, 0x77, 0x08, 0xc5, 0x4d, 0xf8, 0x68, + 0x58, 0x3b, 0x33, 0x1a, 0xd6, 0x80, 0x3f, 0x86, 0x3c, 0x54, 0xb8, 0x07, 0x0a, 0x1c, 0x3d, 0xc7, + 0xd1, 0xd7, 0x52, 0xd1, 0x45, 0xd0, 0x75, 0x84, 0x3f, 0x7c, 0xfb, 0x21, 0x25, 0x1a, 0x73, 0xaf, + 0x79, 0x56, 0x40, 0x17, 0xb6, 0x30, 0xc5, 0x88, 0x03, 0xc1, 0xab, 0xa0, 0x64, 0x0a, 0xf7, 0x2b, + 0xf9, 0x8b, 0xd2, 0x6a, 0xbe, 0x79, 0x4e, 0x68, 0x95, 0xdc, 0xb0, 0x90, 0xa7, 0x21, 0x3f, 0x92, + 0xc0, 0x52, 0x3c, 0xee, 0x6d, 0xc5, 0xa2, 0xf0, 0xc7, 0xb1, 0xd8, 0xeb, 0xd9, 0x62, 0x67, 0xd6, + 0x3c, 0x72, 0x6f, 0x62, 0x77, 0x24, 0x10, 0x77, 0x0b, 0x14, 0x15, 0x4a, 0x7a, 0x56, 0x25, 0x77, + 0x31, 0xbf, 0x3a, 0xb3, 0x7e, 0xa5, 0x9e, 0x92, 0xc0, 0xf5, 0xb8, 0x77, 0xcd, 0x59, 0x81, 0x5b, + 0xbc, 0xc3, 0x10, 0x90, 0x03, 0x24, 0xff, 0x32, 0x07, 0xca, 0x5b, 0x98, 0xf4, 0x74, 0x6d, 0x9f, + 0xd0, 0x53, 0xd8, 0xb9, 0xdb, 0xa0, 0x60, 0x19, 0xa4, 0x2d, 0x76, 0xee, 0x72, 0x6a, 0x00, 0x9e, + 0x4f, 0xfb, 0x06, 0x69, 0xfb, 0x5b, 0xc6, 0x7e, 0x21, 0x8e, 0x00, 0x5b, 0x60, 0xca, 0xa2, 0x98, + 0xda, 0x16, 0xdf, 0xb0, 0x99, 0xf5, 0xd5, 0x0c, 0x58, 0x5c, 0xbf, 0x39, 0x27, 0xd0, 0xa6, 0x9c, + 0xdf, 0x48, 0xe0, 0xc8, 0x7f, 0xcf, 0x01, 0xe8, 0xe9, 0x6e, 0xea, 0x5a, 0x47, 0xa1, 0x2c, 0x9d, + 0x6f, 0x80, 0x02, 0x1d, 0x18, 0x84, 0x2f, 0x48, 0xb9, 0x79, 0xd9, 0x75, 0xe5, 0xee, 0xc0, 0x20, + 0x4f, 0x86, 0xb5, 0xa5, 0xb8, 0x05, 0x93, 0x20, 0x6e, 0x03, 0xb7, 0x3d, 0x27, 0x73, 0xdc, 0xfa, + 0x95, 0xf0, 0xd4, 0x4f, 0x86, 0xb5, 0x84, 0xb3, 0xa3, 0xee, 0x21, 0x85, 0x1d, 0x84, 0x7d, 0x00, + 0x55, 0x6c, 0xd1, 0xbb, 0x26, 0xd6, 0x2c, 0x67, 0x26, 0xa5, 0x47, 0x44, 0xf8, 0x2f, 0x65, 0xdb, + 0x28, 0x66, 0xd1, 0x5c, 0x11, 0x5e, 0xc0, 0xed, 0x18, 0x1a, 0x4a, 0x98, 0x01, 0x5e, 0x06, 0x53, + 0x26, 0xc1, 0x96, 0xae, 0x55, 0x0a, 0x3c, 0x0a, 0x6f, 0x01, 0x11, 0x1f, 0x45, 0x42, 0x0a, 0x5f, + 0x04, 0xd3, 0x3d, 0x62, 0x59, 0xb8, 0x4b, 0x2a, 0x45, 0xae, 0x38, 0x2f, 0x14, 0xa7, 0x77, 0x9c, + 0x61, 0xe4, 0xca, 0xe5, 0x3f, 0x48, 0x60, 0xd6, 0x5b, 0xb9, 0x53, 0xa8, 0x9c, 0x5b, 0xe1, 0xca, + 0x91, 0xc7, 0x27, 0x4b, 0x4a, 0xc1, 0x7c, 0x9c, 0x0f, 0x38, 0xce, 0xd2, 0x11, 0xfe, 0x04, 0x94, + 0x2c, 0xa2, 0x92, 0x36, 0xd5, 0x4d, 0xe1, 0xf8, 0xcb, 0x19, 0x1d, 0xc7, 0x87, 0x44, 0xdd, 0x17, + 0xa6, 0xcd, 0xb3, 0xcc, 0x73, 0xf7, 0x17, 0xf2, 0x20, 0xe1, 0xbb, 0xa0, 0x44, 0x49, 0xcf, 0x50, + 0x31, 0x25, 0xa2, 0x6a, 0x9e, 0x0f, 0x3a, 0xcf, 0x72, 0x86, 0x81, 0xb5, 0xf4, 0xce, 0x5d, 0xa1, + 0xc6, 0x4b, 0xc6, 0x5b, 0x0c, 0x77, 0x14, 0x79, 0x30, 0xd0, 0x00, 0x73, 0xb6, 0xd1, 0x61, 0x9a, + 0x94, 0x1d, 0xe7, 0xdd, 0x81, 0xc8, 0xa1, 0x6b, 0xe3, 0x57, 0xe5, 0x20, 0x64, 0xd7, 0x5c, 0x12, + 0xb3, 0xcc, 0x85, 0xc7, 0x51, 0x04, 0x1f, 0x6e, 0x80, 0xf9, 0x9e, 0xa2, 0x21, 0x82, 0x3b, 0x83, + 0x7d, 0xd2, 0xd6, 0xb5, 0x8e, 0xc5, 0x53, 0xa9, 0xd8, 0x5c, 0x16, 0x00, 0xf3, 0x3b, 0x61, 0x31, + 0x8a, 0xea, 0xc3, 0x6d, 0xb0, 0xe8, 0x1e, 0xc0, 0xb7, 0x15, 0x8b, 0xea, 0xe6, 0x60, 0x5b, 0xe9, + 0x29, 0xb4, 0x32, 0xc5, 0x71, 0x2a, 0xa3, 0x61, 0x6d, 0x11, 0x25, 0xc8, 0x51, 0xa2, 0x95, 0xfc, + 0xdb, 0x29, 0x30, 0x1f, 0x39, 0x17, 0xe0, 0x3d, 0xb0, 0xd4, 0xb6, 0x4d, 0x93, 0x68, 0x74, 0xd7, + 0xee, 0x1d, 0x12, 0x73, 0xbf, 0x7d, 
0x44, 0x3a, 0xb6, 0x4a, 0x3a, 0x7c, 0x5b, 0x8b, 0xcd, 0xaa, + 0xf0, 0x75, 0x69, 0x33, 0x51, 0x0b, 0xa5, 0x58, 0xc3, 0x77, 0x00, 0xd4, 0xf8, 0xd0, 0x8e, 0x62, + 0x59, 0x1e, 0x66, 0x8e, 0x63, 0x7a, 0xa5, 0xb8, 0x1b, 0xd3, 0x40, 0x09, 0x56, 0xcc, 0xc7, 0x0e, + 0xb1, 0x14, 0x93, 0x74, 0xa2, 0x3e, 0xe6, 0xc3, 0x3e, 0x6e, 0x25, 0x6a, 0xa1, 0x14, 0x6b, 0xf8, + 0x2a, 0x98, 0x71, 0x66, 0xe3, 0x6b, 0x2e, 0x36, 0x67, 0x41, 0x80, 0xcd, 0xec, 0xfa, 0x22, 0x14, + 0xd4, 0x63, 0xa1, 0xe9, 0x87, 0x16, 0x31, 0xfb, 0xa4, 0x73, 0xcb, 0x21, 0x07, 0xac, 0x83, 0x16, + 0x79, 0x07, 0xf5, 0x42, 0xdb, 0x8b, 0x69, 0xa0, 0x04, 0x2b, 0x16, 0x9a, 0x93, 0x35, 0xb1, 0xd0, + 0xa6, 0xc2, 0xa1, 0x1d, 0x24, 0x6a, 0xa1, 0x14, 0x6b, 0x96, 0x7b, 0x8e, 0xcb, 0x1b, 0x7d, 0xac, + 0xa8, 0xf8, 0x50, 0x25, 0x95, 0xe9, 0x70, 0xee, 0xed, 0x86, 0xc5, 0x28, 0xaa, 0x0f, 0x6f, 0x81, + 0xf3, 0xce, 0xd0, 0x81, 0x86, 0x3d, 0x90, 0x12, 0x07, 0x79, 0x4e, 0x80, 0x9c, 0xdf, 0x8d, 0x2a, + 0xa0, 0xb8, 0x0d, 0xbc, 0x01, 0xe6, 0xda, 0xba, 0xaa, 0xf2, 0x7c, 0xdc, 0xd4, 0x6d, 0x8d, 0x56, + 0xca, 0x1c, 0x05, 0xb2, 0x1a, 0xda, 0x0c, 0x49, 0x50, 0x44, 0x13, 0xfe, 0x0c, 0x80, 0xb6, 0xdb, + 0x18, 0xac, 0x0a, 0x18, 0xc3, 0x00, 0xe2, 0x6d, 0xc9, 0xef, 0xcc, 0xde, 0x90, 0x85, 0x02, 0x90, + 0xf2, 0xc7, 0x12, 0x58, 0x4e, 0x29, 0x74, 0xf8, 0x56, 0xa8, 0x09, 0x5e, 0x89, 0x34, 0xc1, 0x0b, + 0x29, 0x66, 0x81, 0x4e, 0x78, 0x04, 0x66, 0x19, 0x21, 0x51, 0xb4, 0xae, 0xa3, 0x22, 0xce, 0xb2, + 0x46, 0x6a, 0x00, 0x28, 0xa8, 0xed, 0x9f, 0xca, 0xe7, 0x47, 0xc3, 0xda, 0x6c, 0x48, 0x86, 0xc2, + 0xc0, 0xf2, 0xaf, 0x72, 0x00, 0x6c, 0x11, 0x43, 0xd5, 0x07, 0x3d, 0xa2, 0x9d, 0x06, 0xa7, 0xb9, + 0x13, 0xe2, 0x34, 0x2f, 0xa4, 0x6f, 0x89, 0xe7, 0x54, 0x2a, 0xa9, 0x79, 0x37, 0x42, 0x6a, 0x5e, + 0xcc, 0x02, 0xf6, 0x74, 0x56, 0xf3, 0x59, 0x1e, 0x2c, 0xf8, 0xca, 0x3e, 0xad, 0xb9, 0x19, 0xda, + 0xd1, 0x17, 0x22, 0x3b, 0xba, 0x9c, 0x60, 0xf2, 0x95, 0xf1, 0x9a, 0x0f, 0xc0, 0x1c, 0x63, 0x1d, + 0xce, 0xfe, 0x71, 0x4e, 0x33, 0x35, 0x31, 0xa7, 0xf1, 0x3a, 0xd1, 0x76, 0x08, 0x09, 0x45, 0x90, + 0x53, 0x38, 0xd4, 0xf4, 0x37, 0x91, 0x43, 0xfd, 0x51, 0x02, 0x73, 0xfe, 0x36, 0x9d, 0x02, 0x89, + 0xba, 0x1d, 0x26, 0x51, 0xcf, 0x67, 0x48, 0xce, 0x14, 0x16, 0xf5, 0x59, 0x21, 0xe8, 0x3a, 0xa7, + 0x51, 0xab, 0xec, 0x0a, 0x66, 0xa8, 0x4a, 0x1b, 0x5b, 0xa2, 0xdf, 0x9e, 0x75, 0xae, 0x5f, 0xce, + 0x18, 0xf2, 0xa4, 0x21, 0xc2, 0x95, 0xfb, 0x6a, 0x09, 0x57, 0xfe, 0xd9, 0x10, 0xae, 0x1f, 0x81, + 0x92, 0xe5, 0x52, 0xad, 0x02, 0x87, 0xbc, 0x92, 0xa9, 0xb0, 0x05, 0xcb, 0xf2, 0xa0, 0x3d, 0x7e, + 0xe5, 0xc1, 0x25, 0x31, 0xab, 0xe2, 0xd7, 0xc9, 0xac, 0x58, 0xa2, 0x1b, 0xd8, 0xb6, 0x48, 0x87, + 0x17, 0x55, 0xc9, 0x4f, 0xf4, 0x16, 0x1f, 0x45, 0x42, 0x0a, 0x0f, 0xc0, 0xb2, 0x61, 0xea, 0x5d, + 0x93, 0x58, 0xd6, 0x16, 0xc1, 0x1d, 0x55, 0xd1, 0x88, 0x1b, 0x80, 0xd3, 0x13, 0x2f, 0x8c, 0x86, + 0xb5, 0xe5, 0x56, 0xb2, 0x0a, 0x4a, 0xb3, 0x95, 0xff, 0x5c, 0x00, 0xe7, 0xa2, 0x67, 0x63, 0x0a, + 0x4d, 0x91, 0x4e, 0x44, 0x53, 0xae, 0x06, 0xf2, 0xd4, 0xe1, 0x70, 0x81, 0xa7, 0x82, 0x58, 0xae, + 0x6e, 0x80, 0x79, 0x41, 0x4b, 0x5c, 0xa1, 0x20, 0x6a, 0xde, 0xf6, 0x1c, 0x84, 0xc5, 0x28, 0xaa, + 0x0f, 0x6f, 0x82, 0x59, 0x93, 0x33, 0x2f, 0x17, 0xc0, 0x61, 0x2f, 0xff, 0x27, 0x00, 0x66, 0x51, + 0x50, 0x88, 0xc2, 0xba, 0x8c, 0xb9, 0xf8, 0x84, 0xc4, 0x05, 0x28, 0x84, 0x99, 0xcb, 0x46, 0x54, + 0x01, 0xc5, 0x6d, 0xe0, 0x0e, 0x58, 0xb0, 0xb5, 0x38, 0x94, 0x93, 0x6b, 0x17, 0x04, 0xd4, 0xc2, + 0x41, 0x5c, 0x05, 0x25, 0xd9, 0xc1, 0xfb, 0x21, 0x32, 0x33, 0xc5, 0xcf, 0x93, 0xab, 0x19, 0x6a, + 0x22, 0x33, 0x9b, 0x49, 0xa0, 0x5a, 0xa5, 0xac, 0x54, 0x4b, 
0xfe, 0x48, 0x02, 0x30, 0x5e, 0x87, + 0x63, 0x5f, 0x02, 0x62, 0x16, 0x81, 0x8e, 0xa9, 0x24, 0xf3, 0x9f, 0x6b, 0x19, 0xf9, 0x8f, 0x7f, + 0xa0, 0x66, 0x23, 0x40, 0x62, 0xa1, 0x4f, 0xe7, 0x51, 0x27, 0x2b, 0x01, 0xf2, 0x9d, 0x7a, 0x06, + 0x04, 0x28, 0x00, 0xf6, 0x74, 0x02, 0xf4, 0x8f, 0x1c, 0x58, 0xf0, 0x95, 0x33, 0x13, 0xa0, 0x04, + 0x93, 0xef, 0x1e, 0x76, 0xb2, 0x91, 0x12, 0x7f, 0xe9, 0xfe, 0x97, 0x48, 0x89, 0xef, 0x55, 0x0a, + 0x29, 0xf9, 0x7d, 0x2e, 0xe8, 0xfa, 0x84, 0xa4, 0xe4, 0x19, 0xbc, 0x70, 0x7c, 0xe3, 0x78, 0x8d, + 0xfc, 0x49, 0x1e, 0x9c, 0x8b, 0xd6, 0x61, 0xa8, 0x41, 0x4a, 0x63, 0x1b, 0x64, 0x0b, 0x2c, 0x3e, + 0xb0, 0x55, 0x75, 0xc0, 0x63, 0x08, 0x74, 0x49, 0xa7, 0xb5, 0xfe, 0xbf, 0xb0, 0x5c, 0xfc, 0x61, + 0x82, 0x0e, 0x4a, 0xb4, 0x8c, 0xf7, 0xcb, 0xc2, 0x97, 0xed, 0x97, 0xc5, 0x13, 0xf4, 0xcb, 0x64, + 0xca, 0x91, 0x3f, 0x11, 0xe5, 0x98, 0xac, 0x59, 0x26, 0x1c, 0x5c, 0x63, 0xaf, 0xfe, 0x23, 0x09, + 0x2c, 0x25, 0x5f, 0xb8, 0xa1, 0x0a, 0xe6, 0x7a, 0xf8, 0x61, 0xf0, 0xe1, 0x63, 0x5c, 0x13, 0xb1, + 0xa9, 0xa2, 0xd6, 0x9d, 0x4f, 0x46, 0xf5, 0x3b, 0x1a, 0xdd, 0x33, 0xf7, 0xa9, 0xa9, 0x68, 0x5d, + 0xa7, 0xf3, 0xee, 0x84, 0xb0, 0x50, 0x04, 0x1b, 0xbe, 0x0f, 0x4a, 0x3d, 0xfc, 0x70, 0xdf, 0x36, + 0xbb, 0x49, 0x1d, 0x32, 0xdb, 0x3c, 0xbc, 0x00, 0x76, 0x04, 0x0a, 0xf2, 0xf0, 0xe4, 0x2f, 0x24, + 0xb0, 0x9c, 0xd2, 0x55, 0xbf, 0x45, 0x51, 0xee, 0x81, 0x8b, 0xa1, 0x20, 0x59, 0x55, 0x92, 0x07, + 0xb6, 0xca, 0x0b, 0x54, 0x10, 0x99, 0x2b, 0xa0, 0x6c, 0x60, 0x93, 0x2a, 0x1e, 0x0d, 0x2e, 0x36, + 0x67, 0x47, 0xc3, 0x5a, 0xb9, 0xe5, 0x0e, 0x22, 0x5f, 0x2e, 0xff, 0x47, 0x02, 0xc5, 0xfd, 0x36, + 0x56, 0xc9, 0x29, 0x30, 0x89, 0xad, 0x10, 0x93, 0x48, 0x7f, 0xa5, 0xe7, 0xfe, 0xa4, 0x92, 0x88, + 0xed, 0x08, 0x89, 0xb8, 0x34, 0x06, 0xe7, 0xe9, 0xfc, 0xe1, 0x0d, 0x50, 0xf6, 0xa6, 0x9b, 0xec, + 0x70, 0x93, 0x7f, 0x97, 0x03, 0x33, 0x81, 0x29, 0x26, 0x3c, 0x1a, 0xef, 0x87, 0xfa, 0x01, 0x2b, + 0xfa, 0xf5, 0x2c, 0x81, 0xd4, 0xdd, 0xb3, 0xff, 0x6d, 0x8d, 0x9a, 0xc1, 0xcb, 0x63, 0xbc, 0x25, + 0xbc, 0x09, 0xe6, 0x28, 0x36, 0xbb, 0x84, 0xba, 0x32, 0xbe, 0x60, 0x65, 0xff, 0x31, 0xe5, 0x6e, + 0x48, 0x8a, 0x22, 0xda, 0x2b, 0x37, 0xc1, 0x6c, 0x68, 0x32, 0x78, 0x0e, 0xe4, 0x8f, 0xc9, 0xc0, + 0xa1, 0x54, 0x88, 0xfd, 0x09, 0x17, 0x41, 0xb1, 0x8f, 0x55, 0xdb, 0xc9, 0xf3, 0x32, 0x72, 0x7e, + 0xdc, 0xc8, 0xbd, 0x2e, 0xc9, 0xbf, 0x66, 0x8b, 0xe3, 0x27, 0xe7, 0x29, 0x64, 0xd7, 0x3b, 0xa1, + 0xec, 0x4a, 0xff, 0x60, 0x18, 0x2c, 0x99, 0xb4, 0x1c, 0x43, 0x91, 0x1c, 0x7b, 0x29, 0x13, 0xda, + 0xd3, 0x33, 0xed, 0x9f, 0x39, 0xb0, 0x18, 0xd0, 0xf6, 0xa9, 0xea, 0xf7, 0x43, 0x54, 0x75, 0x35, + 0x42, 0x55, 0x2b, 0x49, 0x36, 0xdf, 0x71, 0xd5, 0xf1, 0x5c, 0xf5, 0x4f, 0x12, 0x98, 0x0f, 0xac, + 0xdd, 0x29, 0x90, 0xd5, 0x3b, 0x61, 0xb2, 0x7a, 0x29, 0x4b, 0xd2, 0xa4, 0xb0, 0xd5, 0x7f, 0x49, + 0xa0, 0x11, 0xd0, 0x6a, 0x11, 0xd3, 0x52, 0x2c, 0x4a, 0x34, 0x7a, 0x4f, 0x57, 0xed, 0x1e, 0xd9, + 0x54, 0xb1, 0xd2, 0x43, 0x84, 0x0d, 0x28, 0xba, 0xd6, 0xd2, 0x55, 0xa5, 0x3d, 0x80, 0x18, 0xcc, + 0x7c, 0x78, 0x44, 0xb4, 0x2d, 0xa2, 0x12, 0x2a, 0x3e, 0x6b, 0x95, 0x9b, 0x6f, 0xb9, 0x5f, 0x79, + 0xde, 0xf3, 0x45, 0x4f, 0x86, 0xb5, 0xd5, 0x2c, 0x88, 0x3c, 0xcb, 0x82, 0x98, 0xf0, 0xa7, 0x00, + 0xb0, 0x9f, 0xfc, 0x3c, 0xea, 0x88, 0x84, 0x7b, 0xd3, 0xad, 0xca, 0xf7, 0x3c, 0xc9, 0x44, 0x13, + 0x04, 0x10, 0xe5, 0xbf, 0x4e, 0x87, 0xf6, 0xec, 0x5b, 0xff, 0x74, 0xf8, 0x73, 0xb0, 0xd8, 0xf7, + 0x57, 0xc7, 0x55, 0x60, 0xa4, 0x36, 0x1f, 0xbd, 0x1e, 0x7b, 0xf0, 0x49, 0xeb, 0xea, 0x53, 0xe9, + 0x7b, 0x09, 0x70, 0x28, 0x71, 0x12, 0xf8, 0x2a, 0x98, 0x61, 0x64, 0x54, 0x69, 0x93, 
0x5d, 0xdc, + 0x73, 0xeb, 0xc9, 0xfb, 0x2a, 0xb8, 0xef, 0x8b, 0x50, 0x50, 0x0f, 0x1e, 0x81, 0x05, 0x43, 0xef, + 0xec, 0x60, 0x0d, 0x77, 0x09, 0x63, 0x57, 0xce, 0x56, 0xf2, 0xf7, 0xc4, 0x72, 0xf3, 0x35, 0xf7, + 0xad, 0xa8, 0x15, 0x57, 0x61, 0xf7, 0xee, 0x84, 0x61, 0x9e, 0x04, 0x49, 0x90, 0xd0, 0x8c, 0x7d, + 0xc9, 0x76, 0x5e, 0xf2, 0xd7, 0xb3, 0x14, 0xd6, 0x09, 0xbf, 0x65, 0xa7, 0x3d, 0x97, 0x96, 0x4e, + 0xf4, 0x5c, 0x9a, 0x70, 0x6f, 0x2c, 0x4f, 0x78, 0x6f, 0xfc, 0x44, 0x02, 0x97, 0x8c, 0x0c, 0xb5, + 0x54, 0x01, 0x7c, 0x6d, 0x6e, 0x67, 0x59, 0x9b, 0x2c, 0xb5, 0xd9, 0x5c, 0x1d, 0x0d, 0x6b, 0x97, + 0xb2, 0x68, 0xa2, 0x4c, 0xfe, 0xc9, 0x1f, 0x15, 0xc1, 0xf9, 0x58, 0xb7, 0xfc, 0x1a, 0x1f, 0x6f, + 0x63, 0x37, 0xc9, 0xfc, 0x04, 0x37, 0xc9, 0x0d, 0x30, 0x2f, 0xfe, 0x1f, 0x20, 0x72, 0x11, 0xf5, + 0x36, 0x76, 0x33, 0x2c, 0x46, 0x51, 0xfd, 0xa4, 0xc7, 0xe3, 0xe2, 0x84, 0x8f, 0xc7, 0x41, 0x2f, + 0xc4, 0xff, 0xb7, 0x39, 0x65, 0x18, 0xf7, 0x42, 0xfc, 0x9b, 0x5b, 0x54, 0x9f, 0x91, 0x44, 0x07, + 0xd5, 0x43, 0x98, 0x0e, 0x93, 0xc4, 0x83, 0x90, 0x14, 0x45, 0xb4, 0xbf, 0xd4, 0x37, 0x6f, 0x9c, + 0xf0, 0xcd, 0x7b, 0x2d, 0x4b, 0xfe, 0x66, 0x7f, 0x27, 0x4e, 0xbc, 0xf1, 0xcf, 0x4c, 0x7e, 0xe3, + 0x97, 0xff, 0x22, 0x81, 0xe7, 0x52, 0x4f, 0x17, 0xb8, 0x11, 0xa2, 0x70, 0x6b, 0x11, 0x0a, 0xf7, + 0xbd, 0x54, 0xc3, 0x00, 0x8f, 0x33, 0x93, 0x9f, 0x90, 0xdf, 0xc8, 0xf6, 0x84, 0x9c, 0x70, 0x0f, + 0x1c, 0xff, 0x96, 0xdc, 0x5c, 0x7b, 0xf4, 0xb8, 0x7a, 0xe6, 0xd3, 0xc7, 0xd5, 0x33, 0x9f, 0x3f, + 0xae, 0x9e, 0xf9, 0xc5, 0xa8, 0x2a, 0x3d, 0x1a, 0x55, 0xa5, 0x4f, 0x47, 0x55, 0xe9, 0xf3, 0x51, + 0x55, 0xfa, 0xdb, 0xa8, 0x2a, 0xfd, 0xe6, 0x8b, 0xea, 0x99, 0xf7, 0xa7, 0xc5, 0x8c, 0xff, 0x0d, + 0x00, 0x00, 0xff, 0xff, 0x3e, 0x13, 0x3b, 0xc7, 0x94, 0x2b, 0x00, 0x00, } func (m *ControllerRevision) Marshal() (dAtA []byte, err error) { @@ -2522,6 +2560,39 @@ func (m *StatefulSetList) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *StatefulSetPersistentVolumeClaimRetentionPolicy) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StatefulSetPersistentVolumeClaimRetentionPolicy) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StatefulSetPersistentVolumeClaimRetentionPolicy) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.WhenScaled) + copy(dAtA[i:], m.WhenScaled) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.WhenScaled))) + i-- + dAtA[i] = 0x12 + i -= len(m.WhenDeleted) + copy(dAtA[i:], m.WhenDeleted) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.WhenDeleted))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + func (m *StatefulSetSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -2542,6 +2613,21 @@ func (m *StatefulSetSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.PersistentVolumeClaimRetentionPolicy != nil { + { + size, err := m.PersistentVolumeClaimRetentionPolicy.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x52 + } + i = encodeVarintGenerated(dAtA, i, uint64(m.MinReadySeconds)) + i-- + dAtA[i] = 0x48 if m.RevisionHistoryLimit != nil { i = encodeVarintGenerated(dAtA, i, uint64(*m.RevisionHistoryLimit)) i-- @@ -2631,6 
+2717,9 @@ func (m *StatefulSetStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + i = encodeVarintGenerated(dAtA, i, uint64(m.AvailableReplicas)) + i-- + dAtA[i] = 0x58 if len(m.Conditions) > 0 { for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { { @@ -3225,6 +3314,19 @@ func (m *StatefulSetList) Size() (n int) { return n } +func (m *StatefulSetPersistentVolumeClaimRetentionPolicy) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.WhenDeleted) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.WhenScaled) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + func (m *StatefulSetSpec) Size() (n int) { if m == nil { return 0 @@ -3255,6 +3357,11 @@ func (m *StatefulSetSpec) Size() (n int) { if m.RevisionHistoryLimit != nil { n += 1 + sovGenerated(uint64(*m.RevisionHistoryLimit)) } + n += 1 + sovGenerated(uint64(m.MinReadySeconds)) + if m.PersistentVolumeClaimRetentionPolicy != nil { + l = m.PersistentVolumeClaimRetentionPolicy.Size() + n += 1 + l + sovGenerated(uint64(l)) + } return n } @@ -3282,6 +3389,7 @@ func (m *StatefulSetStatus) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) } } + n += 1 + sovGenerated(uint64(m.AvailableReplicas)) return n } @@ -3711,6 +3819,17 @@ func (this *StatefulSetList) String() string { }, "") return s } +func (this *StatefulSetPersistentVolumeClaimRetentionPolicy) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&StatefulSetPersistentVolumeClaimRetentionPolicy{`, + `WhenDeleted:` + fmt.Sprintf("%v", this.WhenDeleted) + `,`, + `WhenScaled:` + fmt.Sprintf("%v", this.WhenScaled) + `,`, + `}`, + }, "") + return s +} func (this *StatefulSetSpec) String() string { if this == nil { return "nil" @@ -3729,6 +3848,8 @@ func (this *StatefulSetSpec) String() string { `PodManagementPolicy:` + fmt.Sprintf("%v", this.PodManagementPolicy) + `,`, `UpdateStrategy:` + strings.Replace(strings.Replace(this.UpdateStrategy.String(), "StatefulSetUpdateStrategy", "StatefulSetUpdateStrategy", 1), `&`, ``, 1) + `,`, `RevisionHistoryLimit:` + valueToStringGenerated(this.RevisionHistoryLimit) + `,`, + `MinReadySeconds:` + fmt.Sprintf("%v", this.MinReadySeconds) + `,`, + `PersistentVolumeClaimRetentionPolicy:` + strings.Replace(this.PersistentVolumeClaimRetentionPolicy.String(), "StatefulSetPersistentVolumeClaimRetentionPolicy", "StatefulSetPersistentVolumeClaimRetentionPolicy", 1) + `,`, `}`, }, "") return s @@ -3752,6 +3873,7 @@ func (this *StatefulSetStatus) String() string { `UpdateRevision:` + fmt.Sprintf("%v", this.UpdateRevision) + `,`, `CollisionCount:` + valueToStringGenerated(this.CollisionCount) + `,`, `Conditions:` + repeatedStringForConditions + `,`, + `AvailableReplicas:` + fmt.Sprintf("%v", this.AvailableReplicas) + `,`, `}`, }, "") return s @@ -8217,6 +8339,120 @@ func (m *StatefulSetList) Unmarshal(dAtA []byte) error { } return nil } +func (m *StatefulSetPersistentVolumeClaimRetentionPolicy) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StatefulSetPersistentVolumeClaimRetentionPolicy: wiretype end group for non-group") + } + if fieldNum <= 0 { + 
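+			// Field numbers in the proto wire format start at 1: each key byte encodes fieldNum<<3|wireType (0x0a is field 1 and 0x12 is field 2, the tags MarshalToSizedBuffer writes for WhenDeleted and WhenScaled above), so a decoded field number of 0 can only come from corrupt input.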
return fmt.Errorf("proto: StatefulSetPersistentVolumeClaimRetentionPolicy: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field WhenDeleted", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.WhenDeleted = PersistentVolumeClaimRetentionPolicyType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field WhenScaled", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.WhenScaled = PersistentVolumeClaimRetentionPolicyType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -8486,6 +8722,61 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { } } m.RevisionHistoryLimit = &v + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MinReadySeconds", wireType) + } + m.MinReadySeconds = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MinReadySeconds |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PersistentVolumeClaimRetentionPolicy", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.PersistentVolumeClaimRetentionPolicy == nil { + m.PersistentVolumeClaimRetentionPolicy = &StatefulSetPersistentVolumeClaimRetentionPolicy{} + } + if err := m.PersistentVolumeClaimRetentionPolicy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -8749,6 +9040,25 @@ func (m 
*StatefulSetStatus) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 11: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AvailableReplicas", wireType) + } + m.AvailableReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.AvailableReplicas |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) diff --git a/vendor/k8s.io/api/apps/v1beta2/generated.proto b/vendor/k8s.io/api/apps/v1beta2/generated.proto index ff306ba6a9a91..6d9505865a50a 100644 --- a/vendor/k8s.io/api/apps/v1beta2/generated.proto +++ b/vendor/k8s.io/api/apps/v1beta2/generated.proto @@ -169,8 +169,8 @@ message DaemonSetStatus { // More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/ optional int32 desiredNumberScheduled = 3; - // The number of nodes that should be running the daemon pod and have one - // or more of the daemon pod running and ready. + // Total number of nodes that should be running the daemon pod and have one + // or more of the daemon pod running with a Ready Condition by passing the readinessProbe. optional int32 numberReady = 4; // The most recent generation observed by the daemon set controller. @@ -327,7 +327,7 @@ message DeploymentStatus { // +optional optional int32 updatedReplicas = 3; - // Total number of ready pods targeted by this deployment. + // readyReplicas is the number of pods targeted by this Deployment controller with a Ready Condition. // +optional optional int32 readyReplicas = 7; @@ -463,7 +463,7 @@ message ReplicaSetStatus { // +optional optional int32 fullyLabeledReplicas = 2; - // The number of ready replicas for this replica set. + // readyReplicas is the number of pods targeted by this ReplicaSet controller with a Ready Condition. // +optional optional int32 readyReplicas = 4; @@ -487,7 +487,7 @@ message RollingUpdateDaemonSet { // The maximum number of DaemonSet pods that can be unavailable during the // update. Value can be an absolute number (ex: 5) or a percentage of total // number of DaemonSet pods at the start of the update (ex: 10%). Absolute - // number is calculated from percentage by rounding down to a minimum of one. + // number is calculated from percentage by rounding up. // This cannot be 0 if MaxSurge is 0 // Default value is 1. // Example: when this is set to 30%, at most 30% of the total number of nodes @@ -519,7 +519,7 @@ message RollingUpdateDaemonSet { // daemonset on any given node can double if the readiness check fails, and // so resource intensive daemonsets should take into account that they may // cause evictions during disruption. - // This is an alpha field and requires enabling DaemonSetUpdateSurge feature gate. + // This is a beta field and enabled/disabled by the DaemonSetUpdateSurge feature gate. // +optional optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxSurge = 2; } @@ -592,6 +592,7 @@ message ScaleStatus { // label query over pods that should match the replicas count. More info: http://kubernetes.io/docs/user-guide/labels#label-selectors // +optional + // +mapType=atomic map<string, string> selector = 2; // label selector for pods that should match the replicas count.
This is a serializated @@ -655,6 +656,23 @@ message StatefulSetList { repeated StatefulSet items = 2; } +// StatefulSetPersistentVolumeClaimRetentionPolicy describes the policy used for PVCs +// created from the StatefulSet VolumeClaimTemplates. +message StatefulSetPersistentVolumeClaimRetentionPolicy { + // WhenDeleted specifies what happens to PVCs created from StatefulSet + // VolumeClaimTemplates when the StatefulSet is deleted. The default policy + // of `Retain` causes PVCs to not be affected by StatefulSet deletion. The + // `Delete` policy causes those PVCs to be deleted. + optional string whenDeleted = 1; + + // WhenScaled specifies what happens to PVCs created from StatefulSet + // VolumeClaimTemplates when the StatefulSet is scaled down. The default + // policy of `Retain` causes PVCs to not be affected by a scaledown. The + // `Delete` policy causes the associated PVCs for any excess pods above + // the replica count to be deleted. + optional string whenScaled = 2; +} + // A StatefulSetSpec is the specification of a StatefulSet. message StatefulSetSpec { // replicas is the desired number of replicas of the given Template. @@ -714,6 +732,19 @@ message StatefulSetSpec { // consists of all revisions not represented by a currently applied // StatefulSetSpec version. The default value is 10. optional int32 revisionHistoryLimit = 8; + + // Minimum number of seconds for which a newly created pod should be ready + // without any of its containers crashing for it to be considered available. + // Defaults to 0 (pod will be considered available as soon as it is ready). + // This is an alpha field and requires enabling the StatefulSetMinReadySeconds feature gate. + // +optional + optional int32 minReadySeconds = 9; + + // PersistentVolumeClaimRetentionPolicy describes the policy used for PVCs created from + // the StatefulSet VolumeClaimTemplates. This requires the + // StatefulSetAutoDeletePVC feature gate to be enabled, which is alpha. + // +optional + optional StatefulSetPersistentVolumeClaimRetentionPolicy persistentVolumeClaimRetentionPolicy = 10; } // StatefulSetStatus represents the current state of a StatefulSet. @@ -726,7 +757,7 @@ message StatefulSetStatus { // replicas is the number of Pods created by the StatefulSet controller. optional int32 replicas = 2; - // readyReplicas is the number of Pods created by the StatefulSet controller that have a Ready Condition. + // readyReplicas is the number of pods created by this StatefulSet controller with a Ready Condition. optional int32 readyReplicas = 3; // currentReplicas is the number of Pods created by the StatefulSet controller from the StatefulSet version @@ -756,6 +787,10 @@ message StatefulSetStatus { // +patchMergeKey=type // +patchStrategy=merge repeated StatefulSetCondition conditions = 10; + + // Total number of available pods (ready for at least minReadySeconds) targeted by this StatefulSet. + // This is a beta field and enabled/disabled by the StatefulSetMinReadySeconds feature gate. + optional int32 availableReplicas = 11; } // StatefulSetUpdateStrategy indicates the strategy that the StatefulSet diff --git a/vendor/k8s.io/api/apps/v1beta2/types.go b/vendor/k8s.io/api/apps/v1beta2/types.go index 316a0ad24d25b..332bc7ed82bf2 100644 --- a/vendor/k8s.io/api/apps/v1beta2/types.go +++ b/vendor/k8s.io/api/apps/v1beta2/types.go @@ -45,6 +45,7 @@ type ScaleStatus struct { // label query over pods that should match the replicas count.
More info: http://kubernetes.io/docs/user-guide/labels#label-selectors // +optional + // +mapType=atomic Selector map[string]string `json:"selector,omitempty" protobuf:"bytes,2,rep,name=selector"` // label selector for pods that should match the replicas count. This is a serializated @@ -82,6 +83,7 @@ type Scale struct { // +genclient // +genclient:method=GetScale,verb=get,subresource=scale,result=Scale // +genclient:method=UpdateScale,verb=update,subresource=scale,input=Scale,result=Scale +// +genclient:method=ApplyScale,verb=apply,subresource=scale,input=Scale,result=Scale // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +k8s:prerelease-lifecycle-gen:introduced=1.8 // +k8s:prerelease-lifecycle-gen:deprecated=1.9 @@ -167,6 +169,40 @@ type RollingUpdateStatefulSetStrategy struct { Partition *int32 `json:"partition,omitempty" protobuf:"varint,1,opt,name=partition"` } +// PersistentVolumeClaimRetentionPolicyType is a string enumeration of the policies that will determine +// when volumes from the VolumeClaimTemplates will be deleted when the controlling StatefulSet is +// deleted or scaled down. +type PersistentVolumeClaimRetentionPolicyType string + +const ( + // RetainPersistentVolumeClaimRetentionPolicyType is the default + // PersistentVolumeClaimRetentionPolicy and specifies that + // PersistentVolumeClaims associated with StatefulSet VolumeClaimTemplates + // will not be deleted. + RetainPersistentVolumeClaimRetentionPolicyType PersistentVolumeClaimRetentionPolicyType = "Retain" + // RetentionPersistentVolumeClaimRetentionPolicyType specifies that + // PersistentVolumeClaims associated with StatefulSet VolumeClaimTemplates + // will be deleted in the scenario specified in + // StatefulSetPersistentVolumeClaimRetentionPolicy. + RetentionPersistentVolumeClaimRetentionPolicyType PersistentVolumeClaimRetentionPolicyType = "Delete" +) + +// StatefulSetPersistentVolumeClaimRetentionPolicy describes the policy used for PVCs +// created from the StatefulSet VolumeClaimTemplates. +type StatefulSetPersistentVolumeClaimRetentionPolicy struct { + // WhenDeleted specifies what happens to PVCs created from StatefulSet + // VolumeClaimTemplates when the StatefulSet is deleted. The default policy + // of `Retain` causes PVCs to not be affected by StatefulSet deletion. The + // `Delete` policy causes those PVCs to be deleted. + WhenDeleted PersistentVolumeClaimRetentionPolicyType `json:"whenDeleted,omitempty" protobuf:"bytes,1,opt,name=whenDeleted,casttype=PersistentVolumeClaimRetentionPolicyType"` + // WhenScaled specifies what happens to PVCs created from StatefulSet + // VolumeClaimTemplates when the StatefulSet is scaled down. The default + // policy of `Retain` causes PVCs to not be affected by a scaledown. The + // `Delete` policy causes the associated PVCs for any excess pods above + // the replica count to be deleted. + WhenScaled PersistentVolumeClaimRetentionPolicyType `json:"whenScaled,omitempty" protobuf:"bytes,2,opt,name=whenScaled,casttype=PersistentVolumeClaimRetentionPolicyType"` +} + // A StatefulSetSpec is the specification of a StatefulSet. type StatefulSetSpec struct { // replicas is the desired number of replicas of the given Template. @@ -226,6 +262,19 @@ type StatefulSetSpec struct { // consists of all revisions not represented by a currently applied // StatefulSetSpec version. The default value is 10. 
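	// (Using *int32 lets an unset value, which selects that default of 10,
	// be distinguished from an explicit revisionHistoryLimit of 0.)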
RevisionHistoryLimit *int32 `json:"revisionHistoryLimit,omitempty" protobuf:"varint,8,opt,name=revisionHistoryLimit"` + + // Minimum number of seconds for which a newly created pod should be ready + // without any of its containers crashing for it to be considered available. + // Defaults to 0 (pod will be considered available as soon as it is ready). + // This is an alpha field and requires enabling the StatefulSetMinReadySeconds feature gate. + // +optional + MinReadySeconds int32 `json:"minReadySeconds,omitempty" protobuf:"varint,9,opt,name=minReadySeconds"` + + // PersistentVolumeClaimRetentionPolicy describes the policy used for PVCs created from + // the StatefulSet VolumeClaimTemplates. This requires the + // StatefulSetAutoDeletePVC feature gate to be enabled, which is alpha. + // +optional + PersistentVolumeClaimRetentionPolicy *StatefulSetPersistentVolumeClaimRetentionPolicy `json:"persistentVolumeClaimRetentionPolicy,omitempty" protobuf:"bytes,10,opt,name=persistentVolumeClaimRetentionPolicy"` } // StatefulSetStatus represents the current state of a StatefulSet. @@ -238,7 +287,7 @@ type StatefulSetStatus struct { // replicas is the number of Pods created by the StatefulSet controller. Replicas int32 `json:"replicas" protobuf:"varint,2,opt,name=replicas"` - // readyReplicas is the number of Pods created by the StatefulSet controller that have a Ready Condition. + // readyReplicas is the number of pods created by this StatefulSet controller with a Ready Condition. ReadyReplicas int32 `json:"readyReplicas,omitempty" protobuf:"varint,3,opt,name=readyReplicas"` // currentReplicas is the number of Pods created by the StatefulSet controller from the StatefulSet version @@ -268,6 +317,10 @@ type StatefulSetStatus struct { // +patchMergeKey=type // +patchStrategy=merge Conditions []StatefulSetCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,10,rep,name=conditions"` + + // Total number of available pods (ready for at least minReadySeconds) targeted by this StatefulSet. + // This is a beta field and enabled/disabled by the StatefulSetMinReadySeconds feature gate. + AvailableReplicas int32 `json:"availableReplicas" protobuf:"varint,11,opt,name=availableReplicas"` } type StatefulSetConditionType string @@ -448,7 +501,7 @@ type DeploymentStatus struct { // +optional UpdatedReplicas int32 `json:"updatedReplicas,omitempty" protobuf:"varint,3,opt,name=updatedReplicas"` - // Total number of ready pods targeted by this deployment. + // readyReplicas is the number of pods targeted by this Deployment controller with a Ready Condition. // +optional ReadyReplicas int32 `json:"readyReplicas,omitempty" protobuf:"varint,7,opt,name=readyReplicas"` @@ -554,7 +607,7 @@ type RollingUpdateDaemonSet struct { // The maximum number of DaemonSet pods that can be unavailable during the // update. Value can be an absolute number (ex: 5) or a percentage of total // number of DaemonSet pods at the start of the update (ex: 10%). Absolute - // number is calculated from percentage by rounding down to a minimum of one. + // number is calculated from percentage by rounding up. // This cannot be 0 if MaxSurge is 0 // Default value is 1. // Example: when this is set to 30%, at most 30% of the total number of nodes @@ -586,7 +639,7 @@ type RollingUpdateDaemonSet struct { // daemonset on any given node can double if the readiness check fails, and // so resource intensive daemonsets should take into account that they may // cause evictions during disruption.
- // This is an alpha field and requires enabling DaemonSetUpdateSurge feature gate. + // This is a beta field and enabled/disabled by the DaemonSetUpdateSurge feature gate. // +optional MaxSurge *intstr.IntOrString `json:"maxSurge,omitempty" protobuf:"bytes,2,opt,name=maxSurge"` } @@ -641,8 +694,8 @@ type DaemonSetStatus struct { // More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/ DesiredNumberScheduled int32 `json:"desiredNumberScheduled" protobuf:"varint,3,opt,name=desiredNumberScheduled"` - // The number of nodes that should be running the daemon pod and have one - // or more of the daemon pod running and ready. + // Total number of nodes that should be running the daemon pod and have one - // or more of the daemon pod running with a Ready Condition by passing the readinessProbe. NumberReady int32 `json:"numberReady" protobuf:"varint,4,opt,name=numberReady"` // The most recent generation observed by the daemon set controller. @@ -845,7 +898,7 @@ type ReplicaSetStatus struct { // +optional FullyLabeledReplicas int32 `json:"fullyLabeledReplicas,omitempty" protobuf:"varint,2,opt,name=fullyLabeledReplicas"` - // The number of ready replicas for this replica set. + // readyReplicas is the number of pods targeted by this ReplicaSet controller with a Ready Condition. // +optional ReadyReplicas int32 `json:"readyReplicas,omitempty" protobuf:"varint,4,opt,name=readyReplicas"` diff --git a/vendor/k8s.io/api/apps/v1beta2/types_swagger_doc_generated.go b/vendor/k8s.io/api/apps/v1beta2/types_swagger_doc_generated.go index 51d5522348431..454c632dc2578 100644 --- a/vendor/k8s.io/api/apps/v1beta2/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/apps/v1beta2/types_swagger_doc_generated.go @@ -100,7 +100,7 @@ var map_DaemonSetStatus = map[string]string{ "currentNumberScheduled": "The number of nodes that are running at least 1 daemon pod and are supposed to run the daemon pod. More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/", "numberMisscheduled": "The number of nodes that are running the daemon pod, but are not supposed to run the daemon pod. More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/", "desiredNumberScheduled": "The total number of nodes that should be running the daemon pod (including nodes correctly running the daemon pod).
More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/", - "numberReady": "The number of nodes that should be running the daemon pod and have one or more of the daemon pod running and ready.", + "numberReady": "Total number of nodes that should be running the daemon pod and have one or more of the daemon pod running with a Ready Condition by passing the readinessProbe.", "observedGeneration": "The most recent generation observed by the daemon set controller.", "updatedNumberScheduled": "The total number of nodes that are running updated daemon pod", "numberAvailable": "The number of nodes that should be running the daemon pod and have one or more of the daemon pod running and available (ready for at least spec.minReadySeconds)", @@ -179,7 +179,7 @@ var map_DeploymentStatus = map[string]string{ "observedGeneration": "The generation observed by the deployment controller.", "replicas": "Total number of non-terminated pods targeted by this deployment (their labels match the selector).", "updatedReplicas": "Total number of non-terminated pods targeted by this deployment that have the desired template spec.", - "readyReplicas": "Total number of ready pods targeted by this deployment.", + "readyReplicas": "readyReplicas is the number of pods targeted by this Deployment controller with a Ready Condition.", "availableReplicas": "Total number of available pods (ready for at least minReadySeconds) targeted by this deployment.", "unavailableReplicas": "Total number of unavailable pods targeted by this deployment. This is the total number of pods that are still required for the deployment to have 100% available capacity. They may either be pods that are running but not yet available or pods that still have not been created.", "conditions": "Represents the latest available observations of a deployment's current state.", @@ -250,7 +250,7 @@ var map_ReplicaSetStatus = map[string]string{ "": "ReplicaSetStatus represents the current status of a ReplicaSet.", "replicas": "Replicas is the most recently oberved number of replicas. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller", "fullyLabeledReplicas": "The number of pods that have labels matching the labels of the pod template of the replicaset.", - "readyReplicas": "The number of ready replicas for this replica set.", + "readyReplicas": "readyReplicas is the number of pods targeted by this ReplicaSet controller with a Ready Condition.", "availableReplicas": "The number of available replicas (ready for at least minReadySeconds) for this replica set.", "observedGeneration": "ObservedGeneration reflects the generation of the most recently observed ReplicaSet.", "conditions": "Represents the latest available observations of a replica set's current state.", @@ -262,8 +262,8 @@ func (ReplicaSetStatus) SwaggerDoc() map[string]string { var map_RollingUpdateDaemonSet = map[string]string{ "": "Spec to control the desired behavior of daemon set rolling update.", - "maxUnavailable": "The maximum number of DaemonSet pods that can be unavailable during the update. Value can be an absolute number (ex: 5) or a percentage of total number of DaemonSet pods at the start of the update (ex: 10%). Absolute number is calculated from percentage by rounding down to a minimum of one. This cannot be 0 if MaxSurge is 0 Default value is 1. Example: when this is set to 30%, at most 30% of the total number of nodes that should be running the daemon pod (i.e. 
status.desiredNumberScheduled) can have their pods stopped for an update at any given time. The update starts by stopping at most 30% of those DaemonSet pods and then brings up new DaemonSet pods in their place. Once the new pods are available, it then proceeds onto other DaemonSet pods, thus ensuring that at least 70% of original number of DaemonSet pods are available at all times during the update.", - "maxSurge": "The maximum number of nodes with an existing available DaemonSet pod that can have an updated DaemonSet pod during during an update. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). This can not be 0 if MaxUnavailable is 0. Absolute number is calculated from percentage by rounding up to a minimum of 1. Default value is 0. Example: when this is set to 30%, at most 30% of the total number of nodes that should be running the daemon pod (i.e. status.desiredNumberScheduled) can have their a new pod created before the old pod is marked as deleted. The update starts by launching new pods on 30% of nodes. Once an updated pod is available (Ready for at least minReadySeconds) the old DaemonSet pod on that node is marked deleted. If the old pod becomes unavailable for any reason (Ready transitions to false, is evicted, or is drained) an updated pod is immediatedly created on that node without considering surge limits. Allowing surge implies the possibility that the resources consumed by the daemonset on any given node can double if the readiness check fails, and so resource intensive daemonsets should take into account that they may cause evictions during disruption. This is an alpha field and requires enabling DaemonSetUpdateSurge feature gate.", + "maxUnavailable": "The maximum number of DaemonSet pods that can be unavailable during the update. Value can be an absolute number (ex: 5) or a percentage of total number of DaemonSet pods at the start of the update (ex: 10%). Absolute number is calculated from percentage by rounding up. This cannot be 0 if MaxSurge is 0. Default value is 1. Example: when this is set to 30%, at most 30% of the total number of nodes that should be running the daemon pod (i.e. status.desiredNumberScheduled) can have their pods stopped for an update at any given time. The update starts by stopping at most 30% of those DaemonSet pods and then brings up new DaemonSet pods in their place. Once the new pods are available, it then proceeds onto other DaemonSet pods, thus ensuring that at least 70% of original number of DaemonSet pods are available at all times during the update.", + "maxSurge": "The maximum number of nodes with an existing available DaemonSet pod that can have an updated DaemonSet pod during an update. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). This cannot be 0 if MaxUnavailable is 0. Absolute number is calculated from percentage by rounding up to a minimum of 1. Default value is 0. Example: when this is set to 30%, at most 30% of the total number of nodes that should be running the daemon pod (i.e. status.desiredNumberScheduled) can have a new pod created before the old pod is marked as deleted. The update starts by launching new pods on 30% of nodes. Once an updated pod is available (Ready for at least minReadySeconds) the old DaemonSet pod on that node is marked deleted. If the old pod becomes unavailable for any reason (Ready transitions to false, is evicted, or is drained) an updated pod is immediately created on that node without considering surge limits.
Allowing surge implies the possibility that the resources consumed by the daemonset on any given node can double if the readiness check fails, and so resource intensive daemonsets should take into account that they may cause evictions during disruption. This is a beta field and enabled/disabled by the DaemonSetUpdateSurge feature gate.", } func (RollingUpdateDaemonSet) SwaggerDoc() map[string]string { return map_RollingUpdateDaemonSet } @@ -351,16 +351,28 @@ func (StatefulSetList) SwaggerDoc() map[string]string { return map_StatefulSetList } +var map_StatefulSetPersistentVolumeClaimRetentionPolicy = map[string]string{ + "": "StatefulSetPersistentVolumeClaimRetentionPolicy describes the policy used for PVCs created from the StatefulSet VolumeClaimTemplates.", + "whenDeleted": "WhenDeleted specifies what happens to PVCs created from StatefulSet VolumeClaimTemplates when the StatefulSet is deleted. The default policy of `Retain` causes PVCs to not be affected by StatefulSet deletion. The `Delete` policy causes those PVCs to be deleted.", + "whenScaled": "WhenScaled specifies what happens to PVCs created from StatefulSet VolumeClaimTemplates when the StatefulSet is scaled down. The default policy of `Retain` causes PVCs to not be affected by a scaledown. The `Delete` policy causes the associated PVCs for any excess pods above the replica count to be deleted.", +} + +func (StatefulSetPersistentVolumeClaimRetentionPolicy) SwaggerDoc() map[string]string { + return map_StatefulSetPersistentVolumeClaimRetentionPolicy +} + var map_StatefulSetSpec = map[string]string{ - "": "A StatefulSetSpec is the specification of a StatefulSet.", - "replicas": "replicas is the desired number of replicas of the given Template. These are replicas in the sense that they are instantiations of the same Template, but individual replicas also have a consistent identity. If unspecified, defaults to 1.", - "selector": "selector is a label query over pods that should match the replica count. It must match the pod template's labels. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors", - "template": "template is the object that describes the pod that will be created if insufficient replicas are detected. Each pod stamped out by the StatefulSet will fulfill this Template, but have a unique identity from the rest of the StatefulSet.", - "volumeClaimTemplates": "volumeClaimTemplates is a list of claims that pods are allowed to reference. The StatefulSet controller is responsible for mapping network identities to claims in a way that maintains the identity of a pod. Every claim in this list must have at least one matching (by name) volumeMount in one container in the template. A claim in this list takes precedence over any volumes in the template, with the same name.", - "serviceName": "serviceName is the name of the service that governs this StatefulSet. This service must exist before the StatefulSet, and is responsible for the network identity of the set. Pods get DNS/hostnames that follow the pattern: pod-specific-string.serviceName.default.svc.cluster.local where \"pod-specific-string\" is managed by the StatefulSet controller.", - "podManagementPolicy": "podManagementPolicy controls how pods are created during initial scale up, when replacing pods on nodes, or when scaling down. The default policy is `OrderedReady`, where pods are created in increasing order (pod-0, then pod-1, etc) and the controller will wait until each pod is ready before continuing. When scaling down, the pods are removed in the opposite order.
The alternative policy is `Parallel` which will create pods in parallel to match the desired scale without waiting, and on scale down will delete all pods at once.", - "updateStrategy": "updateStrategy indicates the StatefulSetUpdateStrategy that will be employed to update Pods in the StatefulSet when a revision is made to Template.", - "revisionHistoryLimit": "revisionHistoryLimit is the maximum number of revisions that will be maintained in the StatefulSet's revision history. The revision history consists of all revisions not represented by a currently applied StatefulSetSpec version. The default value is 10.", + "": "A StatefulSetSpec is the specification of a StatefulSet.", + "replicas": "replicas is the desired number of replicas of the given Template. These are replicas in the sense that they are instantiations of the same Template, but individual replicas also have a consistent identity. If unspecified, defaults to 1.", + "selector": "selector is a label query over pods that should match the replica count. It must match the pod template's labels. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors", + "template": "template is the object that describes the pod that will be created if insufficient replicas are detected. Each pod stamped out by the StatefulSet will fulfill this Template, but have a unique identity from the rest of the StatefulSet.", + "volumeClaimTemplates": "volumeClaimTemplates is a list of claims that pods are allowed to reference. The StatefulSet controller is responsible for mapping network identities to claims in a way that maintains the identity of a pod. Every claim in this list must have at least one matching (by name) volumeMount in one container in the template. A claim in this list takes precedence over any volumes in the template, with the same name.", + "serviceName": "serviceName is the name of the service that governs this StatefulSet. This service must exist before the StatefulSet, and is responsible for the network identity of the set. Pods get DNS/hostnames that follow the pattern: pod-specific-string.serviceName.default.svc.cluster.local where \"pod-specific-string\" is managed by the StatefulSet controller.", + "podManagementPolicy": "podManagementPolicy controls how pods are created during initial scale up, when replacing pods on nodes, or when scaling down. The default policy is `OrderedReady`, where pods are created in increasing order (pod-0, then pod-1, etc) and the controller will wait until each pod is ready before continuing. When scaling down, the pods are removed in the opposite order. The alternative policy is `Parallel` which will create pods in parallel to match the desired scale without waiting, and on scale down will delete all pods at once.", + "updateStrategy": "updateStrategy indicates the StatefulSetUpdateStrategy that will be employed to update Pods in the StatefulSet when a revision is made to Template.", + "revisionHistoryLimit": "revisionHistoryLimit is the maximum number of revisions that will be maintained in the StatefulSet's revision history. The revision history consists of all revisions not represented by a currently applied StatefulSetSpec version. The default value is 10.", + "minReadySeconds": "Minimum number of seconds for which a newly created pod should be ready without any of its containers crashing for it to be considered available.
Defaults to 0 (pod will be considered available as soon as it is ready). This is an alpha field and requires enabling the StatefulSetMinReadySeconds feature gate.", + "persistentVolumeClaimRetentionPolicy": "PersistentVolumeClaimRetentionPolicy describes the policy used for PVCs created from the StatefulSet VolumeClaimTemplates. This requires the StatefulSetAutoDeletePVC feature gate to be enabled, which is alpha.", } func (StatefulSetSpec) SwaggerDoc() map[string]string { return map_StatefulSetSpec } @@ -371,13 +383,14 @@ var map_StatefulSetStatus = map[string]string{ "": "StatefulSetStatus represents the current state of a StatefulSet.", "observedGeneration": "observedGeneration is the most recent generation observed for this StatefulSet. It corresponds to the StatefulSet's generation, which is updated on mutation by the API Server.", "replicas": "replicas is the number of Pods created by the StatefulSet controller.", - "readyReplicas": "readyReplicas is the number of Pods created by the StatefulSet controller that have a Ready Condition.", + "readyReplicas": "readyReplicas is the number of pods created by this StatefulSet controller with a Ready Condition.", "currentReplicas": "currentReplicas is the number of Pods created by the StatefulSet controller from the StatefulSet version indicated by currentRevision.", "updatedReplicas": "updatedReplicas is the number of Pods created by the StatefulSet controller from the StatefulSet version indicated by updateRevision.", "currentRevision": "currentRevision, if not empty, indicates the version of the StatefulSet used to generate Pods in the sequence [0,currentReplicas).", "updateRevision": "updateRevision, if not empty, indicates the version of the StatefulSet used to generate Pods in the sequence [replicas-updatedReplicas,replicas)", "collisionCount": "collisionCount is the count of hash collisions for the StatefulSet. The StatefulSet controller uses this field as a collision avoidance mechanism when it needs to create the name for the newest ControllerRevision.", "conditions": "Represents the latest available observations of a statefulset's current state.", + "availableReplicas": "Total number of available pods (ready for at least minReadySeconds) targeted by this StatefulSet. This is a beta field and enabled/disabled by the StatefulSetMinReadySeconds feature gate.", } func (StatefulSetStatus) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/apps/v1beta2/zz_generated.deepcopy.go b/vendor/k8s.io/api/apps/v1beta2/zz_generated.deepcopy.go index 0a84d1b08a8ae..8293b9886b6c9 100644 --- a/vendor/k8s.io/api/apps/v1beta2/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/apps/v1beta2/zz_generated.deepcopy.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* @@ -754,6 +755,22 @@ func (in *StatefulSetList) DeepCopyObject() runtime.Object { return nil } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StatefulSetPersistentVolumeClaimRetentionPolicy) DeepCopyInto(out *StatefulSetPersistentVolumeClaimRetentionPolicy) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatefulSetPersistentVolumeClaimRetentionPolicy.
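+// The struct contains only two string-valued enum fields, so the shallow
+// *out = *in assignment in DeepCopyInto above already yields a complete copy.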
+func (in *StatefulSetPersistentVolumeClaimRetentionPolicy) DeepCopy() *StatefulSetPersistentVolumeClaimRetentionPolicy { + if in == nil { + return nil + } + out := new(StatefulSetPersistentVolumeClaimRetentionPolicy) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *StatefulSetSpec) DeepCopyInto(out *StatefulSetSpec) { *out = *in @@ -781,6 +798,11 @@ func (in *StatefulSetSpec) DeepCopyInto(out *StatefulSetSpec) { *out = new(int32) **out = **in } + if in.PersistentVolumeClaimRetentionPolicy != nil { + in, out := &in.PersistentVolumeClaimRetentionPolicy, &out.PersistentVolumeClaimRetentionPolicy + *out = new(StatefulSetPersistentVolumeClaimRetentionPolicy) + **out = **in + } return } diff --git a/vendor/k8s.io/api/apps/v1beta2/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/apps/v1beta2/zz_generated.prerelease-lifecycle.go index 3368a18964de7..1ead8a4c138d6 100644 --- a/vendor/k8s.io/api/apps/v1beta2/zz_generated.prerelease-lifecycle.go +++ b/vendor/k8s.io/api/apps/v1beta2/zz_generated.prerelease-lifecycle.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* diff --git a/vendor/k8s.io/api/authentication/v1/generated.proto b/vendor/k8s.io/api/authentication/v1/generated.proto index c91fd92a57193..8f928be408f06 100644 --- a/vendor/k8s.io/api/authentication/v1/generated.proto +++ b/vendor/k8s.io/api/authentication/v1/generated.proto @@ -58,11 +58,15 @@ message ExtraValue { // TokenRequest requests a token for a given service account. message TokenRequest { + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + // Spec holds information about the request being evaluated optional TokenRequestSpec spec = 2; + // Status is filled in by the server and indicates whether the token can be authenticated. // +optional optional TokenRequestStatus status = 3; } @@ -105,6 +109,8 @@ message TokenRequestStatus { // Note: TokenReview requests may be cached by the webhook token authenticator // plugin in the kube-apiserver. message TokenReview { + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; diff --git a/vendor/k8s.io/api/authentication/v1/types.go b/vendor/k8s.io/api/authentication/v1/types.go index 6f5f0ad1a308b..7ba247d633cfe 100644 --- a/vendor/k8s.io/api/authentication/v1/types.go +++ b/vendor/k8s.io/api/authentication/v1/types.go @@ -31,6 +31,9 @@ const ( // It can be repeated multiplied times for multiple groups. ImpersonateGroupHeader = "Impersonate-Group" + // ImpersonateUIDHeader is used to impersonate a particular UID during an API server request + ImpersonateUIDHeader = "Impersonate-Uid" + // ImpersonateUserExtraHeaderPrefix is a prefix for any header used to impersonate an entry in the // extra map[string][]string for user.Info. The key will be every after the prefix. // It can be repeated multiplied times for multiple map keys and the same key can be repeated multiple @@ -48,6 +51,8 @@ const ( // plugin in the kube-apiserver. type TokenReview struct { metav1.TypeMeta `json:",inline"` + // Standard object's metadata. 
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -130,10 +135,15 @@ func (t ExtraValue) String() string { // TokenRequest requests a token for a given service account. type TokenRequest struct { metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // Spec holds information about the request being evaluated Spec TokenRequestSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` + + // Status is filled in by the server and indicates whether the token can be authenticated. // +optional Status TokenRequestStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` } diff --git a/vendor/k8s.io/api/authentication/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/authentication/v1/types_swagger_doc_generated.go index 09f6b920fd8d3..f9a88a3df25c8 100644 --- a/vendor/k8s.io/api/authentication/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/authentication/v1/types_swagger_doc_generated.go @@ -40,7 +40,10 @@ func (BoundObjectReference) SwaggerDoc() map[string]string { } var map_TokenRequest = map[string]string{ - "": "TokenRequest requests a token for a given service account.", + "": "TokenRequest requests a token for a given service account.", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "Spec holds information about the request being evaluated", + "status": "Status is filled in by the server and indicates whether the token can be authenticated.", } func (TokenRequest) SwaggerDoc() map[string]string { @@ -69,9 +72,10 @@ func (TokenRequestStatus) SwaggerDoc() map[string]string { } var map_TokenReview = map[string]string{ - "": "TokenReview attempts to authenticate a token to a known user. Note: TokenReview requests may be cached by the webhook token authenticator plugin in the kube-apiserver.", - "spec": "Spec holds information about the request being evaluated", - "status": "Status is filled in by the server and indicates whether the request can be authenticated.", + "": "TokenReview attempts to authenticate a token to a known user. Note: TokenReview requests may be cached by the webhook token authenticator plugin in the kube-apiserver.", + "metadata": "Standard object's metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "Spec holds information about the request being evaluated", + "status": "Status is filled in by the server and indicates whether the request can be authenticated.", } func (TokenReview) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/authentication/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/authentication/v1/zz_generated.deepcopy.go index aca99c42b765d..2af533191ba37 100644 --- a/vendor/k8s.io/api/authentication/v1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/authentication/v1/zz_generated.deepcopy.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* diff --git a/vendor/k8s.io/api/authentication/v1beta1/generated.proto b/vendor/k8s.io/api/authentication/v1beta1/generated.proto index d3bff49eb57eb..67a32b320e650 100644 --- a/vendor/k8s.io/api/authentication/v1beta1/generated.proto +++ b/vendor/k8s.io/api/authentication/v1beta1/generated.proto @@ -41,13 +41,15 @@ message ExtraValue { // Note: TokenReview requests may be cached by the webhook token authenticator // plugin in the kube-apiserver. message TokenReview { + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Spec holds information about the request being evaluated optional TokenReviewSpec spec = 2; - // Status is filled in by the server and indicates whether the request can be authenticated. + // Status is filled in by the server and indicates whether the token can be authenticated. // +optional optional TokenReviewStatus status = 3; } diff --git a/vendor/k8s.io/api/authentication/v1beta1/types.go b/vendor/k8s.io/api/authentication/v1beta1/types.go index 121b3461811b3..08e1e09b640ab 100644 --- a/vendor/k8s.io/api/authentication/v1beta1/types.go +++ b/vendor/k8s.io/api/authentication/v1beta1/types.go @@ -35,13 +35,15 @@ import ( // plugin in the kube-apiserver. type TokenReview struct { metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Spec holds information about the request being evaluated Spec TokenReviewSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` - // Status is filled in by the server and indicates whether the request can be authenticated. + // Status is filled in by the server and indicates whether the token can be authenticated. // +optional Status TokenReviewStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` } diff --git a/vendor/k8s.io/api/authentication/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/authentication/v1beta1/types_swagger_doc_generated.go index 8c9acfb5b2492..1086955c3a872 100644 --- a/vendor/k8s.io/api/authentication/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/authentication/v1beta1/types_swagger_doc_generated.go @@ -28,9 +28,10 @@ package v1beta1 // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_TokenReview = map[string]string{ - "": "TokenReview attempts to authenticate a token to a known user. 
Note: TokenReview requests may be cached by the webhook token authenticator plugin in the kube-apiserver.", - "spec": "Spec holds information about the request being evaluated", - "status": "Status is filled in by the server and indicates whether the request can be authenticated.", + "": "TokenReview attempts to authenticate a token to a known user. Note: TokenReview requests may be cached by the webhook token authenticator plugin in the kube-apiserver.", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "Spec holds information about the request being evaluated", + "status": "Status is filled in by the server and indicates whether the token can be authenticated.", } func (TokenReview) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/authentication/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/authentication/v1beta1/zz_generated.deepcopy.go index a5d82a8100a57..059ec1a864785 100644 --- a/vendor/k8s.io/api/authentication/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/authentication/v1beta1/zz_generated.deepcopy.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* diff --git a/vendor/k8s.io/api/authentication/v1beta1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/authentication/v1beta1/zz_generated.prerelease-lifecycle.go index 9b5744db787e6..e448106e419ca 100644 --- a/vendor/k8s.io/api/authentication/v1beta1/zz_generated.prerelease-lifecycle.go +++ b/vendor/k8s.io/api/authentication/v1beta1/zz_generated.prerelease-lifecycle.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* diff --git a/vendor/k8s.io/api/authorization/v1/generated.proto b/vendor/k8s.io/api/authorization/v1/generated.proto index 931b0f4995e0c..0170ee11fd620 100644 --- a/vendor/k8s.io/api/authorization/v1/generated.proto +++ b/vendor/k8s.io/api/authorization/v1/generated.proto @@ -41,6 +41,8 @@ message ExtraValue { // Having a namespace scoped resource makes it much easier to grant namespace scoped policy that includes permissions // checking. message LocalSubjectAccessReview { + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; @@ -134,6 +136,8 @@ message ResourceRule { // spec.namespace means "in all namespaces". Self is a special case, because users should always be able // to check whether they can perform an action message SelfSubjectAccessReview { + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; @@ -164,6 +168,8 @@ message SelfSubjectAccessReviewSpec { // drive authorization decisions as this raises confused deputy, cache lifetime/revocation, and correctness concerns. // SubjectAccessReview, and LocalAccessReview are the correct way to defer authorization decisions to the API server. message SelfSubjectRulesReview { + // Standard list metadata. 
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; @@ -175,6 +181,7 @@ message SelfSubjectRulesReview { optional SubjectRulesReviewStatus status = 3; } +// SelfSubjectRulesReviewSpec defines the specification for SelfSubjectRulesReview. message SelfSubjectRulesReviewSpec { // Namespace to evaluate rules for. Required. optional string namespace = 1; @@ -182,6 +189,8 @@ message SelfSubjectRulesReviewSpec { // SubjectAccessReview checks whether or not a user or group can perform an action. message SubjectAccessReview { + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; diff --git a/vendor/k8s.io/api/authorization/v1/types.go b/vendor/k8s.io/api/authorization/v1/types.go index be8913eb4f0a2..d1fe483f9649b 100644 --- a/vendor/k8s.io/api/authorization/v1/types.go +++ b/vendor/k8s.io/api/authorization/v1/types.go @@ -30,6 +30,8 @@ import ( // SubjectAccessReview checks whether or not a user or group can perform an action. type SubjectAccessReview struct { metav1.TypeMeta `json:",inline"` + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -51,6 +53,8 @@ type SubjectAccessReview struct { // to check whether they can perform an action type SelfSubjectAccessReview struct { metav1.TypeMeta `json:",inline"` + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -71,6 +75,8 @@ type SelfSubjectAccessReview struct { // checking. type LocalSubjectAccessReview struct { metav1.TypeMeta `json:",inline"` + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -200,6 +206,8 @@ type SubjectAccessReviewStatus struct { // SubjectAccessReview, and LocalAccessReview are the correct way to defer authorization decisions to the API server. type SelfSubjectRulesReview struct { metav1.TypeMeta `json:",inline"` + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -211,6 +219,7 @@ type SelfSubjectRulesReview struct { Status SubjectRulesReviewStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` } +// SelfSubjectRulesReviewSpec defines the specification for SelfSubjectRulesReview. type SelfSubjectRulesReviewSpec struct { // Namespace to evaluate rules for. Required. 
Namespace string `json:"namespace,omitempty" protobuf:"bytes,1,opt,name=namespace"` diff --git a/vendor/k8s.io/api/authorization/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/authorization/v1/types_swagger_doc_generated.go index 8445f71164abb..2e5fbea7ad64b 100644 --- a/vendor/k8s.io/api/authorization/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/authorization/v1/types_swagger_doc_generated.go @@ -28,9 +28,10 @@ package v1 // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_LocalSubjectAccessReview = map[string]string{ - "": "LocalSubjectAccessReview checks whether or not a user or group can perform an action in a given namespace. Having a namespace scoped resource makes it much easier to grant namespace scoped policy that includes permissions checking.", - "spec": "Spec holds information about the request being evaluated. spec.namespace must be equal to the namespace you made the request against. If empty, it is defaulted.", - "status": "Status is filled in by the server and indicates whether the request is allowed or not", + "": "LocalSubjectAccessReview checks whether or not a user or group can perform an action in a given namespace. Having a namespace scoped resource makes it much easier to grant namespace scoped policy that includes permissions checking.", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "Spec holds information about the request being evaluated. spec.namespace must be equal to the namespace you made the request against. If empty, it is defaulted.", + "status": "Status is filled in by the server and indicates whether the request is allowed or not", } func (LocalSubjectAccessReview) SwaggerDoc() map[string]string { @@ -85,9 +86,10 @@ func (ResourceRule) SwaggerDoc() map[string]string { } var map_SelfSubjectAccessReview = map[string]string{ - "": "SelfSubjectAccessReview checks whether or the current user can perform an action. Not filling in a spec.namespace means \"in all namespaces\". Self is a special case, because users should always be able to check whether they can perform an action", - "spec": "Spec holds information about the request being evaluated. user and groups must be empty", - "status": "Status is filled in by the server and indicates whether the request is allowed or not", + "": "SelfSubjectAccessReview checks whether or the current user can perform an action. Not filling in a spec.namespace means \"in all namespaces\". Self is a special case, because users should always be able to check whether they can perform an action", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "Spec holds information about the request being evaluated. user and groups must be empty", + "status": "Status is filled in by the server and indicates whether the request is allowed or not", } func (SelfSubjectAccessReview) SwaggerDoc() map[string]string { @@ -105,9 +107,10 @@ func (SelfSubjectAccessReviewSpec) SwaggerDoc() map[string]string { } var map_SelfSubjectRulesReview = map[string]string{ - "": "SelfSubjectRulesReview enumerates the set of actions the current user can perform within a namespace. The returned list of actions may be incomplete depending on the server's authorization mode, and any errors experienced during the evaluation. 
SelfSubjectRulesReview should be used by UIs to show/hide actions, or to quickly let an end user reason about their permissions. It should NOT Be used by external systems to drive authorization decisions as this raises confused deputy, cache lifetime/revocation, and correctness concerns. SubjectAccessReview, and LocalAccessReview are the correct way to defer authorization decisions to the API server.", - "spec": "Spec holds information about the request being evaluated.", - "status": "Status is filled in by the server and indicates the set of actions a user can perform.", + "": "SelfSubjectRulesReview enumerates the set of actions the current user can perform within a namespace. The returned list of actions may be incomplete depending on the server's authorization mode, and any errors experienced during the evaluation. SelfSubjectRulesReview should be used by UIs to show/hide actions, or to quickly let an end user reason about their permissions. It should NOT Be used by external systems to drive authorization decisions as this raises confused deputy, cache lifetime/revocation, and correctness concerns. SubjectAccessReview, and LocalAccessReview are the correct way to defer authorization decisions to the API server.", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "Spec holds information about the request being evaluated.", + "status": "Status is filled in by the server and indicates the set of actions a user can perform.", } func (SelfSubjectRulesReview) SwaggerDoc() map[string]string { @@ -115,6 +118,7 @@ func (SelfSubjectRulesReview) SwaggerDoc() map[string]string { } var map_SelfSubjectRulesReviewSpec = map[string]string{ + "": "SelfSubjectRulesReviewSpec defines the specification for SelfSubjectRulesReview.", "namespace": "Namespace to evaluate rules for. Required.", } @@ -123,9 +127,10 @@ func (SelfSubjectRulesReviewSpec) SwaggerDoc() map[string]string { } var map_SubjectAccessReview = map[string]string{ - "": "SubjectAccessReview checks whether or not a user or group can perform an action.", - "spec": "Spec holds information about the request being evaluated", - "status": "Status is filled in by the server and indicates whether the request is allowed or not", + "": "SubjectAccessReview checks whether or not a user or group can perform an action.", + "metadata": "Standard list metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "Spec holds information about the request being evaluated", + "status": "Status is filled in by the server and indicates whether the request is allowed or not", } func (SubjectAccessReview) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/authorization/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/authorization/v1/zz_generated.deepcopy.go index 1d11b38b082cf..f1d49eb386ec9 100644 --- a/vendor/k8s.io/api/authorization/v1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/authorization/v1/zz_generated.deepcopy.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* diff --git a/vendor/k8s.io/api/authorization/v1beta1/generated.proto b/vendor/k8s.io/api/authorization/v1beta1/generated.proto index a9e221608efde..4b1a55e0ca69d 100644 --- a/vendor/k8s.io/api/authorization/v1beta1/generated.proto +++ b/vendor/k8s.io/api/authorization/v1beta1/generated.proto @@ -41,6 +41,8 @@ message ExtraValue { // Having a namespace scoped resource makes it much easier to grant namespace scoped policy that includes permissions // checking. message LocalSubjectAccessReview { + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; @@ -134,6 +136,8 @@ message ResourceRule { // spec.namespace means "in all namespaces". Self is a special case, because users should always be able // to check whether they can perform an action message SelfSubjectAccessReview { + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; @@ -164,6 +168,8 @@ message SelfSubjectAccessReviewSpec { // drive authorization decisions as this raises confused deputy, cache lifetime/revocation, and correctness concerns. // SubjectAccessReview, and LocalAccessReview are the correct way to defer authorization decisions to the API server. message SelfSubjectRulesReview { + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; @@ -175,6 +181,7 @@ message SelfSubjectRulesReview { optional SubjectRulesReviewStatus status = 3; } +// SelfSubjectRulesReviewSpec defines the specification for SelfSubjectRulesReview. message SelfSubjectRulesReviewSpec { // Namespace to evaluate rules for. Required. optional string namespace = 1; @@ -182,6 +189,8 @@ message SelfSubjectRulesReviewSpec { // SubjectAccessReview checks whether or not a user or group can perform an action. message SubjectAccessReview { + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; diff --git a/vendor/k8s.io/api/authorization/v1beta1/types.go b/vendor/k8s.io/api/authorization/v1beta1/types.go index c62b5ea21375d..2653098655668 100644 --- a/vendor/k8s.io/api/authorization/v1beta1/types.go +++ b/vendor/k8s.io/api/authorization/v1beta1/types.go @@ -33,6 +33,8 @@ import ( // SubjectAccessReview checks whether or not a user or group can perform an action. 
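A minimal sketch (not part of this diff) of how the SubjectAccessReview family documented above is used in practice: submitting a SelfSubjectAccessReview through client-go to ask whether the current user may list pods. It assumes a pre-built clientset; the namespace, verb, and resource values are illustrative.

package example

import (
	"context"

	authorizationv1 "k8s.io/api/authorization/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

func canListPods(ctx context.Context, cs kubernetes.Interface) (bool, error) {
	sar := &authorizationv1.SelfSubjectAccessReview{
		Spec: authorizationv1.SelfSubjectAccessReviewSpec{
			ResourceAttributes: &authorizationv1.ResourceAttributes{
				Namespace: "default",
				Verb:      "list",
				Resource:  "pods",
			},
		},
	}
	// The server fills in Status; user and groups must stay empty for the
	// "self" variant, as the swagger docs above note.
	resp, err := cs.AuthorizationV1().SelfSubjectAccessReviews().Create(ctx, sar, metav1.CreateOptions{})
	if err != nil {
		return false, err
	}
	return resp.Status.Allowed, nil
}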
type SubjectAccessReview struct { metav1.TypeMeta `json:",inline"` + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -57,6 +59,8 @@ type SubjectAccessReview struct { // to check whether they can perform an action type SelfSubjectAccessReview struct { metav1.TypeMeta `json:",inline"` + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -80,6 +84,8 @@ type SelfSubjectAccessReview struct { // checking. type LocalSubjectAccessReview struct { metav1.TypeMeta `json:",inline"` + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -212,6 +218,8 @@ type SubjectAccessReviewStatus struct { // SubjectAccessReview, and LocalAccessReview are the correct way to defer authorization decisions to the API server. type SelfSubjectRulesReview struct { metav1.TypeMeta `json:",inline"` + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -223,6 +231,7 @@ type SelfSubjectRulesReview struct { Status SubjectRulesReviewStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` } +// SelfSubjectRulesReviewSpec defines the specification for SelfSubjectRulesReview. type SelfSubjectRulesReviewSpec struct { // Namespace to evaluate rules for. Required. Namespace string `json:"namespace,omitempty" protobuf:"bytes,1,opt,name=namespace"` diff --git a/vendor/k8s.io/api/authorization/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/authorization/v1beta1/types_swagger_doc_generated.go index 3ae6e72060141..2d291189eb20c 100644 --- a/vendor/k8s.io/api/authorization/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/authorization/v1beta1/types_swagger_doc_generated.go @@ -28,9 +28,10 @@ package v1beta1 // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_LocalSubjectAccessReview = map[string]string{ - "": "LocalSubjectAccessReview checks whether or not a user or group can perform an action in a given namespace. Having a namespace scoped resource makes it much easier to grant namespace scoped policy that includes permissions checking.", - "spec": "Spec holds information about the request being evaluated. spec.namespace must be equal to the namespace you made the request against. If empty, it is defaulted.", - "status": "Status is filled in by the server and indicates whether the request is allowed or not", + "": "LocalSubjectAccessReview checks whether or not a user or group can perform an action in a given namespace. Having a namespace scoped resource makes it much easier to grant namespace scoped policy that includes permissions checking.", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "Spec holds information about the request being evaluated. spec.namespace must be equal to the namespace you made the request against. 
If empty, it is defaulted.", + "status": "Status is filled in by the server and indicates whether the request is allowed or not", } func (LocalSubjectAccessReview) SwaggerDoc() map[string]string { @@ -85,9 +86,10 @@ func (ResourceRule) SwaggerDoc() map[string]string { } var map_SelfSubjectAccessReview = map[string]string{ - "": "SelfSubjectAccessReview checks whether or the current user can perform an action. Not filling in a spec.namespace means \"in all namespaces\". Self is a special case, because users should always be able to check whether they can perform an action", - "spec": "Spec holds information about the request being evaluated. user and groups must be empty", - "status": "Status is filled in by the server and indicates whether the request is allowed or not", + "": "SelfSubjectAccessReview checks whether or the current user can perform an action. Not filling in a spec.namespace means \"in all namespaces\". Self is a special case, because users should always be able to check whether they can perform an action", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "Spec holds information about the request being evaluated. user and groups must be empty", + "status": "Status is filled in by the server and indicates whether the request is allowed or not", } func (SelfSubjectAccessReview) SwaggerDoc() map[string]string { @@ -105,9 +107,10 @@ func (SelfSubjectAccessReviewSpec) SwaggerDoc() map[string]string { } var map_SelfSubjectRulesReview = map[string]string{ - "": "SelfSubjectRulesReview enumerates the set of actions the current user can perform within a namespace. The returned list of actions may be incomplete depending on the server's authorization mode, and any errors experienced during the evaluation. SelfSubjectRulesReview should be used by UIs to show/hide actions, or to quickly let an end user reason about their permissions. It should NOT Be used by external systems to drive authorization decisions as this raises confused deputy, cache lifetime/revocation, and correctness concerns. SubjectAccessReview, and LocalAccessReview are the correct way to defer authorization decisions to the API server.", - "spec": "Spec holds information about the request being evaluated.", - "status": "Status is filled in by the server and indicates the set of actions a user can perform.", + "": "SelfSubjectRulesReview enumerates the set of actions the current user can perform within a namespace. The returned list of actions may be incomplete depending on the server's authorization mode, and any errors experienced during the evaluation. SelfSubjectRulesReview should be used by UIs to show/hide actions, or to quickly let an end user reason about their permissions. It should NOT Be used by external systems to drive authorization decisions as this raises confused deputy, cache lifetime/revocation, and correctness concerns. SubjectAccessReview, and LocalAccessReview are the correct way to defer authorization decisions to the API server.", + "metadata": "Standard list metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "Spec holds information about the request being evaluated.", + "status": "Status is filled in by the server and indicates the set of actions a user can perform.", } func (SelfSubjectRulesReview) SwaggerDoc() map[string]string { @@ -115,6 +118,7 @@ func (SelfSubjectRulesReview) SwaggerDoc() map[string]string { } var map_SelfSubjectRulesReviewSpec = map[string]string{ + "": "SelfSubjectRulesReviewSpec defines the specification for SelfSubjectRulesReview.", "namespace": "Namespace to evaluate rules for. Required.", } @@ -123,9 +127,10 @@ func (SelfSubjectRulesReviewSpec) SwaggerDoc() map[string]string { } var map_SubjectAccessReview = map[string]string{ - "": "SubjectAccessReview checks whether or not a user or group can perform an action.", - "spec": "Spec holds information about the request being evaluated", - "status": "Status is filled in by the server and indicates whether the request is allowed or not", + "": "SubjectAccessReview checks whether or not a user or group can perform an action.", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "Spec holds information about the request being evaluated", + "status": "Status is filled in by the server and indicates whether the request is allowed or not", } func (SubjectAccessReview) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/authorization/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/authorization/v1beta1/zz_generated.deepcopy.go index 58b2dfe753e46..13f09cf2d2787 100644 --- a/vendor/k8s.io/api/authorization/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/authorization/v1beta1/zz_generated.deepcopy.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* diff --git a/vendor/k8s.io/api/authorization/v1beta1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/authorization/v1beta1/zz_generated.prerelease-lifecycle.go index fcb75dd914b3e..f9817df5833ab 100644 --- a/vendor/k8s.io/api/authorization/v1beta1/zz_generated.prerelease-lifecycle.go +++ b/vendor/k8s.io/api/authorization/v1beta1/zz_generated.prerelease-lifecycle.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* diff --git a/vendor/k8s.io/api/autoscaling/v1/generated.proto b/vendor/k8s.io/api/autoscaling/v1/generated.proto index 08b8667c8d514..606a50da56075 100644 --- a/vendor/k8s.io/api/autoscaling/v1/generated.proto +++ b/vendor/k8s.io/api/autoscaling/v1/generated.proto @@ -85,6 +85,7 @@ message ContainerResourceMetricStatus { } // CrossVersionObjectReference contains enough information to let you identify the referred resource. +// +structType=atomic message CrossVersionObjectReference { // Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" optional string kind = 1; diff --git a/vendor/k8s.io/api/autoscaling/v1/types.go b/vendor/k8s.io/api/autoscaling/v1/types.go index 3343177b2ad91..8e0a46525e167 100644 --- a/vendor/k8s.io/api/autoscaling/v1/types.go +++ b/vendor/k8s.io/api/autoscaling/v1/types.go @@ -23,6 +23,7 @@ import ( ) // CrossVersionObjectReference contains enough information to let you identify the referred resource. 
+// +structType=atomic type CrossVersionObjectReference struct { // Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" Kind string `json:"kind" protobuf:"bytes,1,opt,name=kind"` @@ -149,6 +150,7 @@ type ScaleStatus struct { // the types below are used in the alpha metrics annotation // MetricSourceType indicates the type of metric. +// +enum type MetricSourceType string const ( diff --git a/vendor/k8s.io/api/autoscaling/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/autoscaling/v1/zz_generated.deepcopy.go index 05ae6ebda7262..603e6aa8cbaa7 100644 --- a/vendor/k8s.io/api/autoscaling/v1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/autoscaling/v1/zz_generated.deepcopy.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* diff --git a/vendor/k8s.io/api/autoscaling/v2/doc.go b/vendor/k8s.io/api/autoscaling/v2/doc.go new file mode 100644 index 0000000000000..f96a059b6c7e5 --- /dev/null +++ b/vendor/k8s.io/api/autoscaling/v2/doc.go @@ -0,0 +1,21 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:deepcopy-gen=package +// +k8s:protobuf-gen=package +// +k8s:openapi-gen=true + +package v2 // import "k8s.io/api/autoscaling/v2" diff --git a/vendor/k8s.io/api/autoscaling/v2/generated.pb.go b/vendor/k8s.io/api/autoscaling/v2/generated.pb.go new file mode 100644 index 0000000000000..408413a78c788 --- /dev/null +++ b/vendor/k8s.io/api/autoscaling/v2/generated.pb.go @@ -0,0 +1,6599 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: k8s.io/kubernetes/vendor/k8s.io/api/autoscaling/v2/generated.proto + +package v2 + +import ( + fmt "fmt" + + io "io" + + proto "github.com/gogo/protobuf/proto" + + k8s_io_api_core_v1 "k8s.io/api/core/v1" + resource "k8s.io/apimachinery/pkg/api/resource" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
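A minimal sketch (not part of this diff) of an object built against the new autoscaling/v2 package vendored here: a HorizontalPodAutoscaler targeting a Deployment on average CPU utilization. Type and constant names come from the v2 API; the workload names are illustrative.

package example

import (
	autoscalingv2 "k8s.io/api/autoscaling/v2"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func exampleHPA() *autoscalingv2.HorizontalPodAutoscaler {
	minReplicas := int32(2)
	targetCPU := int32(80)
	return &autoscalingv2.HorizontalPodAutoscaler{
		ObjectMeta: metav1.ObjectMeta{Name: "web", Namespace: "default"},
		Spec: autoscalingv2.HorizontalPodAutoscalerSpec{
			// CrossVersionObjectReference (now marked +structType=atomic)
			// names the scale target.
			ScaleTargetRef: autoscalingv2.CrossVersionObjectReference{
				APIVersion: "apps/v1",
				Kind:       "Deployment",
				Name:       "web",
			},
			MinReplicas: &minReplicas,
			MaxReplicas: 10,
			Metrics: []autoscalingv2.MetricSpec{{
				Type: autoscalingv2.ResourceMetricSourceType,
				Resource: &autoscalingv2.ResourceMetricSource{
					Name: "cpu",
					Target: autoscalingv2.MetricTarget{
						Type:               autoscalingv2.UtilizationMetricType,
						AverageUtilization: &targetCPU,
					},
				},
			}},
		},
	}
}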
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +func (m *ContainerResourceMetricSource) Reset() { *m = ContainerResourceMetricSource{} } +func (*ContainerResourceMetricSource) ProtoMessage() {} +func (*ContainerResourceMetricSource) Descriptor() ([]byte, []int) { + return fileDescriptor_b14d4df4b5f3935e, []int{0} +} +func (m *ContainerResourceMetricSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ContainerResourceMetricSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ContainerResourceMetricSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_ContainerResourceMetricSource.Merge(m, src) +} +func (m *ContainerResourceMetricSource) XXX_Size() int { + return m.Size() +} +func (m *ContainerResourceMetricSource) XXX_DiscardUnknown() { + xxx_messageInfo_ContainerResourceMetricSource.DiscardUnknown(m) +} + +var xxx_messageInfo_ContainerResourceMetricSource proto.InternalMessageInfo + +func (m *ContainerResourceMetricStatus) Reset() { *m = ContainerResourceMetricStatus{} } +func (*ContainerResourceMetricStatus) ProtoMessage() {} +func (*ContainerResourceMetricStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_b14d4df4b5f3935e, []int{1} +} +func (m *ContainerResourceMetricStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ContainerResourceMetricStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ContainerResourceMetricStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_ContainerResourceMetricStatus.Merge(m, src) +} +func (m *ContainerResourceMetricStatus) XXX_Size() int { + return m.Size() +} +func (m *ContainerResourceMetricStatus) XXX_DiscardUnknown() { + xxx_messageInfo_ContainerResourceMetricStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_ContainerResourceMetricStatus proto.InternalMessageInfo + +func (m *CrossVersionObjectReference) Reset() { *m = CrossVersionObjectReference{} } +func (*CrossVersionObjectReference) ProtoMessage() {} +func (*CrossVersionObjectReference) Descriptor() ([]byte, []int) { + return fileDescriptor_b14d4df4b5f3935e, []int{2} +} +func (m *CrossVersionObjectReference) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CrossVersionObjectReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CrossVersionObjectReference) XXX_Merge(src proto.Message) { + xxx_messageInfo_CrossVersionObjectReference.Merge(m, src) +} +func (m *CrossVersionObjectReference) XXX_Size() int { + return m.Size() +} +func (m *CrossVersionObjectReference) XXX_DiscardUnknown() { + xxx_messageInfo_CrossVersionObjectReference.DiscardUnknown(m) +} + +var xxx_messageInfo_CrossVersionObjectReference proto.InternalMessageInfo + +func (m *ExternalMetricSource) Reset() { *m = ExternalMetricSource{} } +func (*ExternalMetricSource) ProtoMessage() {} +func (*ExternalMetricSource) Descriptor() ([]byte, []int) { + return fileDescriptor_b14d4df4b5f3935e, []int{3} +} +func (m *ExternalMetricSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ExternalMetricSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + 
n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ExternalMetricSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExternalMetricSource.Merge(m, src) +} +func (m *ExternalMetricSource) XXX_Size() int { + return m.Size() +} +func (m *ExternalMetricSource) XXX_DiscardUnknown() { + xxx_messageInfo_ExternalMetricSource.DiscardUnknown(m) +} + +var xxx_messageInfo_ExternalMetricSource proto.InternalMessageInfo + +func (m *ExternalMetricStatus) Reset() { *m = ExternalMetricStatus{} } +func (*ExternalMetricStatus) ProtoMessage() {} +func (*ExternalMetricStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_b14d4df4b5f3935e, []int{4} +} +func (m *ExternalMetricStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ExternalMetricStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ExternalMetricStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExternalMetricStatus.Merge(m, src) +} +func (m *ExternalMetricStatus) XXX_Size() int { + return m.Size() +} +func (m *ExternalMetricStatus) XXX_DiscardUnknown() { + xxx_messageInfo_ExternalMetricStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_ExternalMetricStatus proto.InternalMessageInfo + +func (m *HPAScalingPolicy) Reset() { *m = HPAScalingPolicy{} } +func (*HPAScalingPolicy) ProtoMessage() {} +func (*HPAScalingPolicy) Descriptor() ([]byte, []int) { + return fileDescriptor_b14d4df4b5f3935e, []int{5} +} +func (m *HPAScalingPolicy) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *HPAScalingPolicy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *HPAScalingPolicy) XXX_Merge(src proto.Message) { + xxx_messageInfo_HPAScalingPolicy.Merge(m, src) +} +func (m *HPAScalingPolicy) XXX_Size() int { + return m.Size() +} +func (m *HPAScalingPolicy) XXX_DiscardUnknown() { + xxx_messageInfo_HPAScalingPolicy.DiscardUnknown(m) +} + +var xxx_messageInfo_HPAScalingPolicy proto.InternalMessageInfo + +func (m *HPAScalingRules) Reset() { *m = HPAScalingRules{} } +func (*HPAScalingRules) ProtoMessage() {} +func (*HPAScalingRules) Descriptor() ([]byte, []int) { + return fileDescriptor_b14d4df4b5f3935e, []int{6} +} +func (m *HPAScalingRules) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *HPAScalingRules) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *HPAScalingRules) XXX_Merge(src proto.Message) { + xxx_messageInfo_HPAScalingRules.Merge(m, src) +} +func (m *HPAScalingRules) XXX_Size() int { + return m.Size() +} +func (m *HPAScalingRules) XXX_DiscardUnknown() { + xxx_messageInfo_HPAScalingRules.DiscardUnknown(m) +} + +var xxx_messageInfo_HPAScalingRules proto.InternalMessageInfo + +func (m *HorizontalPodAutoscaler) Reset() { *m = HorizontalPodAutoscaler{} } +func (*HorizontalPodAutoscaler) ProtoMessage() {} +func (*HorizontalPodAutoscaler) Descriptor() ([]byte, []int) { + return fileDescriptor_b14d4df4b5f3935e, []int{7} +} +func (m *HorizontalPodAutoscaler) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *HorizontalPodAutoscaler) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = 
b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *HorizontalPodAutoscaler) XXX_Merge(src proto.Message) { + xxx_messageInfo_HorizontalPodAutoscaler.Merge(m, src) +} +func (m *HorizontalPodAutoscaler) XXX_Size() int { + return m.Size() +} +func (m *HorizontalPodAutoscaler) XXX_DiscardUnknown() { + xxx_messageInfo_HorizontalPodAutoscaler.DiscardUnknown(m) +} + +var xxx_messageInfo_HorizontalPodAutoscaler proto.InternalMessageInfo + +func (m *HorizontalPodAutoscalerBehavior) Reset() { *m = HorizontalPodAutoscalerBehavior{} } +func (*HorizontalPodAutoscalerBehavior) ProtoMessage() {} +func (*HorizontalPodAutoscalerBehavior) Descriptor() ([]byte, []int) { + return fileDescriptor_b14d4df4b5f3935e, []int{8} +} +func (m *HorizontalPodAutoscalerBehavior) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *HorizontalPodAutoscalerBehavior) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *HorizontalPodAutoscalerBehavior) XXX_Merge(src proto.Message) { + xxx_messageInfo_HorizontalPodAutoscalerBehavior.Merge(m, src) +} +func (m *HorizontalPodAutoscalerBehavior) XXX_Size() int { + return m.Size() +} +func (m *HorizontalPodAutoscalerBehavior) XXX_DiscardUnknown() { + xxx_messageInfo_HorizontalPodAutoscalerBehavior.DiscardUnknown(m) +} + +var xxx_messageInfo_HorizontalPodAutoscalerBehavior proto.InternalMessageInfo + +func (m *HorizontalPodAutoscalerCondition) Reset() { *m = HorizontalPodAutoscalerCondition{} } +func (*HorizontalPodAutoscalerCondition) ProtoMessage() {} +func (*HorizontalPodAutoscalerCondition) Descriptor() ([]byte, []int) { + return fileDescriptor_b14d4df4b5f3935e, []int{9} +} +func (m *HorizontalPodAutoscalerCondition) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *HorizontalPodAutoscalerCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *HorizontalPodAutoscalerCondition) XXX_Merge(src proto.Message) { + xxx_messageInfo_HorizontalPodAutoscalerCondition.Merge(m, src) +} +func (m *HorizontalPodAutoscalerCondition) XXX_Size() int { + return m.Size() +} +func (m *HorizontalPodAutoscalerCondition) XXX_DiscardUnknown() { + xxx_messageInfo_HorizontalPodAutoscalerCondition.DiscardUnknown(m) +} + +var xxx_messageInfo_HorizontalPodAutoscalerCondition proto.InternalMessageInfo + +func (m *HorizontalPodAutoscalerList) Reset() { *m = HorizontalPodAutoscalerList{} } +func (*HorizontalPodAutoscalerList) ProtoMessage() {} +func (*HorizontalPodAutoscalerList) Descriptor() ([]byte, []int) { + return fileDescriptor_b14d4df4b5f3935e, []int{10} +} +func (m *HorizontalPodAutoscalerList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *HorizontalPodAutoscalerList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *HorizontalPodAutoscalerList) XXX_Merge(src proto.Message) { + xxx_messageInfo_HorizontalPodAutoscalerList.Merge(m, src) +} +func (m *HorizontalPodAutoscalerList) XXX_Size() int { + return m.Size() +} +func (m *HorizontalPodAutoscalerList) XXX_DiscardUnknown() { + xxx_messageInfo_HorizontalPodAutoscalerList.DiscardUnknown(m) +} + +var 
xxx_messageInfo_HorizontalPodAutoscalerList proto.InternalMessageInfo + +func (m *HorizontalPodAutoscalerSpec) Reset() { *m = HorizontalPodAutoscalerSpec{} } +func (*HorizontalPodAutoscalerSpec) ProtoMessage() {} +func (*HorizontalPodAutoscalerSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_b14d4df4b5f3935e, []int{11} +} +func (m *HorizontalPodAutoscalerSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *HorizontalPodAutoscalerSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *HorizontalPodAutoscalerSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_HorizontalPodAutoscalerSpec.Merge(m, src) +} +func (m *HorizontalPodAutoscalerSpec) XXX_Size() int { + return m.Size() +} +func (m *HorizontalPodAutoscalerSpec) XXX_DiscardUnknown() { + xxx_messageInfo_HorizontalPodAutoscalerSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_HorizontalPodAutoscalerSpec proto.InternalMessageInfo + +func (m *HorizontalPodAutoscalerStatus) Reset() { *m = HorizontalPodAutoscalerStatus{} } +func (*HorizontalPodAutoscalerStatus) ProtoMessage() {} +func (*HorizontalPodAutoscalerStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_b14d4df4b5f3935e, []int{12} +} +func (m *HorizontalPodAutoscalerStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *HorizontalPodAutoscalerStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *HorizontalPodAutoscalerStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_HorizontalPodAutoscalerStatus.Merge(m, src) +} +func (m *HorizontalPodAutoscalerStatus) XXX_Size() int { + return m.Size() +} +func (m *HorizontalPodAutoscalerStatus) XXX_DiscardUnknown() { + xxx_messageInfo_HorizontalPodAutoscalerStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_HorizontalPodAutoscalerStatus proto.InternalMessageInfo + +func (m *MetricIdentifier) Reset() { *m = MetricIdentifier{} } +func (*MetricIdentifier) ProtoMessage() {} +func (*MetricIdentifier) Descriptor() ([]byte, []int) { + return fileDescriptor_b14d4df4b5f3935e, []int{13} +} +func (m *MetricIdentifier) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MetricIdentifier) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *MetricIdentifier) XXX_Merge(src proto.Message) { + xxx_messageInfo_MetricIdentifier.Merge(m, src) +} +func (m *MetricIdentifier) XXX_Size() int { + return m.Size() +} +func (m *MetricIdentifier) XXX_DiscardUnknown() { + xxx_messageInfo_MetricIdentifier.DiscardUnknown(m) +} + +var xxx_messageInfo_MetricIdentifier proto.InternalMessageInfo + +func (m *MetricSpec) Reset() { *m = MetricSpec{} } +func (*MetricSpec) ProtoMessage() {} +func (*MetricSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_b14d4df4b5f3935e, []int{14} +} +func (m *MetricSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MetricSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *MetricSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_MetricSpec.Merge(m, src) +} +func (m *MetricSpec) 
XXX_Size() int { + return m.Size() +} +func (m *MetricSpec) XXX_DiscardUnknown() { + xxx_messageInfo_MetricSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_MetricSpec proto.InternalMessageInfo + +func (m *MetricStatus) Reset() { *m = MetricStatus{} } +func (*MetricStatus) ProtoMessage() {} +func (*MetricStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_b14d4df4b5f3935e, []int{15} +} +func (m *MetricStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MetricStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *MetricStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_MetricStatus.Merge(m, src) +} +func (m *MetricStatus) XXX_Size() int { + return m.Size() +} +func (m *MetricStatus) XXX_DiscardUnknown() { + xxx_messageInfo_MetricStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_MetricStatus proto.InternalMessageInfo + +func (m *MetricTarget) Reset() { *m = MetricTarget{} } +func (*MetricTarget) ProtoMessage() {} +func (*MetricTarget) Descriptor() ([]byte, []int) { + return fileDescriptor_b14d4df4b5f3935e, []int{16} +} +func (m *MetricTarget) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MetricTarget) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *MetricTarget) XXX_Merge(src proto.Message) { + xxx_messageInfo_MetricTarget.Merge(m, src) +} +func (m *MetricTarget) XXX_Size() int { + return m.Size() +} +func (m *MetricTarget) XXX_DiscardUnknown() { + xxx_messageInfo_MetricTarget.DiscardUnknown(m) +} + +var xxx_messageInfo_MetricTarget proto.InternalMessageInfo + +func (m *MetricValueStatus) Reset() { *m = MetricValueStatus{} } +func (*MetricValueStatus) ProtoMessage() {} +func (*MetricValueStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_b14d4df4b5f3935e, []int{17} +} +func (m *MetricValueStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MetricValueStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *MetricValueStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_MetricValueStatus.Merge(m, src) +} +func (m *MetricValueStatus) XXX_Size() int { + return m.Size() +} +func (m *MetricValueStatus) XXX_DiscardUnknown() { + xxx_messageInfo_MetricValueStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_MetricValueStatus proto.InternalMessageInfo + +func (m *ObjectMetricSource) Reset() { *m = ObjectMetricSource{} } +func (*ObjectMetricSource) ProtoMessage() {} +func (*ObjectMetricSource) Descriptor() ([]byte, []int) { + return fileDescriptor_b14d4df4b5f3935e, []int{18} +} +func (m *ObjectMetricSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ObjectMetricSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ObjectMetricSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_ObjectMetricSource.Merge(m, src) +} +func (m *ObjectMetricSource) XXX_Size() int { + return m.Size() +} +func (m *ObjectMetricSource) XXX_DiscardUnknown() { + xxx_messageInfo_ObjectMetricSource.DiscardUnknown(m) +} + +var 
xxx_messageInfo_ObjectMetricSource proto.InternalMessageInfo + +func (m *ObjectMetricStatus) Reset() { *m = ObjectMetricStatus{} } +func (*ObjectMetricStatus) ProtoMessage() {} +func (*ObjectMetricStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_b14d4df4b5f3935e, []int{19} +} +func (m *ObjectMetricStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ObjectMetricStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ObjectMetricStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_ObjectMetricStatus.Merge(m, src) +} +func (m *ObjectMetricStatus) XXX_Size() int { + return m.Size() +} +func (m *ObjectMetricStatus) XXX_DiscardUnknown() { + xxx_messageInfo_ObjectMetricStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_ObjectMetricStatus proto.InternalMessageInfo + +func (m *PodsMetricSource) Reset() { *m = PodsMetricSource{} } +func (*PodsMetricSource) ProtoMessage() {} +func (*PodsMetricSource) Descriptor() ([]byte, []int) { + return fileDescriptor_b14d4df4b5f3935e, []int{20} +} +func (m *PodsMetricSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodsMetricSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PodsMetricSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodsMetricSource.Merge(m, src) +} +func (m *PodsMetricSource) XXX_Size() int { + return m.Size() +} +func (m *PodsMetricSource) XXX_DiscardUnknown() { + xxx_messageInfo_PodsMetricSource.DiscardUnknown(m) +} + +var xxx_messageInfo_PodsMetricSource proto.InternalMessageInfo + +func (m *PodsMetricStatus) Reset() { *m = PodsMetricStatus{} } +func (*PodsMetricStatus) ProtoMessage() {} +func (*PodsMetricStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_b14d4df4b5f3935e, []int{21} +} +func (m *PodsMetricStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodsMetricStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PodsMetricStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodsMetricStatus.Merge(m, src) +} +func (m *PodsMetricStatus) XXX_Size() int { + return m.Size() +} +func (m *PodsMetricStatus) XXX_DiscardUnknown() { + xxx_messageInfo_PodsMetricStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_PodsMetricStatus proto.InternalMessageInfo + +func (m *ResourceMetricSource) Reset() { *m = ResourceMetricSource{} } +func (*ResourceMetricSource) ProtoMessage() {} +func (*ResourceMetricSource) Descriptor() ([]byte, []int) { + return fileDescriptor_b14d4df4b5f3935e, []int{22} +} +func (m *ResourceMetricSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceMetricSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ResourceMetricSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceMetricSource.Merge(m, src) +} +func (m *ResourceMetricSource) XXX_Size() int { + return m.Size() +} +func (m *ResourceMetricSource) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceMetricSource.DiscardUnknown(m) +} + +var 
xxx_messageInfo_ResourceMetricSource proto.InternalMessageInfo + +func (m *ResourceMetricStatus) Reset() { *m = ResourceMetricStatus{} } +func (*ResourceMetricStatus) ProtoMessage() {} +func (*ResourceMetricStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_b14d4df4b5f3935e, []int{23} +} +func (m *ResourceMetricStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceMetricStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ResourceMetricStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceMetricStatus.Merge(m, src) +} +func (m *ResourceMetricStatus) XXX_Size() int { + return m.Size() +} +func (m *ResourceMetricStatus) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceMetricStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourceMetricStatus proto.InternalMessageInfo + +func init() { + proto.RegisterType((*ContainerResourceMetricSource)(nil), "k8s.io.api.autoscaling.v2.ContainerResourceMetricSource") + proto.RegisterType((*ContainerResourceMetricStatus)(nil), "k8s.io.api.autoscaling.v2.ContainerResourceMetricStatus") + proto.RegisterType((*CrossVersionObjectReference)(nil), "k8s.io.api.autoscaling.v2.CrossVersionObjectReference") + proto.RegisterType((*ExternalMetricSource)(nil), "k8s.io.api.autoscaling.v2.ExternalMetricSource") + proto.RegisterType((*ExternalMetricStatus)(nil), "k8s.io.api.autoscaling.v2.ExternalMetricStatus") + proto.RegisterType((*HPAScalingPolicy)(nil), "k8s.io.api.autoscaling.v2.HPAScalingPolicy") + proto.RegisterType((*HPAScalingRules)(nil), "k8s.io.api.autoscaling.v2.HPAScalingRules") + proto.RegisterType((*HorizontalPodAutoscaler)(nil), "k8s.io.api.autoscaling.v2.HorizontalPodAutoscaler") + proto.RegisterType((*HorizontalPodAutoscalerBehavior)(nil), "k8s.io.api.autoscaling.v2.HorizontalPodAutoscalerBehavior") + proto.RegisterType((*HorizontalPodAutoscalerCondition)(nil), "k8s.io.api.autoscaling.v2.HorizontalPodAutoscalerCondition") + proto.RegisterType((*HorizontalPodAutoscalerList)(nil), "k8s.io.api.autoscaling.v2.HorizontalPodAutoscalerList") + proto.RegisterType((*HorizontalPodAutoscalerSpec)(nil), "k8s.io.api.autoscaling.v2.HorizontalPodAutoscalerSpec") + proto.RegisterType((*HorizontalPodAutoscalerStatus)(nil), "k8s.io.api.autoscaling.v2.HorizontalPodAutoscalerStatus") + proto.RegisterType((*MetricIdentifier)(nil), "k8s.io.api.autoscaling.v2.MetricIdentifier") + proto.RegisterType((*MetricSpec)(nil), "k8s.io.api.autoscaling.v2.MetricSpec") + proto.RegisterType((*MetricStatus)(nil), "k8s.io.api.autoscaling.v2.MetricStatus") + proto.RegisterType((*MetricTarget)(nil), "k8s.io.api.autoscaling.v2.MetricTarget") + proto.RegisterType((*MetricValueStatus)(nil), "k8s.io.api.autoscaling.v2.MetricValueStatus") + proto.RegisterType((*ObjectMetricSource)(nil), "k8s.io.api.autoscaling.v2.ObjectMetricSource") + proto.RegisterType((*ObjectMetricStatus)(nil), "k8s.io.api.autoscaling.v2.ObjectMetricStatus") + proto.RegisterType((*PodsMetricSource)(nil), "k8s.io.api.autoscaling.v2.PodsMetricSource") + proto.RegisterType((*PodsMetricStatus)(nil), "k8s.io.api.autoscaling.v2.PodsMetricStatus") + proto.RegisterType((*ResourceMetricSource)(nil), "k8s.io.api.autoscaling.v2.ResourceMetricSource") + proto.RegisterType((*ResourceMetricStatus)(nil), "k8s.io.api.autoscaling.v2.ResourceMetricStatus") +} + +func init() { + 
proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/autoscaling/v2/generated.proto", fileDescriptor_b14d4df4b5f3935e) +} + +var fileDescriptor_b14d4df4b5f3935e = []byte{ + // 1735 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xd4, 0x59, 0xcb, 0x6f, 0x1b, 0xc7, + 0x19, 0xd7, 0x92, 0xd4, 0x6b, 0xa8, 0xe7, 0xf8, 0xc5, 0xca, 0x30, 0x29, 0x6c, 0xdd, 0xda, 0x75, + 0xeb, 0x65, 0xcd, 0xba, 0x86, 0x51, 0x9f, 0xb4, 0x72, 0x5b, 0x0b, 0x96, 0x2a, 0x7a, 0x68, 0x5b, + 0x6d, 0xd1, 0x16, 0x1e, 0xee, 0x8e, 0xa8, 0xa9, 0xc8, 0x5d, 0x62, 0x77, 0x49, 0x5b, 0x06, 0x0a, + 0xf4, 0xd2, 0x7b, 0xd1, 0xc2, 0x28, 0xfa, 0x3f, 0x18, 0x39, 0x25, 0x70, 0x0e, 0x09, 0x10, 0x20, + 0x39, 0xf8, 0x12, 0xc0, 0x87, 0x1c, 0x7c, 0x22, 0x62, 0x06, 0xc8, 0x31, 0x7f, 0x80, 0x4f, 0xc1, + 0x3c, 0xf6, 0xc9, 0x97, 0xe8, 0xc8, 0x02, 0x74, 0xe3, 0xcc, 0x7c, 0xdf, 0xef, 0x7b, 0xcc, 0xf7, + 0x9a, 0x25, 0xd0, 0xf7, 0x6f, 0xba, 0x1a, 0xb5, 0x8b, 0xfb, 0xad, 0x2a, 0x71, 0x2c, 0xe2, 0x11, + 0xb7, 0xd8, 0x26, 0x96, 0x69, 0x3b, 0x45, 0x79, 0x80, 0x9b, 0xb4, 0x88, 0x5b, 0x9e, 0xed, 0x1a, + 0xb8, 0x4e, 0xad, 0x5a, 0xb1, 0x5d, 0x2a, 0xd6, 0x88, 0x45, 0x1c, 0xec, 0x11, 0x53, 0x6b, 0x3a, + 0xb6, 0x67, 0xc3, 0x1f, 0x09, 0x52, 0x0d, 0x37, 0xa9, 0x16, 0x21, 0xd5, 0xda, 0xa5, 0x95, 0xab, + 0x35, 0xea, 0xed, 0xb5, 0xaa, 0x9a, 0x61, 0x37, 0x8a, 0x35, 0xbb, 0x66, 0x17, 0x39, 0x47, 0xb5, + 0xb5, 0xcb, 0x57, 0x7c, 0xc1, 0x7f, 0x09, 0xa4, 0x15, 0x35, 0x22, 0xd4, 0xb0, 0x1d, 0x52, 0x6c, + 0x5f, 0x4b, 0x4a, 0x5b, 0xb9, 0x1e, 0xd2, 0x34, 0xb0, 0xb1, 0x47, 0x2d, 0xe2, 0x1c, 0x14, 0x9b, + 0xfb, 0x35, 0xce, 0xe4, 0x10, 0xd7, 0x6e, 0x39, 0x06, 0x19, 0x8b, 0xcb, 0x2d, 0x36, 0x88, 0x87, + 0xfb, 0xc9, 0x2a, 0x0e, 0xe2, 0x72, 0x5a, 0x96, 0x47, 0x1b, 0xbd, 0x62, 0x6e, 0x8c, 0x62, 0x70, + 0x8d, 0x3d, 0xd2, 0xc0, 0x49, 0x3e, 0xf5, 0x5b, 0x05, 0x5c, 0x58, 0xb7, 0x2d, 0x0f, 0x33, 0x0e, + 0x24, 0x8d, 0xd8, 0x22, 0x9e, 0x43, 0x8d, 0x0a, 0xff, 0x0d, 0xd7, 0x41, 0xc6, 0xc2, 0x0d, 0x92, + 0x53, 0x56, 0x95, 0xcb, 0xb3, 0x7a, 0xf1, 0x65, 0xa7, 0x30, 0xd1, 0xed, 0x14, 0x32, 0x7f, 0xc0, + 0x0d, 0xf2, 0xb6, 0x53, 0x28, 0xf4, 0x3a, 0x4e, 0xf3, 0x61, 0x18, 0x09, 0xe2, 0xcc, 0x70, 0x1b, + 0x4c, 0x79, 0xd8, 0xa9, 0x11, 0x2f, 0x97, 0x5a, 0x55, 0x2e, 0x67, 0x4b, 0x97, 0xb4, 0x81, 0x57, + 0xa7, 0x09, 0xe9, 0xf7, 0x39, 0xb9, 0xbe, 0x20, 0xe5, 0x4d, 0x89, 0x35, 0x92, 0x30, 0xb0, 0x08, + 0x66, 0x0d, 0x5f, 0xed, 0x5c, 0x9a, 0xab, 0xb6, 0x2c, 0x49, 0x67, 0x43, 0x7b, 0x42, 0x1a, 0xf5, + 0xbb, 0x21, 0x86, 0x7a, 0xd8, 0x6b, 0xb9, 0x47, 0x63, 0xe8, 0x0e, 0x98, 0x36, 0x5a, 0x8e, 0x43, + 0x2c, 0xdf, 0xd2, 0x5f, 0x8c, 0xb4, 0xf4, 0x21, 0xae, 0xb7, 0x88, 0xd0, 0x41, 0x5f, 0x94, 0x52, + 0xa7, 0xd7, 0x05, 0x08, 0xf2, 0xd1, 0xc6, 0x37, 0xf8, 0x99, 0x02, 0xce, 0xaf, 0x3b, 0xb6, 0xeb, + 0x3e, 0x24, 0x8e, 0x4b, 0x6d, 0x6b, 0xbb, 0xfa, 0x77, 0x62, 0x78, 0x88, 0xec, 0x12, 0x87, 0x58, + 0x06, 0x81, 0xab, 0x20, 0xb3, 0x4f, 0x2d, 0x53, 0x9a, 0x3b, 0xe7, 0x9b, 0x7b, 0x97, 0x5a, 0x26, + 0xe2, 0x27, 0x8c, 0x82, 0x3b, 0x24, 0x15, 0xa7, 0x88, 0x58, 0x5b, 0x02, 0x00, 0x37, 0xa9, 0x14, + 0x20, 0xb5, 0x82, 0x92, 0x0e, 0xac, 0x95, 0x37, 0xe4, 0x09, 0x8a, 0x50, 0xa9, 0x9f, 0x28, 0xe0, + 0xf4, 0x6f, 0x9f, 0x78, 0xc4, 0xb1, 0x70, 0x3d, 0x16, 0x68, 0x15, 0x30, 0xd5, 0xe0, 0x6b, 0xae, + 0x52, 0xb6, 0xf4, 0xf3, 0x91, 0x9e, 0xdb, 0x30, 0x89, 0xe5, 0xd1, 0x5d, 0x4a, 0x9c, 0x30, 0x4e, + 0xc4, 0x09, 0x92, 0x50, 0x47, 0x1e, 0x78, 0xea, 0x17, 0xbd, 0xea, 0x8b, 0xf0, 0x79, 0x2f, 0xea, + 0xbf, 0xaf, 0x70, 0x52, 0x3f, 0x50, 0xc0, 0xd2, 0x9d, 0xf2, 0x5a, 0x45, 0x70, 0x97, 0xed, 0x3a, 
+ 0x35, 0x0e, 0xe0, 0x4d, 0x90, 0xf1, 0x0e, 0x9a, 0x7e, 0x06, 0x5c, 0xf4, 0x2f, 0xfc, 0xfe, 0x41, + 0x93, 0x65, 0xc0, 0xe9, 0x24, 0x3d, 0xdb, 0x47, 0x9c, 0x03, 0xfe, 0x18, 0x4c, 0xb6, 0x99, 0x5c, + 0xae, 0xe5, 0xa4, 0x3e, 0x2f, 0x59, 0x27, 0xb9, 0x32, 0x48, 0x9c, 0xc1, 0x5b, 0x60, 0xbe, 0x49, + 0x1c, 0x6a, 0x9b, 0x15, 0x62, 0xd8, 0x96, 0xe9, 0xf2, 0x80, 0x99, 0xd4, 0xcf, 0x48, 0xe2, 0xf9, + 0x72, 0xf4, 0x10, 0xc5, 0x69, 0xd5, 0xff, 0xa7, 0xc0, 0x62, 0xa8, 0x00, 0x6a, 0xd5, 0x89, 0x0b, + 0xff, 0x06, 0x56, 0x5c, 0x0f, 0x57, 0x69, 0x9d, 0x3e, 0xc5, 0x1e, 0xb5, 0xad, 0x1d, 0x6a, 0x99, + 0xf6, 0xe3, 0x38, 0x7a, 0xbe, 0xdb, 0x29, 0xac, 0x54, 0x06, 0x52, 0xa1, 0x21, 0x08, 0xf0, 0x2e, + 0x98, 0x73, 0x49, 0x9d, 0x18, 0x9e, 0xb0, 0x57, 0xfa, 0xe5, 0x52, 0xb7, 0x53, 0x98, 0xab, 0x44, + 0xf6, 0xdf, 0x76, 0x0a, 0xa7, 0x62, 0x8e, 0x11, 0x87, 0x28, 0xc6, 0x0c, 0xff, 0x04, 0x66, 0x9a, + 0xec, 0x17, 0x25, 0x6e, 0x2e, 0xb5, 0x9a, 0x1e, 0x11, 0x21, 0x49, 0x5f, 0xeb, 0x4b, 0xd2, 0x4b, + 0x33, 0x65, 0x09, 0x82, 0x02, 0x38, 0xf5, 0x45, 0x0a, 0x9c, 0xbb, 0x63, 0x3b, 0xf4, 0x29, 0x4b, + 0xfe, 0x7a, 0xd9, 0x36, 0xd7, 0x24, 0x18, 0x71, 0xe0, 0x23, 0x30, 0xc3, 0x9a, 0x8c, 0x89, 0x3d, + 0x2c, 0x03, 0xf3, 0x97, 0x11, 0xb1, 0x41, 0xaf, 0xd0, 0x9a, 0xfb, 0x35, 0xb6, 0xe1, 0x6a, 0x8c, + 0x5a, 0x6b, 0x5f, 0xd3, 0x44, 0xbd, 0xd8, 0x22, 0x1e, 0x0e, 0x53, 0x3a, 0xdc, 0x43, 0x01, 0x2a, + 0xfc, 0x23, 0xc8, 0xb8, 0x4d, 0x62, 0xc8, 0x00, 0xbd, 0x31, 0xcc, 0xa8, 0xfe, 0x3a, 0x56, 0x9a, + 0xc4, 0x08, 0xcb, 0x0b, 0x5b, 0x21, 0x8e, 0x08, 0x1f, 0x81, 0x29, 0x97, 0x07, 0x32, 0xbf, 0xcb, + 0x6c, 0xe9, 0xe6, 0x3b, 0x60, 0x8b, 0x44, 0x08, 0xf2, 0x4b, 0xac, 0x91, 0xc4, 0x55, 0xbf, 0x54, + 0x40, 0x61, 0x00, 0xa7, 0x4e, 0xf6, 0x70, 0x9b, 0xda, 0x0e, 0xbc, 0x07, 0xa6, 0xf9, 0xce, 0x83, + 0xa6, 0x74, 0xe0, 0x95, 0x43, 0xdd, 0x1b, 0x0f, 0x51, 0x3d, 0xcb, 0xb2, 0xaf, 0x22, 0xd8, 0x91, + 0x8f, 0x03, 0x77, 0xc0, 0x2c, 0xff, 0x79, 0xdb, 0x7e, 0x6c, 0x49, 0xbf, 0x8d, 0x03, 0x3a, 0xcf, + 0x8a, 0x7e, 0xc5, 0x07, 0x40, 0x21, 0x96, 0xfa, 0xaf, 0x34, 0x58, 0x1d, 0x60, 0xcf, 0xba, 0x6d, + 0x99, 0x94, 0xc5, 0x38, 0xbc, 0x13, 0x4b, 0xf3, 0xeb, 0x89, 0x34, 0xbf, 0x38, 0x8a, 0x3f, 0x92, + 0xf6, 0x9b, 0xc1, 0x05, 0xa5, 0x62, 0x58, 0xd2, 0xcd, 0x6f, 0x3b, 0x85, 0x3e, 0x83, 0x95, 0x16, + 0x20, 0xc5, 0x2f, 0x03, 0xb6, 0x01, 0xac, 0x63, 0xd7, 0xbb, 0xef, 0x60, 0xcb, 0x15, 0x92, 0x68, + 0x83, 0xc8, 0xab, 0xbf, 0x72, 0xb8, 0xa0, 0x65, 0x1c, 0xfa, 0x8a, 0xd4, 0x02, 0x6e, 0xf6, 0xa0, + 0xa1, 0x3e, 0x12, 0xe0, 0x4f, 0xc1, 0x94, 0x43, 0xb0, 0x6b, 0x5b, 0xb9, 0x0c, 0xb7, 0x22, 0x08, + 0x16, 0xc4, 0x77, 0x91, 0x3c, 0x85, 0x3f, 0x03, 0xd3, 0x0d, 0xe2, 0xba, 0xb8, 0x46, 0x72, 0x93, + 0x9c, 0x30, 0x28, 0xaf, 0x5b, 0x62, 0x1b, 0xf9, 0xe7, 0xea, 0x57, 0x0a, 0x38, 0x3f, 0xc0, 0x8f, + 0x9b, 0xd4, 0xf5, 0xe0, 0x5f, 0x7a, 0xb2, 0x52, 0x3b, 0x9c, 0x81, 0x8c, 0x9b, 0xe7, 0x64, 0x50, + 0x0f, 0xfc, 0x9d, 0x48, 0x46, 0xee, 0x80, 0x49, 0xea, 0x91, 0x86, 0x5f, 0x67, 0x4a, 0xe3, 0xa7, + 0x4d, 0x58, 0xc1, 0x37, 0x18, 0x10, 0x12, 0x78, 0xea, 0x8b, 0xf4, 0x40, 0xb3, 0x58, 0xda, 0xc2, + 0x36, 0x58, 0xe0, 0x2b, 0xd9, 0x33, 0xc9, 0xae, 0x34, 0x6e, 0x58, 0x51, 0x18, 0x32, 0xa3, 0xe8, + 0x67, 0xa5, 0x16, 0x0b, 0x95, 0x18, 0x2a, 0x4a, 0x48, 0x81, 0xd7, 0x40, 0xb6, 0x41, 0x2d, 0x44, + 0x9a, 0x75, 0x6a, 0x60, 0x57, 0x36, 0xa1, 0xc5, 0x6e, 0xa7, 0x90, 0xdd, 0x0a, 0xb7, 0x51, 0x94, + 0x06, 0xfe, 0x1a, 0x64, 0x1b, 0xf8, 0x49, 0xc0, 0x22, 0x9a, 0xc5, 0x29, 0x29, 0x2f, 0xbb, 0x15, + 0x1e, 0xa1, 0x28, 0x1d, 0x2c, 0xb3, 0x18, 0x60, 0x6d, 0xd6, 0xcd, 0x65, 0xb8, 0x73, 0x7f, 0x32, + 0xb2, 0x21, 0xf3, 0xf2, 
0x16, 0x09, 0x15, 0xce, 0x8d, 0x7c, 0x18, 0x68, 0x82, 0x99, 0xaa, 0x2c, + 0x35, 0x3c, 0xac, 0xb2, 0xa5, 0xdf, 0xbc, 0xc3, 0x7d, 0x49, 0x04, 0x7d, 0x8e, 0x85, 0x84, 0xbf, + 0x42, 0x01, 0xb2, 0xfa, 0x3c, 0x03, 0x2e, 0x0c, 0x2d, 0x91, 0xf0, 0x77, 0x00, 0xda, 0x55, 0x97, + 0x38, 0x6d, 0x62, 0xfe, 0x5e, 0x3c, 0x12, 0xd8, 0x4c, 0xc7, 0xee, 0x2f, 0xad, 0x9f, 0x65, 0xd9, + 0xb4, 0xdd, 0x73, 0x8a, 0xfa, 0x70, 0x40, 0x03, 0xcc, 0xb3, 0x1c, 0x13, 0x37, 0x46, 0xe5, 0xf8, + 0x38, 0x5e, 0x02, 0x2f, 0xb3, 0x69, 0x60, 0x33, 0x0a, 0x82, 0xe2, 0x98, 0x70, 0x0d, 0x2c, 0xca, + 0x49, 0x26, 0x71, 0x83, 0xe7, 0xa4, 0x9f, 0x17, 0xd7, 0xe3, 0xc7, 0x28, 0x49, 0xcf, 0x20, 0x4c, + 0xe2, 0x52, 0x87, 0x98, 0x01, 0x44, 0x26, 0x0e, 0x71, 0x3b, 0x7e, 0x8c, 0x92, 0xf4, 0xb0, 0x06, + 0x16, 0x24, 0xaa, 0xbc, 0xd5, 0xdc, 0x24, 0x8f, 0x89, 0xd1, 0x43, 0xa6, 0x6c, 0x4b, 0x41, 0x7c, + 0xaf, 0xc7, 0x60, 0x50, 0x02, 0x16, 0xda, 0x00, 0x18, 0x7e, 0xd1, 0x74, 0x73, 0x53, 0x5c, 0xc8, + 0xad, 0xf1, 0xa3, 0x24, 0x28, 0xbc, 0x61, 0x47, 0x0f, 0xb6, 0x5c, 0x14, 0x11, 0xa1, 0xfe, 0x57, + 0x01, 0x4b, 0xc9, 0x21, 0x35, 0x78, 0x0f, 0x28, 0x03, 0xdf, 0x03, 0x7f, 0x05, 0x33, 0x62, 0xe6, + 0xb1, 0x1d, 0x79, 0xed, 0xbf, 0x3a, 0x64, 0x59, 0xc3, 0x55, 0x52, 0xaf, 0x48, 0x56, 0x11, 0xc4, + 0xfe, 0x0a, 0x05, 0x90, 0xea, 0xb3, 0x0c, 0x00, 0x61, 0x4e, 0xc1, 0xeb, 0xb1, 0x3e, 0xb6, 0x9a, + 0xe8, 0x63, 0x4b, 0xd1, 0xc7, 0x45, 0xa4, 0x67, 0xdd, 0x03, 0x53, 0x36, 0x2f, 0x33, 0x52, 0xc3, + 0xab, 0x43, 0xfc, 0x18, 0xcc, 0x3b, 0x01, 0x90, 0x0e, 0x58, 0x63, 0x90, 0x75, 0x4a, 0x02, 0xc1, + 0x0d, 0x90, 0x69, 0xda, 0xa6, 0x3f, 0xa5, 0x0c, 0x1b, 0xeb, 0xca, 0xb6, 0xe9, 0xc6, 0xe0, 0x66, + 0x98, 0xc6, 0x6c, 0x17, 0x71, 0x08, 0x36, 0x25, 0xfa, 0x9f, 0x12, 0x78, 0x38, 0x66, 0x4b, 0xc5, + 0x21, 0x70, 0xfd, 0x1e, 0xec, 0xc2, 0x7b, 0xfe, 0x09, 0x0a, 0xe0, 0xe0, 0x3f, 0xc0, 0xb2, 0x91, + 0x7c, 0x00, 0xe7, 0xa6, 0x47, 0x0e, 0x56, 0x43, 0xbf, 0x0e, 0xe8, 0x67, 0xba, 0x9d, 0xc2, 0x72, + 0x0f, 0x09, 0xea, 0x95, 0xc4, 0x2c, 0x23, 0xf2, 0xdd, 0x24, 0xeb, 0xdc, 0x30, 0xcb, 0xfa, 0xbd, + 0x10, 0x85, 0x65, 0xfe, 0x09, 0x0a, 0xe0, 0xd4, 0xff, 0x65, 0xc0, 0x5c, 0xec, 0x2d, 0x76, 0xcc, + 0x91, 0x21, 0x92, 0xf9, 0xc8, 0x22, 0x43, 0xc0, 0x1d, 0x69, 0x64, 0x08, 0xc8, 0x63, 0x8a, 0x0c, + 0x21, 0xec, 0x98, 0x22, 0x23, 0x62, 0x59, 0x9f, 0xc8, 0xf8, 0x3c, 0xe5, 0x47, 0x86, 0x18, 0x16, + 0x0e, 0x17, 0x19, 0x82, 0x36, 0x12, 0x19, 0xdb, 0xd1, 0xe7, 0xed, 0x88, 0x59, 0x4d, 0xf3, 0xdd, + 0xaa, 0xdd, 0x6b, 0x61, 0xcb, 0xa3, 0xde, 0x81, 0x3e, 0xdb, 0xf3, 0x14, 0x36, 0xc1, 0x1c, 0x6e, + 0x13, 0x07, 0xd7, 0x08, 0xdf, 0x96, 0xf1, 0x31, 0x2e, 0xee, 0x12, 0x7b, 0x89, 0xae, 0x45, 0x70, + 0x50, 0x0c, 0x95, 0xb5, 0x74, 0xb9, 0x7e, 0xe0, 0x05, 0x4f, 0x5c, 0xd9, 0xe5, 0x78, 0x4b, 0x5f, + 0xeb, 0x39, 0x45, 0x7d, 0x38, 0xd4, 0xff, 0xa4, 0xc0, 0x72, 0xcf, 0xc7, 0x85, 0xd0, 0x29, 0xca, + 0x7b, 0x72, 0x4a, 0xea, 0x18, 0x9d, 0x92, 0x1e, 0xdb, 0x29, 0x1f, 0xa5, 0x00, 0xec, 0xed, 0x0f, + 0xf0, 0x80, 0x8f, 0x15, 0x86, 0x43, 0xab, 0xc4, 0x14, 0xc7, 0x3f, 0x70, 0x06, 0x8e, 0x8e, 0x23, + 0x51, 0x58, 0x94, 0x94, 0x73, 0xf4, 0x1f, 0x59, 0xc3, 0x4f, 0x5a, 0xe9, 0x23, 0xfb, 0xa4, 0xa5, + 0x7e, 0x9a, 0xf4, 0xdb, 0x09, 0xfc, 0x7c, 0xd6, 0xef, 0x96, 0xd3, 0xc7, 0x73, 0xcb, 0xea, 0xc7, + 0x0a, 0x58, 0x4a, 0x8e, 0x11, 0x27, 0xe4, 0xdb, 0xe9, 0x67, 0x71, 0xd5, 0x4f, 0xe2, 0x77, 0xd3, + 0xe7, 0x0a, 0x38, 0x7d, 0x72, 0xfe, 0x26, 0x51, 0x3f, 0xec, 0x55, 0xf7, 0x04, 0xfc, 0xd9, 0xa1, + 0x5f, 0x7e, 0xf9, 0x26, 0x3f, 0xf1, 0xea, 0x4d, 0x7e, 0xe2, 0xf5, 0x9b, 0xfc, 0xc4, 0x3f, 0xbb, + 0x79, 0xe5, 0x65, 0x37, 0xaf, 0xbc, 0xea, 0xe6, 
0x95, 0xd7, 0xdd, 0xbc, 0xf2, 0x75, 0x37, 0xaf, + 0xfc, 0xfb, 0x9b, 0xfc, 0xc4, 0x9f, 0x53, 0xed, 0xd2, 0xf7, 0x01, 0x00, 0x00, 0xff, 0xff, 0x72, + 0xba, 0x02, 0x95, 0x47, 0x1c, 0x00, 0x00, +} + +func (m *ContainerResourceMetricSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ContainerResourceMetricSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ContainerResourceMetricSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Container) + copy(dAtA[i:], m.Container) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Container))) + i-- + dAtA[i] = 0x1a + { + size, err := m.Target.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ContainerResourceMetricStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ContainerResourceMetricStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ContainerResourceMetricStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Container) + copy(dAtA[i:], m.Container) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Container))) + i-- + dAtA[i] = 0x1a + { + size, err := m.Current.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *CrossVersionObjectReference) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CrossVersionObjectReference) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CrossVersionObjectReference) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.APIVersion) + copy(dAtA[i:], m.APIVersion) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIVersion))) + i-- + dAtA[i] = 0x1a + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x12 + i -= len(m.Kind) + copy(dAtA[i:], m.Kind) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ExternalMetricSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ExternalMetricSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m 
*ExternalMetricSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Target.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.Metric.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ExternalMetricStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ExternalMetricStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ExternalMetricStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Current.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.Metric.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *HPAScalingPolicy) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HPAScalingPolicy) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *HPAScalingPolicy) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i = encodeVarintGenerated(dAtA, i, uint64(m.PeriodSeconds)) + i-- + dAtA[i] = 0x18 + i = encodeVarintGenerated(dAtA, i, uint64(m.Value)) + i-- + dAtA[i] = 0x10 + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *HPAScalingRules) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HPAScalingRules) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *HPAScalingRules) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.StabilizationWindowSeconds != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.StabilizationWindowSeconds)) + i-- + dAtA[i] = 0x18 + } + if len(m.Policies) > 0 { + for iNdEx := len(m.Policies) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Policies[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if m.SelectPolicy != nil { + i -= len(*m.SelectPolicy) + copy(dAtA[i:], *m.SelectPolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.SelectPolicy))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *HorizontalPodAutoscaler) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + 
return nil, err + } + return dAtA[:n], nil +} + +func (m *HorizontalPodAutoscaler) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *HorizontalPodAutoscaler) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *HorizontalPodAutoscalerBehavior) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HorizontalPodAutoscalerBehavior) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *HorizontalPodAutoscalerBehavior) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.ScaleDown != nil { + { + size, err := m.ScaleDown.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.ScaleUp != nil { + { + size, err := m.ScaleUp.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *HorizontalPodAutoscalerCondition) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HorizontalPodAutoscalerCondition) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *HorizontalPodAutoscalerCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x2a + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x22 + { + size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + i -= len(m.Status) + copy(dAtA[i:], m.Status) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i-- + dAtA[i] = 0x12 + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *HorizontalPodAutoscalerList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HorizontalPodAutoscalerList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return 
m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *HorizontalPodAutoscalerList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *HorizontalPodAutoscalerSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HorizontalPodAutoscalerSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *HorizontalPodAutoscalerSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Behavior != nil { + { + size, err := m.Behavior.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + if len(m.Metrics) > 0 { + for iNdEx := len(m.Metrics) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Metrics[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + } + i = encodeVarintGenerated(dAtA, i, uint64(m.MaxReplicas)) + i-- + dAtA[i] = 0x18 + if m.MinReplicas != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.MinReplicas)) + i-- + dAtA[i] = 0x10 + } + { + size, err := m.ScaleTargetRef.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *HorizontalPodAutoscalerStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HorizontalPodAutoscalerStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *HorizontalPodAutoscalerStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Conditions) > 0 { + for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + } + if len(m.CurrentMetrics) > 0 { + for iNdEx := len(m.CurrentMetrics) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.CurrentMetrics[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + } + i = encodeVarintGenerated(dAtA, i, uint64(m.DesiredReplicas)) + i-- + dAtA[i] = 0x20 + i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentReplicas)) + i-- + dAtA[i] = 0x18 + if m.LastScaleTime != nil { + { + size, err := m.LastScaleTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + 
} + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.ObservedGeneration != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.ObservedGeneration)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *MetricIdentifier) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MetricIdentifier) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MetricIdentifier) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Selector != nil { + { + size, err := m.Selector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *MetricSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MetricSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MetricSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.ContainerResource != nil { + { + size, err := m.ContainerResource.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + } + if m.External != nil { + { + size, err := m.External.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + if m.Resource != nil { + { + size, err := m.Resource.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if m.Pods != nil { + { + size, err := m.Pods.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.Object != nil { + { + size, err := m.Object.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *MetricStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MetricStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MetricStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.ContainerResource != nil { + { + size, err := m.ContainerResource.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + } + if 
m.External != nil { + { + size, err := m.External.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + if m.Resource != nil { + { + size, err := m.Resource.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if m.Pods != nil { + { + size, err := m.Pods.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.Object != nil { + { + size, err := m.Object.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *MetricTarget) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MetricTarget) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MetricTarget) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.AverageUtilization != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.AverageUtilization)) + i-- + dAtA[i] = 0x20 + } + if m.AverageValue != nil { + { + size, err := m.AverageValue.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.Value != nil { + { + size, err := m.Value.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *MetricValueStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MetricValueStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MetricValueStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.AverageUtilization != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.AverageUtilization)) + i-- + dAtA[i] = 0x18 + } + if m.AverageValue != nil { + { + size, err := m.AverageValue.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.Value != nil { + { + size, err := m.Value.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ObjectMetricSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m 
*ObjectMetricSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ObjectMetricSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Metric.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size, err := m.Target.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.DescribedObject.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ObjectMetricStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ObjectMetricStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ObjectMetricStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.DescribedObject.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size, err := m.Current.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.Metric.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *PodsMetricSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PodsMetricSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodsMetricSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Target.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.Metric.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *PodsMetricStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PodsMetricStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodsMetricStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Current.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := 
m.Metric.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ResourceMetricSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResourceMetricSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourceMetricSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Target.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ResourceMetricStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResourceMetricStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourceMetricStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Current.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *ContainerResourceMetricSource) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = m.Target.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Container) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ContainerResourceMetricStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = m.Current.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Container) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *CrossVersionObjectReference) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Kind) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.APIVersion) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ExternalMetricSource) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Metric.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Target.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ExternalMetricStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Metric.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Current.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *HPAScalingPolicy) Size() (n int) { 
+ if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.Value)) + n += 1 + sovGenerated(uint64(m.PeriodSeconds)) + return n +} + +func (m *HPAScalingRules) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.SelectPolicy != nil { + l = len(*m.SelectPolicy) + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.Policies) > 0 { + for _, e := range m.Policies { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.StabilizationWindowSeconds != nil { + n += 1 + sovGenerated(uint64(*m.StabilizationWindowSeconds)) + } + return n +} + +func (m *HorizontalPodAutoscaler) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *HorizontalPodAutoscalerBehavior) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ScaleUp != nil { + l = m.ScaleUp.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.ScaleDown != nil { + l = m.ScaleDown.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *HorizontalPodAutoscalerCondition) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Status) + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastTransitionTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *HorizontalPodAutoscalerList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *HorizontalPodAutoscalerSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ScaleTargetRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.MinReplicas != nil { + n += 1 + sovGenerated(uint64(*m.MinReplicas)) + } + n += 1 + sovGenerated(uint64(m.MaxReplicas)) + if len(m.Metrics) > 0 { + for _, e := range m.Metrics { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.Behavior != nil { + l = m.Behavior.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *HorizontalPodAutoscalerStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ObservedGeneration != nil { + n += 1 + sovGenerated(uint64(*m.ObservedGeneration)) + } + if m.LastScaleTime != nil { + l = m.LastScaleTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + n += 1 + sovGenerated(uint64(m.CurrentReplicas)) + n += 1 + sovGenerated(uint64(m.DesiredReplicas)) + if len(m.CurrentMetrics) > 0 { + for _, e := range m.CurrentMetrics { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *MetricIdentifier) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + if m.Selector != nil { + l = m.Selector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *MetricSpec) Size() (n int) { + if m == nil { + return 0 + } 
+ var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + if m.Object != nil { + l = m.Object.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Pods != nil { + l = m.Pods.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Resource != nil { + l = m.Resource.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.External != nil { + l = m.External.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.ContainerResource != nil { + l = m.ContainerResource.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *MetricStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + if m.Object != nil { + l = m.Object.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Pods != nil { + l = m.Pods.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Resource != nil { + l = m.Resource.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.External != nil { + l = m.External.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.ContainerResource != nil { + l = m.ContainerResource.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *MetricTarget) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + if m.Value != nil { + l = m.Value.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.AverageValue != nil { + l = m.AverageValue.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.AverageUtilization != nil { + n += 1 + sovGenerated(uint64(*m.AverageUtilization)) + } + return n +} + +func (m *MetricValueStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Value != nil { + l = m.Value.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.AverageValue != nil { + l = m.AverageValue.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.AverageUtilization != nil { + n += 1 + sovGenerated(uint64(*m.AverageUtilization)) + } + return n +} + +func (m *ObjectMetricSource) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.DescribedObject.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Target.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Metric.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ObjectMetricStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Metric.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Current.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.DescribedObject.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *PodsMetricSource) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Metric.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Target.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *PodsMetricStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Metric.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Current.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ResourceMetricSource) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = m.Target.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ResourceMetricStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = m.Current.Size() + n += 1 + l + 
sovGenerated(uint64(l)) + return n +} + +func sovGenerated(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *ContainerResourceMetricSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ContainerResourceMetricSource{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Target:` + strings.Replace(strings.Replace(this.Target.String(), "MetricTarget", "MetricTarget", 1), `&`, ``, 1) + `,`, + `Container:` + fmt.Sprintf("%v", this.Container) + `,`, + `}`, + }, "") + return s +} +func (this *ContainerResourceMetricStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ContainerResourceMetricStatus{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Current:` + strings.Replace(strings.Replace(this.Current.String(), "MetricValueStatus", "MetricValueStatus", 1), `&`, ``, 1) + `,`, + `Container:` + fmt.Sprintf("%v", this.Container) + `,`, + `}`, + }, "") + return s +} +func (this *CrossVersionObjectReference) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CrossVersionObjectReference{`, + `Kind:` + fmt.Sprintf("%v", this.Kind) + `,`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `APIVersion:` + fmt.Sprintf("%v", this.APIVersion) + `,`, + `}`, + }, "") + return s +} +func (this *ExternalMetricSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ExternalMetricSource{`, + `Metric:` + strings.Replace(strings.Replace(this.Metric.String(), "MetricIdentifier", "MetricIdentifier", 1), `&`, ``, 1) + `,`, + `Target:` + strings.Replace(strings.Replace(this.Target.String(), "MetricTarget", "MetricTarget", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ExternalMetricStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ExternalMetricStatus{`, + `Metric:` + strings.Replace(strings.Replace(this.Metric.String(), "MetricIdentifier", "MetricIdentifier", 1), `&`, ``, 1) + `,`, + `Current:` + strings.Replace(strings.Replace(this.Current.String(), "MetricValueStatus", "MetricValueStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *HPAScalingPolicy) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&HPAScalingPolicy{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Value:` + fmt.Sprintf("%v", this.Value) + `,`, + `PeriodSeconds:` + fmt.Sprintf("%v", this.PeriodSeconds) + `,`, + `}`, + }, "") + return s +} +func (this *HPAScalingRules) String() string { + if this == nil { + return "nil" + } + repeatedStringForPolicies := "[]HPAScalingPolicy{" + for _, f := range this.Policies { + repeatedStringForPolicies += strings.Replace(strings.Replace(f.String(), "HPAScalingPolicy", "HPAScalingPolicy", 1), `&`, ``, 1) + "," + } + repeatedStringForPolicies += "}" + s := strings.Join([]string{`&HPAScalingRules{`, + `SelectPolicy:` + valueToStringGenerated(this.SelectPolicy) + `,`, + `Policies:` + repeatedStringForPolicies + `,`, + `StabilizationWindowSeconds:` + valueToStringGenerated(this.StabilizationWindowSeconds) + `,`, + `}`, + }, "") + return s +} +func (this *HorizontalPodAutoscaler) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&HorizontalPodAutoscaler{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 
1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "HorizontalPodAutoscalerSpec", "HorizontalPodAutoscalerSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "HorizontalPodAutoscalerStatus", "HorizontalPodAutoscalerStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *HorizontalPodAutoscalerBehavior) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&HorizontalPodAutoscalerBehavior{`, + `ScaleUp:` + strings.Replace(this.ScaleUp.String(), "HPAScalingRules", "HPAScalingRules", 1) + `,`, + `ScaleDown:` + strings.Replace(this.ScaleDown.String(), "HPAScalingRules", "HPAScalingRules", 1) + `,`, + `}`, + }, "") + return s +} +func (this *HorizontalPodAutoscalerCondition) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&HorizontalPodAutoscalerCondition{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Status:` + fmt.Sprintf("%v", this.Status) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `}`, + }, "") + return s +} +func (this *HorizontalPodAutoscalerList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]HorizontalPodAutoscaler{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "HorizontalPodAutoscaler", "HorizontalPodAutoscaler", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&HorizontalPodAutoscalerList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *HorizontalPodAutoscalerSpec) String() string { + if this == nil { + return "nil" + } + repeatedStringForMetrics := "[]MetricSpec{" + for _, f := range this.Metrics { + repeatedStringForMetrics += strings.Replace(strings.Replace(f.String(), "MetricSpec", "MetricSpec", 1), `&`, ``, 1) + "," + } + repeatedStringForMetrics += "}" + s := strings.Join([]string{`&HorizontalPodAutoscalerSpec{`, + `ScaleTargetRef:` + strings.Replace(strings.Replace(this.ScaleTargetRef.String(), "CrossVersionObjectReference", "CrossVersionObjectReference", 1), `&`, ``, 1) + `,`, + `MinReplicas:` + valueToStringGenerated(this.MinReplicas) + `,`, + `MaxReplicas:` + fmt.Sprintf("%v", this.MaxReplicas) + `,`, + `Metrics:` + repeatedStringForMetrics + `,`, + `Behavior:` + strings.Replace(this.Behavior.String(), "HorizontalPodAutoscalerBehavior", "HorizontalPodAutoscalerBehavior", 1) + `,`, + `}`, + }, "") + return s +} +func (this *HorizontalPodAutoscalerStatus) String() string { + if this == nil { + return "nil" + } + repeatedStringForCurrentMetrics := "[]MetricStatus{" + for _, f := range this.CurrentMetrics { + repeatedStringForCurrentMetrics += strings.Replace(strings.Replace(f.String(), "MetricStatus", "MetricStatus", 1), `&`, ``, 1) + "," + } + repeatedStringForCurrentMetrics += "}" + repeatedStringForConditions := "[]HorizontalPodAutoscalerCondition{" + for _, f := range this.Conditions { + repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "HorizontalPodAutoscalerCondition", "HorizontalPodAutoscalerCondition", 1), `&`, ``, 1) + "," + } + 
repeatedStringForConditions += "}" + s := strings.Join([]string{`&HorizontalPodAutoscalerStatus{`, + `ObservedGeneration:` + valueToStringGenerated(this.ObservedGeneration) + `,`, + `LastScaleTime:` + strings.Replace(fmt.Sprintf("%v", this.LastScaleTime), "Time", "v1.Time", 1) + `,`, + `CurrentReplicas:` + fmt.Sprintf("%v", this.CurrentReplicas) + `,`, + `DesiredReplicas:` + fmt.Sprintf("%v", this.DesiredReplicas) + `,`, + `CurrentMetrics:` + repeatedStringForCurrentMetrics + `,`, + `Conditions:` + repeatedStringForConditions + `,`, + `}`, + }, "") + return s +} +func (this *MetricIdentifier) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&MetricIdentifier{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `}`, + }, "") + return s +} +func (this *MetricSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&MetricSpec{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Object:` + strings.Replace(this.Object.String(), "ObjectMetricSource", "ObjectMetricSource", 1) + `,`, + `Pods:` + strings.Replace(this.Pods.String(), "PodsMetricSource", "PodsMetricSource", 1) + `,`, + `Resource:` + strings.Replace(this.Resource.String(), "ResourceMetricSource", "ResourceMetricSource", 1) + `,`, + `External:` + strings.Replace(this.External.String(), "ExternalMetricSource", "ExternalMetricSource", 1) + `,`, + `ContainerResource:` + strings.Replace(this.ContainerResource.String(), "ContainerResourceMetricSource", "ContainerResourceMetricSource", 1) + `,`, + `}`, + }, "") + return s +} +func (this *MetricStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&MetricStatus{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Object:` + strings.Replace(this.Object.String(), "ObjectMetricStatus", "ObjectMetricStatus", 1) + `,`, + `Pods:` + strings.Replace(this.Pods.String(), "PodsMetricStatus", "PodsMetricStatus", 1) + `,`, + `Resource:` + strings.Replace(this.Resource.String(), "ResourceMetricStatus", "ResourceMetricStatus", 1) + `,`, + `External:` + strings.Replace(this.External.String(), "ExternalMetricStatus", "ExternalMetricStatus", 1) + `,`, + `ContainerResource:` + strings.Replace(this.ContainerResource.String(), "ContainerResourceMetricStatus", "ContainerResourceMetricStatus", 1) + `,`, + `}`, + }, "") + return s +} +func (this *MetricTarget) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&MetricTarget{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Value:` + strings.Replace(fmt.Sprintf("%v", this.Value), "Quantity", "resource.Quantity", 1) + `,`, + `AverageValue:` + strings.Replace(fmt.Sprintf("%v", this.AverageValue), "Quantity", "resource.Quantity", 1) + `,`, + `AverageUtilization:` + valueToStringGenerated(this.AverageUtilization) + `,`, + `}`, + }, "") + return s +} +func (this *MetricValueStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&MetricValueStatus{`, + `Value:` + strings.Replace(fmt.Sprintf("%v", this.Value), "Quantity", "resource.Quantity", 1) + `,`, + `AverageValue:` + strings.Replace(fmt.Sprintf("%v", this.AverageValue), "Quantity", "resource.Quantity", 1) + `,`, + `AverageUtilization:` + valueToStringGenerated(this.AverageUtilization) + `,`, + `}`, + }, "") + return s +} +func (this *ObjectMetricSource) String() string { + if this == nil { + return "nil" + } + s := 
strings.Join([]string{`&ObjectMetricSource{`, + `DescribedObject:` + strings.Replace(strings.Replace(this.DescribedObject.String(), "CrossVersionObjectReference", "CrossVersionObjectReference", 1), `&`, ``, 1) + `,`, + `Target:` + strings.Replace(strings.Replace(this.Target.String(), "MetricTarget", "MetricTarget", 1), `&`, ``, 1) + `,`, + `Metric:` + strings.Replace(strings.Replace(this.Metric.String(), "MetricIdentifier", "MetricIdentifier", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ObjectMetricStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ObjectMetricStatus{`, + `Metric:` + strings.Replace(strings.Replace(this.Metric.String(), "MetricIdentifier", "MetricIdentifier", 1), `&`, ``, 1) + `,`, + `Current:` + strings.Replace(strings.Replace(this.Current.String(), "MetricValueStatus", "MetricValueStatus", 1), `&`, ``, 1) + `,`, + `DescribedObject:` + strings.Replace(strings.Replace(this.DescribedObject.String(), "CrossVersionObjectReference", "CrossVersionObjectReference", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *PodsMetricSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodsMetricSource{`, + `Metric:` + strings.Replace(strings.Replace(this.Metric.String(), "MetricIdentifier", "MetricIdentifier", 1), `&`, ``, 1) + `,`, + `Target:` + strings.Replace(strings.Replace(this.Target.String(), "MetricTarget", "MetricTarget", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *PodsMetricStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodsMetricStatus{`, + `Metric:` + strings.Replace(strings.Replace(this.Metric.String(), "MetricIdentifier", "MetricIdentifier", 1), `&`, ``, 1) + `,`, + `Current:` + strings.Replace(strings.Replace(this.Current.String(), "MetricValueStatus", "MetricValueStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ResourceMetricSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ResourceMetricSource{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Target:` + strings.Replace(strings.Replace(this.Target.String(), "MetricTarget", "MetricTarget", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ResourceMetricStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ResourceMetricStatus{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Current:` + strings.Replace(strings.Replace(this.Current.String(), "MetricValueStatus", "MetricValueStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *ContainerResourceMetricSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ContainerResourceMetricSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ContainerResourceMetricSource: illegal tag %d (wire type 
%d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = k8s_io_api_core_v1.ResourceName(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Target", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Target.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Container", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Container = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ContainerResourceMetricStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ContainerResourceMetricStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ContainerResourceMetricStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + 
if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = k8s_io_api_core_v1.ResourceName(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Current", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Current.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Container", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Container = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CrossVersionObjectReference) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CrossVersionObjectReference: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CrossVersionObjectReference: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Kind = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: 
wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field APIVersion", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.APIVersion = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ExternalMetricSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ExternalMetricSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ExternalMetricSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Metric", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Metric.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Target", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return 
ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Target.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ExternalMetricStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ExternalMetricStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ExternalMetricStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Metric", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Metric.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Current", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Current.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HPAScalingPolicy) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HPAScalingPolicy: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: 
HPAScalingPolicy: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = HPAScalingPolicyType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + m.Value = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Value |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field PeriodSeconds", wireType) + } + m.PeriodSeconds = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.PeriodSeconds |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HPAScalingRules) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HPAScalingRules: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HPAScalingRules: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SelectPolicy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := ScalingPolicySelect(dAtA[iNdEx:postIndex]) + m.SelectPolicy = &s + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Policies", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { 
+ return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Policies = append(m.Policies, HPAScalingPolicy{}) + if err := m.Policies[len(m.Policies)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field StabilizationWindowSeconds", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.StabilizationWindowSeconds = &v + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HorizontalPodAutoscaler) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HorizontalPodAutoscaler: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HorizontalPodAutoscaler: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HorizontalPodAutoscalerBehavior) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HorizontalPodAutoscalerBehavior: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HorizontalPodAutoscalerBehavior: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ScaleUp", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ScaleUp == nil { + m.ScaleUp = &HPAScalingRules{} + } + if err := m.ScaleUp.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ScaleDown", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ScaleDown == nil { + m.ScaleDown = &HPAScalingRules{} + } + if err := m.ScaleDown.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HorizontalPodAutoscalerCondition) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + 
for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HorizontalPodAutoscalerCondition: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HorizontalPodAutoscalerCondition: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = HorizontalPodAutoscalerConditionType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Status = k8s_io_api_core_v1.ConditionStatus(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := 
uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HorizontalPodAutoscalerList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HorizontalPodAutoscalerList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HorizontalPodAutoscalerList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, HorizontalPodAutoscaler{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HorizontalPodAutoscalerSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := 
uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HorizontalPodAutoscalerSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HorizontalPodAutoscalerSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ScaleTargetRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ScaleTargetRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MinReplicas", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.MinReplicas = &v + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxReplicas", wireType) + } + m.MaxReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MaxReplicas |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Metrics", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Metrics = append(m.Metrics, MetricSpec{}) + if err := m.Metrics[len(m.Metrics)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Behavior", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Behavior == nil { + m.Behavior = &HorizontalPodAutoscalerBehavior{} + } + if err := m.Behavior.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + 
skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HorizontalPodAutoscalerStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HorizontalPodAutoscalerStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HorizontalPodAutoscalerStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ObservedGeneration", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.ObservedGeneration = &v + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastScaleTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.LastScaleTime == nil { + m.LastScaleTime = &v1.Time{} + } + if err := m.LastScaleTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CurrentReplicas", wireType) + } + m.CurrentReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.CurrentReplicas |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DesiredReplicas", wireType) + } + m.DesiredReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.DesiredReplicas |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CurrentMetrics", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return 
io.ErrUnexpectedEOF + } + m.CurrentMetrics = append(m.CurrentMetrics, MetricStatus{}) + if err := m.CurrentMetrics[len(m.CurrentMetrics)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Conditions = append(m.Conditions, HorizontalPodAutoscalerCondition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MetricIdentifier) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MetricIdentifier: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MetricIdentifier: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Selector == nil { + m.Selector = &v1.LabelSelector{} + } + if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err 
!= nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MetricSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MetricSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MetricSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = MetricSourceType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Object", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Object == nil { + m.Object = &ObjectMetricSource{} + } + if err := m.Object.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pods", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Pods == nil { + m.Pods = &PodsMetricSource{} + } + if err := m.Pods.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen 
+ if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Resource == nil { + m.Resource = &ResourceMetricSource{} + } + if err := m.Resource.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field External", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.External == nil { + m.External = &ExternalMetricSource{} + } + if err := m.External.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ContainerResource", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ContainerResource == nil { + m.ContainerResource = &ContainerResourceMetricSource{} + } + if err := m.ContainerResource.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MetricStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MetricStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MetricStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = MetricSourceType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + 
case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Object", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Object == nil { + m.Object = &ObjectMetricStatus{} + } + if err := m.Object.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pods", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Pods == nil { + m.Pods = &PodsMetricStatus{} + } + if err := m.Pods.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Resource == nil { + m.Resource = &ResourceMetricStatus{} + } + if err := m.Resource.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field External", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.External == nil { + m.External = &ExternalMetricStatus{} + } + if err := m.External.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ContainerResource", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if 
m.ContainerResource == nil { + m.ContainerResource = &ContainerResourceMetricStatus{} + } + if err := m.ContainerResource.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MetricTarget) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MetricTarget: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MetricTarget: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = MetricTargetType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Value == nil { + m.Value = &resource.Quantity{} + } + if err := m.Value.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AverageValue", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AverageValue == nil { + m.AverageValue = &resource.Quantity{} + } + if err := m.AverageValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AverageUtilization", wireType) + } + var v 
int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.AverageUtilization = &v + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MetricValueStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MetricValueStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MetricValueStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Value == nil { + m.Value = &resource.Quantity{} + } + if err := m.Value.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AverageValue", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AverageValue == nil { + m.AverageValue = &resource.Quantity{} + } + if err := m.AverageValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AverageUtilization", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.AverageUtilization = &v + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} 
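Every Unmarshal method in this generated file decodes its tags, lengths, and integer fields with the same inlined base-128 varint loop: each byte contributes 7 low-order bits, least-significant group first, and the high bit signals that another byte follows; the loop errors out once the shift passes 64 bits. For illustration only (this sketch is not part of the vendored file), the pattern in isolation:

package main

import (
	"errors"
	"fmt"
)

// decodeVarint mirrors the inlined loop used by the generated Unmarshal
// methods above: accumulate 7 bits per byte until the continuation bit
// (0x80) is clear, and reject encodings longer than 64 bits.
func decodeVarint(data []byte) (v uint64, n int, err error) {
	for shift := uint(0); ; shift += 7 {
		if shift >= 64 {
			return 0, 0, errors.New("varint overflows 64 bits")
		}
		if n >= len(data) {
			return 0, 0, errors.New("unexpected end of input")
		}
		b := data[n]
		n++
		v |= uint64(b&0x7F) << shift
		if b < 0x80 { // continuation bit clear: this was the last byte
			return v, n, nil
		}
	}
}

func main() {
	// 0x96 0x01 encodes 150: 0x16 + (0x01 << 7) = 22 + 128.
	v, n, err := decodeVarint([]byte{0x96, 0x01})
	fmt.Println(v, n, err) // 150 2 <nil>
}

The generated code then splits each decoded tag into fieldNum (wire >> 3) and wireType (wire & 0x7), which is why every case above checks the wire type before reading its payload.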
+func (m *ObjectMetricSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ObjectMetricSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ObjectMetricSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DescribedObject", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.DescribedObject.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Target", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Target.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Metric", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Metric.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ObjectMetricStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 
{ + return fmt.Errorf("proto: ObjectMetricStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ObjectMetricStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Metric", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Metric.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Current", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Current.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DescribedObject", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.DescribedObject.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodsMetricSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodsMetricSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodsMetricSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Metric", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { 
+ return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Metric.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Target", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Target.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodsMetricStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodsMetricStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodsMetricStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Metric", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Metric.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Current", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Current.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := 
skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResourceMetricSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourceMetricSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourceMetricSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = k8s_io_api_core_v1.ResourceName(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Target", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Target.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResourceMetricStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourceMetricStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourceMetricStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var 
stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = k8s_io_api_core_v1.ResourceName(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Current", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Current.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupGenerated + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/k8s.io/api/autoscaling/v2/generated.proto b/vendor/k8s.io/api/autoscaling/v2/generated.proto new file mode 100644 index 0000000000000..e47d94a4767a7 --- /dev/null +++ b/vendor/k8s.io/api/autoscaling/v2/generated.proto @@ -0,0 +1,504 @@ +/* +Copyright The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = "proto2"; + +package k8s.io.api.autoscaling.v2; + +import "k8s.io/api/core/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/api/resource/generated.proto"; +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "v2"; + +// ContainerResourceMetricSource indicates how to scale on a resource metric known to +// Kubernetes, as specified in requests and limits, describing each pod in the +// current scale target (e.g. CPU or memory). The values will be averaged +// together before being compared to the target. Such metrics are built in to +// Kubernetes, and have special scaling options on top of those available to +// normal per-pod metrics using the "pods" source. Only one "target" type +// should be set. +message ContainerResourceMetricSource { + // name is the name of the resource in question. + optional string name = 1; + + // target specifies the target value for the given metric + optional MetricTarget target = 2; + + // container is the name of the container in the pods of the scaling target + optional string container = 3; +} + +// ContainerResourceMetricStatus indicates the current value of a resource metric known to +// Kubernetes, as specified in requests and limits, describing a single container in each pod in the +// current scale target (e.g. CPU or memory). Such metrics are built in to +// Kubernetes, and have special scaling options on top of those available to +// normal per-pod metrics using the "pods" source. +message ContainerResourceMetricStatus { + // Name is the name of the resource in question. + optional string name = 1; + + // current contains the current value for the given metric + optional MetricValueStatus current = 2; + + // Container is the name of the container in the pods of the scaling target + optional string container = 3; +} + +// CrossVersionObjectReference contains enough information to let you identify the referred resource. +message CrossVersionObjectReference { + // Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + optional string kind = 1; + + // Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names + optional string name = 2; + + // API version of the referent + // +optional + optional string apiVersion = 3; +} + +// ExternalMetricSource indicates how to scale on a metric not associated with +// any Kubernetes object (for example length of queue in cloud +// messaging service, or QPS from loadbalancer running outside of cluster).
+message ExternalMetricSource { + // metric identifies the target metric by name and selector + optional MetricIdentifier metric = 1; + + // target specifies the target value for the given metric + optional MetricTarget target = 2; +} + +// ExternalMetricStatus indicates the current value of a global metric +// not associated with any Kubernetes object. +message ExternalMetricStatus { + // metric identifies the target metric by name and selector + optional MetricIdentifier metric = 1; + + // current contains the current value for the given metric + optional MetricValueStatus current = 2; +} + +// HPAScalingPolicy is a single policy which must hold true for a specified past interval. +message HPAScalingPolicy { + // Type is used to specify the scaling policy. + optional string type = 1; + + // Value contains the amount of change which is permitted by the policy. + // It must be greater than zero + optional int32 value = 2; + + // PeriodSeconds specifies the window of time for which the policy should hold true. + // PeriodSeconds must be greater than zero and less than or equal to 1800 (30 min). + optional int32 periodSeconds = 3; +} + +// HPAScalingRules configures the scaling behavior for one direction. +// These Rules are applied after calculating DesiredReplicas from metrics for the HPA. +// They can limit the scaling velocity by specifying scaling policies. +// They can prevent flapping by specifying the stabilization window, so that the +// number of replicas is not set instantly, instead, the safest value from the stabilization +// window is chosen. +message HPAScalingRules { + // StabilizationWindowSeconds is the number of seconds for which past recommendations should be + // considered while scaling up or scaling down. + // StabilizationWindowSeconds must be greater than or equal to zero and less than or equal to 3600 (one hour). + // If not set, use the default values: + // - For scale up: 0 (i.e. no stabilization is done). + // - For scale down: 300 (i.e. the stabilization window is 300 seconds long). + // +optional + optional int32 stabilizationWindowSeconds = 3; + + // selectPolicy is used to specify which policy should be used. + // If not set, the default value Max is used. + // +optional + optional string selectPolicy = 1; + + // policies is a list of potential scaling policies which can be used during scaling. + // At least one policy must be specified, otherwise the HPAScalingRules will be discarded as invalid + // +listType=atomic + // +optional + repeated HPAScalingPolicy policies = 2; +}
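These scaling rules are plain data carried on the HPA spec. As a hypothetical illustration (not part of the patch), here is how a conservative scale-down rule could be built with the Go types that this patch adds later in types.go; the import alias autoscalingv2 is an assumption:

package main

import (
	"fmt"

	autoscalingv2 "k8s.io/api/autoscaling/v2"
)

func main() {
	// Allow at most a 10% reduction per 60-second window, judged against
	// the safest recommendation from the last 300 seconds.
	window := int32(300)
	scaleDown := autoscalingv2.HPAScalingRules{
		StabilizationWindowSeconds: &window,
		Policies: []autoscalingv2.HPAScalingPolicy{
			{Type: autoscalingv2.PercentScalingPolicy, Value: 10, PeriodSeconds: 60},
		},
	}
	behavior := autoscalingv2.HorizontalPodAutoscalerBehavior{ScaleDown: &scaleDown}
	fmt.Println(*behavior.ScaleDown.StabilizationWindowSeconds) // 300
}

 + +// HorizontalPodAutoscaler is the configuration for a horizontal pod +// autoscaler, which automatically manages the replica count of any resource +// implementing the scale subresource based on the metrics specified. +message HorizontalPodAutoscaler { + // metadata is the standard object metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // spec is the specification for the behaviour of the autoscaler. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. + // +optional + optional HorizontalPodAutoscalerSpec spec = 2; + + // status is the current information about the autoscaler.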
+ // +optional + optional HorizontalPodAutoscalerStatus status = 3; +} + +// HorizontalPodAutoscalerBehavior configures the scaling behavior of the target +// in both Up and Down directions (scaleUp and scaleDown fields respectively). +message HorizontalPodAutoscalerBehavior { + // scaleUp is scaling policy for scaling Up. + // If not set, the default value is the higher of: + // * increase no more than 4 pods per 60 seconds + // * double the number of pods per 60 seconds + // No stabilization is used. + // +optional + optional HPAScalingRules scaleUp = 1; + + // scaleDown is scaling policy for scaling Down. + // If not set, the default value is to allow to scale down to minReplicas pods, with a + // 300 second stabilization window (i.e., the highest recommendation for + // the last 300sec is used). + // +optional + optional HPAScalingRules scaleDown = 2; +} + +// HorizontalPodAutoscalerCondition describes the state of +// a HorizontalPodAutoscaler at a certain point. +message HorizontalPodAutoscalerCondition { + // type describes the current condition + optional string type = 1; + + // status is the status of the condition (True, False, Unknown) + optional string status = 2; + + // lastTransitionTime is the last time the condition transitioned from + // one status to another + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; + + // reason is the reason for the condition's last transition. + // +optional + optional string reason = 4; + + // message is a human-readable explanation containing details about + // the transition + // +optional + optional string message = 5; +} + +// HorizontalPodAutoscalerList is a list of horizontal pod autoscaler objects. +message HorizontalPodAutoscalerList { + // metadata is the standard list metadata. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // items is the list of horizontal pod autoscaler objects. + repeated HorizontalPodAutoscaler items = 2; +} + +// HorizontalPodAutoscalerSpec describes the desired functionality of the HorizontalPodAutoscaler. +message HorizontalPodAutoscalerSpec { + // scaleTargetRef points to the target resource to scale, and is used to identify the pods for which metrics + // should be collected, as well as to actually change the replica count. + optional CrossVersionObjectReference scaleTargetRef = 1; + + // minReplicas is the lower limit for the number of replicas to which the autoscaler + // can scale down. It defaults to 1 pod. minReplicas is allowed to be 0 if the + // alpha feature gate HPAScaleToZero is enabled and at least one Object or External + // metric is configured. Scaling is active as long as at least one metric value is + // available. + // +optional + optional int32 minReplicas = 2; + + // maxReplicas is the upper limit for the number of replicas to which the autoscaler can scale up. + // It cannot be less than minReplicas. + optional int32 maxReplicas = 3; + + // metrics contains the specifications used to calculate the + // desired replica count (the maximum replica count across all metrics will + // be used). The desired replica count is calculated by multiplying the + // ratio between the target value and the current value by the current + // number of pods. Ergo, metrics used must decrease as the pod count is + // increased, and vice-versa. See the individual metric source types for + // more information about how each type of metric must respond.
+ // If not set, the default metric will be set to 80% average CPU utilization. + // +listType=atomic + // +optional + repeated MetricSpec metrics = 4; + + // behavior configures the scaling behavior of the target + // in both Up and Down directions (scaleUp and scaleDown fields respectively). + // If not set, the default HPAScalingRules for scale up and scale down are used. + // +optional + optional HorizontalPodAutoscalerBehavior behavior = 5; +} + +// HorizontalPodAutoscalerStatus describes the current status of a horizontal pod autoscaler. +message HorizontalPodAutoscalerStatus { + // observedGeneration is the most recent generation observed by this autoscaler. + // +optional + optional int64 observedGeneration = 1; + + // lastScaleTime is the last time the HorizontalPodAutoscaler scaled the number of pods, + // used by the autoscaler to control how often the number of pods is changed. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastScaleTime = 2; + + // currentReplicas is current number of replicas of pods managed by this autoscaler, + // as last seen by the autoscaler. + // +optional + optional int32 currentReplicas = 3; + + // desiredReplicas is the desired number of replicas of pods managed by this autoscaler, + // as last calculated by the autoscaler. + optional int32 desiredReplicas = 4; + + // currentMetrics is the last read state of the metrics used by this autoscaler. + // +listType=atomic + // +optional + repeated MetricStatus currentMetrics = 5; + + // conditions is the set of conditions required for this autoscaler to scale its target, + // and indicates whether or not those conditions are met. + // +patchMergeKey=type + // +patchStrategy=merge + // +listType=map + // +listMapKey=type + // +optional + repeated HorizontalPodAutoscalerCondition conditions = 6; +} + +// MetricIdentifier defines the name and optionally selector for a metric +message MetricIdentifier { + // name is the name of the given metric + optional string name = 1; + + // selector is the string-encoded form of a standard kubernetes label selector for the given metric + // When set, it is passed as an additional parameter to the metrics server for more specific metrics scoping. + // When unset, just the metricName will be used to gather metrics. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2; +} + +// MetricSpec specifies how to scale based on a single metric +// (only `type` and one other matching field should be set at once). +message MetricSpec { + // type is the type of metric source. It should be one of "ContainerResource", "External", + // "Object", "Pods" or "Resource", each mapping to a matching field in the object. + // Note: "ContainerResource" type is available only when the feature-gate + // HPAContainerMetrics is enabled + optional string type = 1; + + // object refers to a metric describing a single kubernetes object + // (for example, hits-per-second on an Ingress object). + // +optional + optional ObjectMetricSource object = 2; + + // pods refers to a metric describing each pod in the current scale target + // (for example, transactions-processed-per-second). The values will be + // averaged together before being compared to the target value. + // +optional + optional PodsMetricSource pods = 3; + + // resource refers to a resource metric (such as those specified in + // requests and limits) known to Kubernetes describing each pod in the + // current scale target (e.g. CPU or memory).
Such metrics are built in to + // Kubernetes, and have special scaling options on top of those available + // to normal per-pod metrics using the "pods" source. + // +optional + optional ResourceMetricSource resource = 4; + + // containerResource refers to a resource metric (such as those specified in + // requests and limits) known to Kubernetes describing a single container in + // each pod of the current scale target (e.g. CPU or memory). Such metrics are + // built in to Kubernetes, and have special scaling options on top of those + // available to normal per-pod metrics using the "pods" source. + // This is an alpha feature and can be enabled by the HPAContainerMetrics feature flag. + // +optional + optional ContainerResourceMetricSource containerResource = 7; + + // external refers to a global metric that is not associated + // with any Kubernetes object. It allows autoscaling based on information + // coming from components running outside of cluster + // (for example length of queue in cloud messaging service, or + // QPS from loadbalancer running outside of cluster). + // +optional + optional ExternalMetricSource external = 5; +} + +// MetricStatus describes the last-read state of a single metric. +message MetricStatus { + // type is the type of metric source. It will be one of "ContainerResource", "External", + // "Object", "Pods" or "Resource", each corresponding to a matching field in the object. + // Note: "ContainerResource" type is available only when the feature-gate + // HPAContainerMetrics is enabled + optional string type = 1; + + // object refers to a metric describing a single kubernetes object + // (for example, hits-per-second on an Ingress object). + // +optional + optional ObjectMetricStatus object = 2; + + // pods refers to a metric describing each pod in the current scale target + // (for example, transactions-processed-per-second). The values will be + // averaged together before being compared to the target value. + // +optional + optional PodsMetricStatus pods = 3; + + // resource refers to a resource metric (such as those specified in + // requests and limits) known to Kubernetes describing each pod in the + // current scale target (e.g. CPU or memory). Such metrics are built in to + // Kubernetes, and have special scaling options on top of those available + // to normal per-pod metrics using the "pods" source. + // +optional + optional ResourceMetricStatus resource = 4; + + // containerResource refers to a resource metric (such as those specified in + // requests and limits) known to Kubernetes describing a single container in each pod in the + // current scale target (e.g. CPU or memory). Such metrics are built in to + // Kubernetes, and have special scaling options on top of those available + // to normal per-pod metrics using the "pods" source. + // +optional + optional ContainerResourceMetricStatus containerResource = 7; + + // external refers to a global metric that is not associated + // with any Kubernetes object. It allows autoscaling based on information + // coming from components running outside of cluster + // (for example length of queue in cloud messaging service, or + // QPS from loadbalancer running outside of cluster).
+ // +optional + optional ExternalMetricStatus external = 5; +} + +// MetricTarget defines the target value, average value, or average utilization of a specific metric +message MetricTarget { + // type represents whether the metric type is Utilization, Value, or AverageValue + optional string type = 1; + + // value is the target value of the metric (as a quantity). + // +optional + optional k8s.io.apimachinery.pkg.api.resource.Quantity value = 2; + + // averageValue is the target value of the average of the + // metric across all relevant pods (as a quantity) + // +optional + optional k8s.io.apimachinery.pkg.api.resource.Quantity averageValue = 3; + + // averageUtilization is the target value of the average of the + // resource metric across all relevant pods, represented as a percentage of + // the requested value of the resource for the pods. + // Currently only valid for Resource metric source type + // +optional + optional int32 averageUtilization = 4; +} + +// MetricValueStatus holds the current value for a metric +message MetricValueStatus { + // value is the current value of the metric (as a quantity). + // +optional + optional k8s.io.apimachinery.pkg.api.resource.Quantity value = 1; + + // averageValue is the current value of the average of the + // metric across all relevant pods (as a quantity) + // +optional + optional k8s.io.apimachinery.pkg.api.resource.Quantity averageValue = 2; + + // averageUtilization is the current value of the average of the + // resource metric across all relevant pods, represented as a percentage of + // the requested value of the resource for the pods. + // +optional + optional int32 averageUtilization = 3; +} + +// ObjectMetricSource indicates how to scale on a metric describing a +// kubernetes object (for example, hits-per-second on an Ingress object). +message ObjectMetricSource { + // describedObject specifies the description of an object, such as kind, name, apiVersion + optional CrossVersionObjectReference describedObject = 1; + + // target specifies the target value for the given metric + optional MetricTarget target = 2; + + // metric identifies the target metric by name and selector + optional MetricIdentifier metric = 3; +} + +// ObjectMetricStatus indicates the current value of a metric describing a +// kubernetes object (for example, hits-per-second on an Ingress object). +message ObjectMetricStatus { + // metric identifies the target metric by name and selector + optional MetricIdentifier metric = 1; + + // current contains the current value for the given metric + optional MetricValueStatus current = 2; + + // DescribedObject specifies the description of an object, such as kind, name, apiVersion + optional CrossVersionObjectReference describedObject = 3; +} + +// PodsMetricSource indicates how to scale on a metric describing each pod in +// the current scale target (for example, transactions-processed-per-second). +// The values will be averaged together before being compared to the target +// value. +message PodsMetricSource { + // metric identifies the target metric by name and selector + optional MetricIdentifier metric = 1; + + // target specifies the target value for the given metric + optional MetricTarget target = 2; +}
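MetricTarget and MetricValueStatus behave as one-of-three unions: the single populated field should match type. As a hypothetical Go sketch (not part of the vendored file; the alias autoscalingv2 for the Go package added later in this patch is an assumption):

package main

import (
	"fmt"

	autoscalingv2 "k8s.io/api/autoscaling/v2"
	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	// An AverageValue target: only AverageValue is set, matching Type.
	avg := resource.MustParse("500m")
	target := autoscalingv2.MetricTarget{
		Type:         autoscalingv2.AverageValueMetricType,
		AverageValue: &avg,
	}

	// A Utilization target instead sets only AverageUtilization (a percentage).
	percent := int32(80)
	utilization := autoscalingv2.MetricTarget{
		Type:               autoscalingv2.UtilizationMetricType,
		AverageUtilization: &percent,
	}
	fmt.Println(target.AverageValue.String(), *utilization.AverageUtilization)
}

 + +// PodsMetricStatus indicates the current value of a metric describing each pod in +// the current scale target (for example, transactions-processed-per-second).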
+message PodsMetricStatus { + // metric identifies the target metric by name and selector + optional MetricIdentifier metric = 1; + + // current contains the current value for the given metric + optional MetricValueStatus current = 2; +} + +// ResourceMetricSource indicates how to scale on a resource metric known to +// Kubernetes, as specified in requests and limits, describing each pod in the +// current scale target (e.g. CPU or memory). The values will be averaged +// together before being compared to the target. Such metrics are built in to +// Kubernetes, and have special scaling options on top of those available to +// normal per-pod metrics using the "pods" source. Only one "target" type +// should be set. +message ResourceMetricSource { + // name is the name of the resource in question. + optional string name = 1; + + // target specifies the target value for the given metric + optional MetricTarget target = 2; +} + +// ResourceMetricStatus indicates the current value of a resource metric known to +// Kubernetes, as specified in requests and limits, describing each pod in the +// current scale target (e.g. CPU or memory). Such metrics are built in to +// Kubernetes, and have special scaling options on top of those available to +// normal per-pod metrics using the "pods" source. +message ResourceMetricStatus { + // Name is the name of the resource in question. + optional string name = 1; + + // current contains the current value for the given metric + optional MetricValueStatus current = 2; +} + diff --git a/vendor/k8s.io/api/autoscaling/v2/register.go b/vendor/k8s.io/api/autoscaling/v2/register.go new file mode 100644 index 0000000000000..20f9ea5bb87d3 --- /dev/null +++ b/vendor/k8s.io/api/autoscaling/v2/register.go @@ -0,0 +1,50 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name used in this package +const GroupName = "autoscaling" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v2"} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + localSchemeBuilder = &SchemeBuilder + AddToScheme = localSchemeBuilder.AddToScheme +)
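SchemeBuilder and AddToScheme follow the standard k8s.io/apimachinery registration pattern. A hypothetical usage sketch (not part of the patch) of wiring this group/version into a runtime scheme:

package main

import (
	"fmt"

	autoscalingv2 "k8s.io/api/autoscaling/v2"
	"k8s.io/apimachinery/pkg/runtime"
)

func main() {
	// Register the autoscaling/v2 kinds into a fresh scheme.
	scheme := runtime.NewScheme()
	if err := autoscalingv2.AddToScheme(scheme); err != nil {
		panic(err)
	}
	// The scheme now recognizes autoscaling/v2 HorizontalPodAutoscaler.
	gvk := autoscalingv2.SchemeGroupVersion.WithKind("HorizontalPodAutoscaler")
	fmt.Println(scheme.Recognizes(gvk)) // true
}

 + +// Adds the list of known types to the given scheme.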
+func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &HorizontalPodAutoscaler{}, + &HorizontalPodAutoscalerList{}, + ) + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/vendor/k8s.io/api/autoscaling/v2/types.go b/vendor/k8s.io/api/autoscaling/v2/types.go new file mode 100644 index 0000000000000..9931f6146b89a --- /dev/null +++ b/vendor/k8s.io/api/autoscaling/v2/types.go @@ -0,0 +1,545 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:openapi-gen=true + +package v2 + +import ( + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// HorizontalPodAutoscaler is the configuration for a horizontal pod +// autoscaler, which automatically manages the replica count of any resource +// implementing the scale subresource based on the metrics specified. +type HorizontalPodAutoscaler struct { + metav1.TypeMeta `json:",inline"` + // metadata is the standard object metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // spec is the specification for the behaviour of the autoscaler. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. + // +optional + Spec HorizontalPodAutoscalerSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` + + // status is the current information about the autoscaler. + // +optional + Status HorizontalPodAutoscalerStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +}
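With the HorizontalPodAutoscaler type in place, a hypothetical sketch of constructing one programmatically (not part of the vendored file; the object and target names are illustrative, and autoscalingv2 is an assumed import alias):

package main

import (
	"fmt"

	autoscalingv2 "k8s.io/api/autoscaling/v2"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	minReplicas := int32(2)
	hpa := autoscalingv2.HorizontalPodAutoscaler{
		ObjectMeta: metav1.ObjectMeta{Name: "example-hpa", Namespace: "default"},
		Spec: autoscalingv2.HorizontalPodAutoscalerSpec{
			// Scale a hypothetical apps/v1 Deployment named "example".
			ScaleTargetRef: autoscalingv2.CrossVersionObjectReference{
				APIVersion: "apps/v1",
				Kind:       "Deployment",
				Name:       "example",
			},
			MinReplicas: &minReplicas,
			MaxReplicas: 10,
		},
	}
	fmt.Printf("%s scales %s/%s\n", hpa.Name, hpa.Spec.ScaleTargetRef.Kind, hpa.Spec.ScaleTargetRef.Name)
}

 + +// HorizontalPodAutoscalerSpec describes the desired functionality of the HorizontalPodAutoscaler. +type HorizontalPodAutoscalerSpec struct { + // scaleTargetRef points to the target resource to scale, and is used to identify the pods for which metrics + // should be collected, as well as to actually change the replica count. + ScaleTargetRef CrossVersionObjectReference `json:"scaleTargetRef" protobuf:"bytes,1,opt,name=scaleTargetRef"` + // minReplicas is the lower limit for the number of replicas to which the autoscaler + // can scale down. It defaults to 1 pod. minReplicas is allowed to be 0 if the + // alpha feature gate HPAScaleToZero is enabled and at least one Object or External + // metric is configured. Scaling is active as long as at least one metric value is + // available. + // +optional + MinReplicas *int32 `json:"minReplicas,omitempty" protobuf:"varint,2,opt,name=minReplicas"` + // maxReplicas is the upper limit for the number of replicas to which the autoscaler can scale up. + // It cannot be less than minReplicas.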
+ MaxReplicas int32 `json:"maxReplicas" protobuf:"varint,3,opt,name=maxReplicas"` + // metrics contains the specifications used to calculate the + // desired replica count (the maximum replica count across all metrics will + // be used). The desired replica count is calculated by multiplying the + // ratio between the target value and the current value by the current + // number of pods. Ergo, metrics used must decrease as the pod count is + // increased, and vice-versa. See the individual metric source types for + // more information about how each type of metric must respond. + // If not set, the default metric will be set to 80% average CPU utilization. + // +listType=atomic + // +optional + Metrics []MetricSpec `json:"metrics,omitempty" protobuf:"bytes,4,rep,name=metrics"` + + // behavior configures the scaling behavior of the target + // in both Up and Down directions (scaleUp and scaleDown fields respectively). + // If not set, the default HPAScalingRules for scale up and scale down are used. + // +optional + Behavior *HorizontalPodAutoscalerBehavior `json:"behavior,omitempty" protobuf:"bytes,5,opt,name=behavior"` +} + +// CrossVersionObjectReference contains enough information to let you identify the referred resource. +type CrossVersionObjectReference struct { + // Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + Kind string `json:"kind" protobuf:"bytes,1,opt,name=kind"` + // Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names + Name string `json:"name" protobuf:"bytes,2,opt,name=name"` + // API version of the referent + // +optional + APIVersion string `json:"apiVersion,omitempty" protobuf:"bytes,3,opt,name=apiVersion"` +} + +// MetricSpec specifies how to scale based on a single metric +// (only `type` and one other matching field should be set at once). +type MetricSpec struct { + // type is the type of metric source. It should be one of "ContainerResource", "External", + // "Object", "Pods" or "Resource", each mapping to a matching field in the object. + // Note: "ContainerResource" type is available only when the feature-gate + // HPAContainerMetrics is enabled + Type MetricSourceType `json:"type" protobuf:"bytes,1,name=type"` + + // object refers to a metric describing a single kubernetes object + // (for example, hits-per-second on an Ingress object). + // +optional + Object *ObjectMetricSource `json:"object,omitempty" protobuf:"bytes,2,opt,name=object"` + // pods refers to a metric describing each pod in the current scale target + // (for example, transactions-processed-per-second). The values will be + // averaged together before being compared to the target value. + // +optional + Pods *PodsMetricSource `json:"pods,omitempty" protobuf:"bytes,3,opt,name=pods"` + // resource refers to a resource metric (such as those specified in + // requests and limits) known to Kubernetes describing each pod in the + // current scale target (e.g. CPU or memory). Such metrics are built in to + // Kubernetes, and have special scaling options on top of those available + // to normal per-pod metrics using the "pods" source. + // +optional + Resource *ResourceMetricSource `json:"resource,omitempty" protobuf:"bytes,4,opt,name=resource"` + // containerResource refers to a resource metric (such as those specified in + // requests and limits) known to Kubernetes describing a single container in + // each pod of the current scale target (e.g. CPU or memory).
Such metrics are + // built in to Kubernetes, and have special scaling options on top of those + // available to normal per-pod metrics using the "pods" source. + // This is an alpha feature and can be enabled by the HPAContainerMetrics feature flag. + // +optional + ContainerResource *ContainerResourceMetricSource `json:"containerResource,omitempty" protobuf:"bytes,7,opt,name=containerResource"` + // external refers to a global metric that is not associated + // with any Kubernetes object. It allows autoscaling based on information + // coming from components running outside of cluster + // (for example length of queue in cloud messaging service, or + // QPS from loadbalancer running outside of cluster). + // +optional + External *ExternalMetricSource `json:"external,omitempty" protobuf:"bytes,5,opt,name=external"` +} + +// HorizontalPodAutoscalerBehavior configures the scaling behavior of the target +// in both Up and Down directions (scaleUp and scaleDown fields respectively). +type HorizontalPodAutoscalerBehavior struct { + // scaleUp is scaling policy for scaling Up. + // If not set, the default value is the higher of: + // * increase no more than 4 pods per 60 seconds + // * double the number of pods per 60 seconds + // No stabilization is used. + // +optional + ScaleUp *HPAScalingRules `json:"scaleUp,omitempty" protobuf:"bytes,1,opt,name=scaleUp"` + // scaleDown is scaling policy for scaling Down. + // If not set, the default value is to allow to scale down to minReplicas pods, with a + // 300 second stabilization window (i.e., the highest recommendation for + // the last 300sec is used). + // +optional + ScaleDown *HPAScalingRules `json:"scaleDown,omitempty" protobuf:"bytes,2,opt,name=scaleDown"` +} + +// ScalingPolicySelect is used to specify which policy should be used while scaling in a certain direction +type ScalingPolicySelect string + +const ( + // MaxChangePolicySelect selects the policy with the highest possible change. + MaxChangePolicySelect ScalingPolicySelect = "Max" + // MinChangePolicySelect selects the policy with the lowest possible change. + MinChangePolicySelect ScalingPolicySelect = "Min" + // DisabledPolicySelect disables the scaling in this direction. + DisabledPolicySelect ScalingPolicySelect = "Disabled" +) + +// HPAScalingRules configures the scaling behavior for one direction. +// These Rules are applied after calculating DesiredReplicas from metrics for the HPA. +// They can limit the scaling velocity by specifying scaling policies. +// They can prevent flapping by specifying the stabilization window, so that the +// number of replicas is not set instantly, instead, the safest value from the stabilization +// window is chosen. +type HPAScalingRules struct { + // StabilizationWindowSeconds is the number of seconds for which past recommendations should be + // considered while scaling up or scaling down. + // StabilizationWindowSeconds must be greater than or equal to zero and less than or equal to 3600 (one hour). + // If not set, use the default values: + // - For scale up: 0 (i.e. no stabilization is done). + // - For scale down: 300 (i.e. the stabilization window is 300 seconds long). + // +optional + StabilizationWindowSeconds *int32 `json:"stabilizationWindowSeconds,omitempty" protobuf:"varint,3,opt,name=stabilizationWindowSeconds"` + // selectPolicy is used to specify which policy should be used. + // If not set, the default value Max is used. 
+ // +optional + SelectPolicy *ScalingPolicySelect `json:"selectPolicy,omitempty" protobuf:"bytes,1,opt,name=selectPolicy"` + // policies is a list of potential scaling policies which can be used during scaling. + // At least one policy must be specified, otherwise the HPAScalingRules will be discarded as invalid + // +listType=atomic + // +optional + Policies []HPAScalingPolicy `json:"policies,omitempty" listType:"atomic" protobuf:"bytes,2,rep,name=policies"` +} + +// HPAScalingPolicyType is the type of the policy which could be used while making scaling decisions. +type HPAScalingPolicyType string + +const ( + // PodsScalingPolicy is a policy used to specify a change in absolute number of pods. + PodsScalingPolicy HPAScalingPolicyType = "Pods" + // PercentScalingPolicy is a policy used to specify a relative amount of change with respect to + // the current number of pods. + PercentScalingPolicy HPAScalingPolicyType = "Percent" +) + +// HPAScalingPolicy is a single policy which must hold true for a specified past interval. +type HPAScalingPolicy struct { + // Type is used to specify the scaling policy. + Type HPAScalingPolicyType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=HPAScalingPolicyType"` + // Value contains the amount of change which is permitted by the policy. + // It must be greater than zero + Value int32 `json:"value" protobuf:"varint,2,opt,name=value"` + // PeriodSeconds specifies the window of time for which the policy should hold true. + // PeriodSeconds must be greater than zero and less than or equal to 1800 (30 min). + PeriodSeconds int32 `json:"periodSeconds" protobuf:"varint,3,opt,name=periodSeconds"` +} + +// MetricSourceType indicates the type of metric. +type MetricSourceType string + +const ( + // ObjectMetricSourceType is a metric describing a kubernetes object + // (for example, hits-per-second on an Ingress object). + ObjectMetricSourceType MetricSourceType = "Object" + // PodsMetricSourceType is a metric describing each pod in the current scale + // target (for example, transactions-processed-per-second). The values + // will be averaged together before being compared to the target value. + PodsMetricSourceType MetricSourceType = "Pods" + // ResourceMetricSourceType is a resource metric known to Kubernetes, as + // specified in requests and limits, describing each pod in the current + // scale target (e.g. CPU or memory). Such metrics are built in to + // Kubernetes, and have special scaling options on top of those available + // to normal per-pod metrics (the "pods" source). + ResourceMetricSourceType MetricSourceType = "Resource" + // ContainerResourceMetricSourceType is a resource metric known to Kubernetes, as + // specified in requests and limits, describing a single container in each pod in the current + // scale target (e.g. CPU or memory). Such metrics are built in to + // Kubernetes, and have special scaling options on top of those available + // to normal per-pod metrics (the "pods" source). + ContainerResourceMetricSourceType MetricSourceType = "ContainerResource" + // ExternalMetricSourceType is a global metric that is not associated + // with any Kubernetes object. It allows autoscaling based on information + // coming from components running outside of cluster + // (for example length of queue in cloud messaging service, or + // QPS from loadbalancer running outside of cluster).
+ ExternalMetricSourceType MetricSourceType = "External"
+)
+
+// ObjectMetricSource indicates how to scale on a metric describing a
+// kubernetes object (for example, hits-per-second on an Ingress object).
+type ObjectMetricSource struct {
+ // describedObject specifies the description of the referenced object, such as kind, name, and apiVersion
+ DescribedObject CrossVersionObjectReference `json:"describedObject" protobuf:"bytes,1,name=describedObject"`
+ // target specifies the target value for the given metric
+ Target MetricTarget `json:"target" protobuf:"bytes,2,name=target"`
+ // metric identifies the target metric by name and selector
+ Metric MetricIdentifier `json:"metric" protobuf:"bytes,3,name=metric"`
+}
+
+// PodsMetricSource indicates how to scale on a metric describing each pod in
+// the current scale target (for example, transactions-processed-per-second).
+// The values will be averaged together before being compared to the target
+// value.
+type PodsMetricSource struct {
+ // metric identifies the target metric by name and selector
+ Metric MetricIdentifier `json:"metric" protobuf:"bytes,1,name=metric"`
+ // target specifies the target value for the given metric
+ Target MetricTarget `json:"target" protobuf:"bytes,2,name=target"`
+}
+
+// ResourceMetricSource indicates how to scale on a resource metric known to
+// Kubernetes, as specified in requests and limits, describing each pod in the
+// current scale target (e.g. CPU or memory). The values will be averaged
+// together before being compared to the target. Such metrics are built in to
+// Kubernetes, and have special scaling options on top of those available to
+// normal per-pod metrics using the "pods" source. Only one "target" type
+// should be set.
+type ResourceMetricSource struct {
+ // name is the name of the resource in question.
+ Name v1.ResourceName `json:"name" protobuf:"bytes,1,name=name"`
+ // target specifies the target value for the given metric
+ Target MetricTarget `json:"target" protobuf:"bytes,2,name=target"`
+}
+
+// ContainerResourceMetricSource indicates how to scale on a resource metric known to
+// Kubernetes, as specified in requests and limits, describing each pod in the
+// current scale target (e.g. CPU or memory). The values will be averaged
+// together before being compared to the target. Such metrics are built in to
+// Kubernetes, and have special scaling options on top of those available to
+// normal per-pod metrics using the "pods" source. Only one "target" type
+// should be set.
+type ContainerResourceMetricSource struct {
+ // name is the name of the resource in question.
+ Name v1.ResourceName `json:"name" protobuf:"bytes,1,name=name"`
+ // target specifies the target value for the given metric
+ Target MetricTarget `json:"target" protobuf:"bytes,2,name=target"`
+ // container is the name of the container in the pods of the scaling target
+ Container string `json:"container" protobuf:"bytes,3,opt,name=container"`
+}
+
+// ExternalMetricSource indicates how to scale on a metric not associated with
+// any Kubernetes object (for example length of queue in cloud
+// messaging service, or QPS from loadbalancer running outside of cluster).
+type ExternalMetricSource struct {
+ // metric identifies the target metric by name and selector
+ Metric MetricIdentifier `json:"metric" protobuf:"bytes,1,name=metric"`
+ // target specifies the target value for the given metric
+ Target MetricTarget `json:"target" protobuf:"bytes,2,name=target"`
+}
+
+// MetricIdentifier defines the name and optionally selector for a metric
+type MetricIdentifier struct {
+ // name is the name of the given metric
+ Name string `json:"name" protobuf:"bytes,1,name=name"`
+ // selector is the string-encoded form of a standard kubernetes label selector for the given metric
+ // When set, it is passed as an additional parameter to the metrics server for more specific metrics scoping.
+ // When unset, just the metricName will be used to gather metrics.
+ // +optional
+ Selector *metav1.LabelSelector `json:"selector,omitempty" protobuf:"bytes,2,name=selector"`
+}
+
+// MetricTarget defines the target value, average value, or average utilization of a specific metric
+type MetricTarget struct {
+ // type represents whether the metric type is Utilization, Value, or AverageValue
+ Type MetricTargetType `json:"type" protobuf:"bytes,1,name=type"`
+ // value is the target value of the metric (as a quantity).
+ // +optional
+ Value *resource.Quantity `json:"value,omitempty" protobuf:"bytes,2,opt,name=value"`
+ // averageValue is the target value of the average of the
+ // metric across all relevant pods (as a quantity)
+ // +optional
+ AverageValue *resource.Quantity `json:"averageValue,omitempty" protobuf:"bytes,3,opt,name=averageValue"`
+ // averageUtilization is the target value of the average of the
+ // resource metric across all relevant pods, represented as a percentage of
+ // the requested value of the resource for the pods.
+ // Currently only valid for Resource metric source type
+ // +optional
+ AverageUtilization *int32 `json:"averageUtilization,omitempty" protobuf:"bytes,4,opt,name=averageUtilization"`
+}
+
+// MetricTargetType specifies the type of metric being targeted, and should be either
+// "Value", "AverageValue", or "Utilization"
+type MetricTargetType string
+
+const (
+ // UtilizationMetricType declares a MetricTarget is an AverageUtilization value
+ UtilizationMetricType MetricTargetType = "Utilization"
+ // ValueMetricType declares a MetricTarget is a raw value
+ ValueMetricType MetricTargetType = "Value"
+ // AverageValueMetricType declares a MetricTarget is an AverageValue value
+ AverageValueMetricType MetricTargetType = "AverageValue"
+)
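A hedged sketch of how MetricIdentifier and MetricTarget plug into a MetricSpec from earlier in this file (illustrative only; the metric name and quantity are invented):

```go
package main

import (
	"fmt"

	autoscalingv2 "k8s.io/api/autoscaling/v2"
	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	// Hypothetical pods metric: scale so that each pod handles roughly
	// 100 transactions-processed-per-second on average.
	spec := autoscalingv2.MetricSpec{
		Type: autoscalingv2.PodsMetricSourceType,
		Pods: &autoscalingv2.PodsMetricSource{
			Metric: autoscalingv2.MetricIdentifier{Name: "transactions-processed-per-second"},
			Target: autoscalingv2.MetricTarget{
				Type:         autoscalingv2.AverageValueMetricType,
				AverageValue: resource.NewQuantity(100, resource.DecimalSI),
			},
		},
	}
	fmt.Printf("%+v\n", spec)
}
```

+
+// HorizontalPodAutoscalerStatus describes the current status of a horizontal pod autoscaler.
+type HorizontalPodAutoscalerStatus struct {
+ // observedGeneration is the most recent generation observed by this autoscaler.
+ // +optional
+ ObservedGeneration *int64 `json:"observedGeneration,omitempty" protobuf:"varint,1,opt,name=observedGeneration"`
+
+ // lastScaleTime is the last time the HorizontalPodAutoscaler scaled the number of pods,
+ // used by the autoscaler to control how often the number of pods is changed.
+ // +optional
+ LastScaleTime *metav1.Time `json:"lastScaleTime,omitempty" protobuf:"bytes,2,opt,name=lastScaleTime"`
+
+ // currentReplicas is current number of replicas of pods managed by this autoscaler,
+ // as last seen by the autoscaler.
+ // +optional
+ CurrentReplicas int32 `json:"currentReplicas,omitempty" protobuf:"varint,3,opt,name=currentReplicas"`
+
+ // desiredReplicas is the desired number of replicas of pods managed by this autoscaler,
+ // as last calculated by the autoscaler.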
+ DesiredReplicas int32 `json:"desiredReplicas" protobuf:"varint,4,opt,name=desiredReplicas"`
+
+ // currentMetrics is the last read state of the metrics used by this autoscaler.
+ // +listType=atomic
+ // +optional
+ CurrentMetrics []MetricStatus `json:"currentMetrics" protobuf:"bytes,5,rep,name=currentMetrics"`
+
+ // conditions is the set of conditions required for this autoscaler to scale its target,
+ // and indicates whether or not those conditions are met.
+ // +patchMergeKey=type
+ // +patchStrategy=merge
+ // +listType=map
+ // +listMapKey=type
+ // +optional
+ Conditions []HorizontalPodAutoscalerCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" listType:"map" protobuf:"bytes,6,rep,name=conditions"`
+}
+
+// HorizontalPodAutoscalerConditionType are the valid conditions of
+// a HorizontalPodAutoscaler.
+type HorizontalPodAutoscalerConditionType string
+
+const (
+ // ScalingActive indicates that the HPA controller is able to scale if necessary:
+ // it's correctly configured, can fetch the desired metrics, and isn't disabled.
+ ScalingActive HorizontalPodAutoscalerConditionType = "ScalingActive"
+ // AbleToScale indicates a lack of transient issues which prevent scaling from occurring,
+ // such as being in a backoff window, or being unable to access/update the target scale.
+ AbleToScale HorizontalPodAutoscalerConditionType = "AbleToScale"
+ // ScalingLimited indicates that the calculated scale based on metrics would be above or
+ // below the range for the HPA, and has thus been capped.
+ ScalingLimited HorizontalPodAutoscalerConditionType = "ScalingLimited"
+)
+
+// HorizontalPodAutoscalerCondition describes the state of
+// a HorizontalPodAutoscaler at a certain point.
+type HorizontalPodAutoscalerCondition struct {
+ // type describes the current condition
+ Type HorizontalPodAutoscalerConditionType `json:"type" protobuf:"bytes,1,name=type"`
+ // status is the status of the condition (True, False, Unknown)
+ Status v1.ConditionStatus `json:"status" protobuf:"bytes,2,name=status"`
+ // lastTransitionTime is the last time the condition transitioned from
+ // one status to another
+ // +optional
+ LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,3,opt,name=lastTransitionTime"`
+ // reason is the reason for the condition's last transition.
+ // +optional
+ Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"`
+ // message is a human-readable explanation containing details about
+ // the transition
+ // +optional
+ Message string `json:"message,omitempty" protobuf:"bytes,5,opt,name=message"`
+}
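A small sketch of how a consumer might read these conditions (not part of the vendored code; the helper name is invented):

```go
package main

import (
	"fmt"

	autoscalingv2 "k8s.io/api/autoscaling/v2"
	corev1 "k8s.io/api/core/v1"
)

// ableToScale reports whether the AbleToScale condition is currently True.
func ableToScale(status *autoscalingv2.HorizontalPodAutoscalerStatus) bool {
	for _, cond := range status.Conditions {
		if cond.Type == autoscalingv2.AbleToScale {
			return cond.Status == corev1.ConditionTrue
		}
	}
	return false
}

func main() {
	status := &autoscalingv2.HorizontalPodAutoscalerStatus{
		Conditions: []autoscalingv2.HorizontalPodAutoscalerCondition{
			{Type: autoscalingv2.AbleToScale, Status: corev1.ConditionTrue},
		},
	}
	fmt.Println(ableToScale(status)) // true
}
```

+
+// MetricStatus describes the last-read state of a single metric.
+type MetricStatus struct {
+ // type is the type of metric source. It will be one of "ContainerResource", "External",
+ // "Object", "Pods" or "Resource", each corresponds to a matching field in the object.
+ // Note: "ContainerResource" type is available only when the feature-gate
+ // HPAContainerMetrics is enabled
+ Type MetricSourceType `json:"type" protobuf:"bytes,1,name=type"`
+
+ // object refers to a metric describing a single kubernetes object
+ // (for example, hits-per-second on an Ingress object).
+ // +optional
+ Object *ObjectMetricStatus `json:"object,omitempty" protobuf:"bytes,2,opt,name=object"`
+ // pods refers to a metric describing each pod in the current scale target
+ // (for example, transactions-processed-per-second). The values will be
+ // averaged together before being compared to the target value.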
+ // +optional
+ Pods *PodsMetricStatus `json:"pods,omitempty" protobuf:"bytes,3,opt,name=pods"`
+ // resource refers to a resource metric (such as those specified in
+ // requests and limits) known to Kubernetes describing each pod in the
+ // current scale target (e.g. CPU or memory). Such metrics are built in to
+ // Kubernetes, and have special scaling options on top of those available
+ // to normal per-pod metrics using the "pods" source.
+ // +optional
+ Resource *ResourceMetricStatus `json:"resource,omitempty" protobuf:"bytes,4,opt,name=resource"`
+ // container resource refers to a resource metric (such as those specified in
+ // requests and limits) known to Kubernetes describing a single container in each pod in the
+ // current scale target (e.g. CPU or memory). Such metrics are built in to
+ // Kubernetes, and have special scaling options on top of those available
+ // to normal per-pod metrics using the "pods" source.
+ // +optional
+ ContainerResource *ContainerResourceMetricStatus `json:"containerResource,omitempty" protobuf:"bytes,7,opt,name=containerResource"`
+ // external refers to a global metric that is not associated
+ // with any Kubernetes object. It allows autoscaling based on information
+ // coming from components running outside of cluster
+ // (for example length of queue in cloud messaging service, or
+ // QPS from loadbalancer running outside of cluster).
+ // +optional
+ External *ExternalMetricStatus `json:"external,omitempty" protobuf:"bytes,5,opt,name=external"`
+}
+
+// ObjectMetricStatus indicates the current value of a metric describing a
+// kubernetes object (for example, hits-per-second on an Ingress object).
+type ObjectMetricStatus struct {
+ // metric identifies the target metric by name and selector
+ Metric MetricIdentifier `json:"metric" protobuf:"bytes,1,name=metric"`
+ // current contains the current value for the given metric
+ Current MetricValueStatus `json:"current" protobuf:"bytes,2,name=current"`
+ // DescribedObject specifies the description of the referenced object, such as kind, name, and apiVersion
+ DescribedObject CrossVersionObjectReference `json:"describedObject" protobuf:"bytes,3,name=describedObject"`
+}
+
+// PodsMetricStatus indicates the current value of a metric describing each pod in
+// the current scale target (for example, transactions-processed-per-second).
+type PodsMetricStatus struct {
+ // metric identifies the target metric by name and selector
+ Metric MetricIdentifier `json:"metric" protobuf:"bytes,1,name=metric"`
+ // current contains the current value for the given metric
+ Current MetricValueStatus `json:"current" protobuf:"bytes,2,name=current"`
+}
+
+// ResourceMetricStatus indicates the current value of a resource metric known to
+// Kubernetes, as specified in requests and limits, describing each pod in the
+// current scale target (e.g. CPU or memory). Such metrics are built in to
+// Kubernetes, and have special scaling options on top of those available to
+// normal per-pod metrics using the "pods" source.
+type ResourceMetricStatus struct {
+ // Name is the name of the resource in question.
+ Name v1.ResourceName `json:"name" protobuf:"bytes,1,name=name"`
+ // current contains the current value for the given metric
+ Current MetricValueStatus `json:"current" protobuf:"bytes,2,name=current"`
+}
+
+// ContainerResourceMetricStatus indicates the current value of a resource metric known to
+// Kubernetes, as specified in requests and limits, describing a single container in each pod in the
+// current scale target (e.g. CPU or memory). Such metrics are built in to
+// Kubernetes, and have special scaling options on top of those available to
+// normal per-pod metrics using the "pods" source.
+type ContainerResourceMetricStatus struct {
+ // Name is the name of the resource in question.
+ Name v1.ResourceName `json:"name" protobuf:"bytes,1,name=name"`
+ // current contains the current value for the given metric
+ Current MetricValueStatus `json:"current" protobuf:"bytes,2,name=current"`
+ // Container is the name of the container in the pods of the scaling target
+ Container string `json:"container" protobuf:"bytes,3,opt,name=container"`
+}
+
+// ExternalMetricStatus indicates the current value of a global metric
+// not associated with any Kubernetes object.
+type ExternalMetricStatus struct {
+ // metric identifies the target metric by name and selector
+ Metric MetricIdentifier `json:"metric" protobuf:"bytes,1,name=metric"`
+ // current contains the current value for the given metric
+ Current MetricValueStatus `json:"current" protobuf:"bytes,2,name=current"`
+}
+
+// MetricValueStatus holds the current value for a metric
+type MetricValueStatus struct {
+ // value is the current value of the metric (as a quantity).
+ // +optional
+ Value *resource.Quantity `json:"value,omitempty" protobuf:"bytes,1,opt,name=value"`
+ // averageValue is the current value of the average of the
+ // metric across all relevant pods (as a quantity)
+ // +optional
+ AverageValue *resource.Quantity `json:"averageValue,omitempty" protobuf:"bytes,2,opt,name=averageValue"`
+ // averageUtilization is the current value of the average of the
+ // resource metric across all relevant pods, represented as a percentage of
+ // the requested value of the resource for the pods.
+ // +optional
+ AverageUtilization *int32 `json:"averageUtilization,omitempty" protobuf:"bytes,3,opt,name=averageUtilization"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// HorizontalPodAutoscalerList is a list of horizontal pod autoscaler objects.
+type HorizontalPodAutoscalerList struct {
+ metav1.TypeMeta `json:",inline"`
+ // metadata is the standard list metadata.
+ // +optional
+ metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // items is the list of horizontal pod autoscaler objects.
+ Items []HorizontalPodAutoscaler `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
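Before the generated Swagger docs that follow, a brief sketch of reading a MetricValueStatus, whose three optional fields are mutually exclusive in practice (editorial example; describeValue is an invented helper):

```go
package main

import (
	"fmt"

	autoscalingv2 "k8s.io/api/autoscaling/v2"
	"k8s.io/apimachinery/pkg/api/resource"
)

// describeValue renders whichever of the three fields is set.
func describeValue(v autoscalingv2.MetricValueStatus) string {
	switch {
	case v.AverageUtilization != nil:
		return fmt.Sprintf("%d%% average utilization", *v.AverageUtilization)
	case v.AverageValue != nil:
		return v.AverageValue.String() + " (pod average)"
	case v.Value != nil:
		return v.Value.String()
	}
	return "no value reported"
}

func main() {
	util := int32(63)
	fmt.Println(describeValue(autoscalingv2.MetricValueStatus{AverageUtilization: &util}))
	fmt.Println(describeValue(autoscalingv2.MetricValueStatus{Value: resource.NewQuantity(42, resource.DecimalSI)}))
}
```

diff --git a/vendor/k8s.io/api/autoscaling/v2/types_swagger_doc_generated.go b/vendor/k8s.io/api/autoscaling/v2/types_swagger_doc_generated.go
new file mode 100644
index 0000000000000..05355a5523a53
--- /dev/null
+++ b/vendor/k8s.io/api/autoscaling/v2/types_swagger_doc_generated.go
@@ -0,0 +1,299 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v2
+
+// This file contains a collection of methods that can be used from go-restful to
+// generate Swagger API documentation for its models. Please read this PR for more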
+// information on the implementation: https://github.com/emicklei/go-restful/pull/215
+//
+// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if
+// they are on one line! For multiple line or blocks that you want to ignore use ---.
+// Any context after a --- is ignored.
+//
+// Those methods can be generated by using hack/update-generated-swagger-docs.sh
+
+// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT.
+var map_ContainerResourceMetricSource = map[string]string{
+ "": "ContainerResourceMetricSource indicates how to scale on a resource metric known to Kubernetes, as specified in requests and limits, describing each pod in the current scale target (e.g. CPU or memory). The values will be averaged together before being compared to the target. Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source. Only one \"target\" type should be set.",
+ "name": "name is the name of the resource in question.",
+ "target": "target specifies the target value for the given metric",
+ "container": "container is the name of the container in the pods of the scaling target",
+}
+
+func (ContainerResourceMetricSource) SwaggerDoc() map[string]string {
+ return map_ContainerResourceMetricSource
+}
+
+var map_ContainerResourceMetricStatus = map[string]string{
+ "": "ContainerResourceMetricStatus indicates the current value of a resource metric known to Kubernetes, as specified in requests and limits, describing a single container in each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.",
+ "name": "Name is the name of the resource in question.",
+ "current": "current contains the current value for the given metric",
+ "container": "Container is the name of the container in the pods of the scaling target",
+}
+
+func (ContainerResourceMetricStatus) SwaggerDoc() map[string]string {
+ return map_ContainerResourceMetricStatus
+}
+
+var map_CrossVersionObjectReference = map[string]string{
+ "": "CrossVersionObjectReference contains enough information to let you identify the referred resource.",
+ "kind": "Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
+ "name": "Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names",
+ "apiVersion": "API version of the referent",
+}
+
+func (CrossVersionObjectReference) SwaggerDoc() map[string]string {
+ return map_CrossVersionObjectReference
+}
+
+var map_ExternalMetricSource = map[string]string{
+ "": "ExternalMetricSource indicates how to scale on a metric not associated with any Kubernetes object (for example length of queue in cloud messaging service, or QPS from loadbalancer running outside of cluster).",
+ "metric": "metric identifies the target metric by name and selector",
+ "target": "target specifies the target value for the given metric",
+}
+
+func (ExternalMetricSource) SwaggerDoc() map[string]string {
+ return map_ExternalMetricSource
+}
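A sketch tying CrossVersionObjectReference to an Object metric (illustrative only; the Ingress name and target number are invented):

```go
package main

import (
	"fmt"

	autoscalingv2 "k8s.io/api/autoscaling/v2"
	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	// Hypothetical object metric: hits-per-second observed on an Ingress.
	spec := autoscalingv2.MetricSpec{
		Type: autoscalingv2.ObjectMetricSourceType,
		Object: &autoscalingv2.ObjectMetricSource{
			DescribedObject: autoscalingv2.CrossVersionObjectReference{
				APIVersion: "networking.k8s.io/v1",
				Kind:       "Ingress",
				Name:       "main-route", // hypothetical
			},
			Metric: autoscalingv2.MetricIdentifier{Name: "hits-per-second"},
			Target: autoscalingv2.MetricTarget{
				Type:  autoscalingv2.ValueMetricType,
				Value: resource.NewQuantity(1000, resource.DecimalSI),
			},
		},
	}
	fmt.Printf("%+v\n", spec)
}
```

+
+var map_ExternalMetricStatus = map[string]string{
+ "": "ExternalMetricStatus indicates the current value of a global metric not associated with any Kubernetes object.",
+ "metric": "metric identifies the target metric by name and selector",
+ "current": "current contains the current value for the given metric",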
+}
+
+func (ExternalMetricStatus) SwaggerDoc() map[string]string {
+ return map_ExternalMetricStatus
+}
+
+var map_HPAScalingPolicy = map[string]string{
+ "": "HPAScalingPolicy is a single policy which must hold true for a specified past interval.",
+ "type": "Type is used to specify the scaling policy.",
+ "value": "Value contains the amount of change which is permitted by the policy. It must be greater than zero",
+ "periodSeconds": "PeriodSeconds specifies the window of time for which the policy should hold true. PeriodSeconds must be greater than zero and less than or equal to 1800 (30 min).",
+}
+
+func (HPAScalingPolicy) SwaggerDoc() map[string]string {
+ return map_HPAScalingPolicy
+}
+
+var map_HPAScalingRules = map[string]string{
+ "": "HPAScalingRules configures the scaling behavior for one direction. These Rules are applied after calculating DesiredReplicas from metrics for the HPA. They can limit the scaling velocity by specifying scaling policies. They can prevent flapping by specifying the stabilization window, so that the number of replicas is not set instantly, instead, the safest value from the stabilization window is chosen.",
+ "stabilizationWindowSeconds": "StabilizationWindowSeconds is the number of seconds for which past recommendations should be considered while scaling up or scaling down. StabilizationWindowSeconds must be greater than or equal to zero and less than or equal to 3600 (one hour). If not set, use the default values: - For scale up: 0 (i.e. no stabilization is done). - For scale down: 300 (i.e. the stabilization window is 300 seconds long).",
+ "selectPolicy": "selectPolicy is used to specify which policy should be used. If not set, the default value Max is used.",
+ "policies": "policies is a list of potential scaling policies which can be used during scaling. At least one policy must be specified, otherwise the HPAScalingRules will be discarded as invalid",
+}
+
+func (HPAScalingRules) SwaggerDoc() map[string]string {
+ return map_HPAScalingRules
+}
+
+var map_HorizontalPodAutoscaler = map[string]string{
+ "": "HorizontalPodAutoscaler is the configuration for a horizontal pod autoscaler, which automatically manages the replica count of any resource implementing the scale subresource based on the metrics specified.",
+ "metadata": "metadata is the standard object metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+ "spec": "spec is the specification for the behaviour of the autoscaler. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status.",
+ "status": "status is the current information about the autoscaler.",
+}
+
+func (HorizontalPodAutoscaler) SwaggerDoc() map[string]string {
+ return map_HorizontalPodAutoscaler
+}
+
+var map_HorizontalPodAutoscalerBehavior = map[string]string{
+ "": "HorizontalPodAutoscalerBehavior configures the scaling behavior of the target in both Up and Down directions (scaleUp and scaleDown fields respectively).",
+ "scaleUp": "scaleUp is scaling policy for scaling Up. If not set, the default value is the higher of:\n * increase no more than 4 pods per 60 seconds\n * double the number of pods per 60 seconds\nNo stabilization is used.",
+ "scaleDown": "scaleDown is scaling policy for scaling Down. If not set, the default value is to allow to scale down to minReplicas pods, with a 300 second stabilization window (i.e., the highest recommendation for the last 300sec is used).",
+}
+}
+
+func (HorizontalPodAutoscalerBehavior) SwaggerDoc() map[string]string {
+ return map_HorizontalPodAutoscalerBehavior
+}
+
+var map_HorizontalPodAutoscalerCondition = map[string]string{
+ "": "HorizontalPodAutoscalerCondition describes the state of a HorizontalPodAutoscaler at a certain point.",
+ "type": "type describes the current condition",
+ "status": "status is the status of the condition (True, False, Unknown)",
+ "lastTransitionTime": "lastTransitionTime is the last time the condition transitioned from one status to another",
+ "reason": "reason is the reason for the condition's last transition.",
+ "message": "message is a human-readable explanation containing details about the transition",
+}
+
+func (HorizontalPodAutoscalerCondition) SwaggerDoc() map[string]string {
+ return map_HorizontalPodAutoscalerCondition
+}
+
+var map_HorizontalPodAutoscalerList = map[string]string{
+ "": "HorizontalPodAutoscalerList is a list of horizontal pod autoscaler objects.",
+ "metadata": "metadata is the standard list metadata.",
+ "items": "items is the list of horizontal pod autoscaler objects.",
+}
+
+func (HorizontalPodAutoscalerList) SwaggerDoc() map[string]string {
+ return map_HorizontalPodAutoscalerList
+}
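A minimal sketch of the spec these descriptions document: a Deployment scaled between 2 and 10 replicas on 80% average CPU (editorial example; the target name and int32Ptr helper are invented):

```go
package main

import (
	"fmt"

	autoscalingv2 "k8s.io/api/autoscaling/v2"
	corev1 "k8s.io/api/core/v1"
)

func int32Ptr(i int32) *int32 { return &i }

func main() {
	spec := autoscalingv2.HorizontalPodAutoscalerSpec{
		ScaleTargetRef: autoscalingv2.CrossVersionObjectReference{
			APIVersion: "apps/v1",
			Kind:       "Deployment",
			Name:       "frontend", // hypothetical
		},
		MinReplicas: int32Ptr(2),
		MaxReplicas: 10,
		Metrics: []autoscalingv2.MetricSpec{{
			Type: autoscalingv2.ResourceMetricSourceType,
			Resource: &autoscalingv2.ResourceMetricSource{
				Name: corev1.ResourceCPU,
				Target: autoscalingv2.MetricTarget{
					Type:               autoscalingv2.UtilizationMetricType,
					AverageUtilization: int32Ptr(80),
				},
			},
		}},
	}
	fmt.Printf("%+v\n", spec)
}
```

+
+var map_HorizontalPodAutoscalerSpec = map[string]string{
+ "": "HorizontalPodAutoscalerSpec describes the desired functionality of the HorizontalPodAutoscaler.",
+ "scaleTargetRef": "scaleTargetRef points to the target resource to scale, and is used to identify the pods for which metrics should be collected, as well as to actually change the replica count.",
+ "minReplicas": "minReplicas is the lower limit for the number of replicas to which the autoscaler can scale down. It defaults to 1 pod. minReplicas is allowed to be 0 if the alpha feature gate HPAScaleToZero is enabled and at least one Object or External metric is configured. Scaling is active as long as at least one metric value is available.",
+ "maxReplicas": "maxReplicas is the upper limit for the number of replicas to which the autoscaler can scale up. It cannot be less than minReplicas.",
+ "metrics": "metrics contains the specifications to use to calculate the desired replica count (the maximum replica count across all metrics will be used). The desired replica count is calculated multiplying the ratio between the target value and the current value by the current number of pods. Ergo, metrics used must decrease as the pod count is increased, and vice-versa. See the individual metric source types for more information about how each type of metric must respond. If not set, the default metric will be set to 80% average CPU utilization.",
+ "behavior": "behavior configures the scaling behavior of the target in both Up and Down directions (scaleUp and scaleDown fields respectively). If not set, the default HPAScalingRules for scale up and scale down are used.",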
+}
+
+func (HorizontalPodAutoscalerSpec) SwaggerDoc() map[string]string {
+ return map_HorizontalPodAutoscalerSpec
+}
+
+var map_HorizontalPodAutoscalerStatus = map[string]string{
+ "": "HorizontalPodAutoscalerStatus describes the current status of a horizontal pod autoscaler.",
+ "observedGeneration": "observedGeneration is the most recent generation observed by this autoscaler.",
+ "lastScaleTime": "lastScaleTime is the last time the HorizontalPodAutoscaler scaled the number of pods, used by the autoscaler to control how often the number of pods is changed.",
+ "currentReplicas": "currentReplicas is current number of replicas of pods managed by this autoscaler, as last seen by the autoscaler.",
+ "desiredReplicas": "desiredReplicas is the desired number of replicas of pods managed by this autoscaler, as last calculated by the autoscaler.",
+ "currentMetrics": "currentMetrics is the last read state of the metrics used by this autoscaler.",
+ "conditions": "conditions is the set of conditions required for this autoscaler to scale its target, and indicates whether or not those conditions are met.",
+}
+
+func (HorizontalPodAutoscalerStatus) SwaggerDoc() map[string]string {
+ return map_HorizontalPodAutoscalerStatus
+}
+
+var map_MetricIdentifier = map[string]string{
+ "": "MetricIdentifier defines the name and optionally selector for a metric",
+ "name": "name is the name of the given metric",
+ "selector": "selector is the string-encoded form of a standard kubernetes label selector for the given metric. When set, it is passed as an additional parameter to the metrics server for more specific metrics scoping. When unset, just the metricName will be used to gather metrics.",
+}
+
+func (MetricIdentifier) SwaggerDoc() map[string]string {
+ return map_MetricIdentifier
+}
+
+var map_MetricSpec = map[string]string{
+ "": "MetricSpec specifies how to scale based on a single metric (only `type` and one other matching field should be set at once).",
+ "type": "type is the type of metric source. It should be one of \"ContainerResource\", \"External\", \"Object\", \"Pods\" or \"Resource\", each mapping to a matching field in the object. Note: \"ContainerResource\" type is available only when the feature-gate HPAContainerMetrics is enabled",
+ "object": "object refers to a metric describing a single kubernetes object (for example, hits-per-second on an Ingress object).",
+ "pods": "pods refers to a metric describing each pod in the current scale target (for example, transactions-processed-per-second). The values will be averaged together before being compared to the target value.",
+ "resource": "resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.",
+ "containerResource": "containerResource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing a single container in each pod of the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source. This is an alpha feature and can be enabled by the HPAContainerMetrics feature flag.",
+ "external": "external refers to a global metric that is not associated with any Kubernetes object. It allows autoscaling based on information coming from components running outside of cluster (for example length of queue in cloud messaging service, or QPS from loadbalancer running outside of cluster).",
+}
+
+func (MetricSpec) SwaggerDoc() map[string]string {
+ return map_MetricSpec
+}
+
+var map_MetricStatus = map[string]string{
+ "": "MetricStatus describes the last-read state of a single metric.",
+ "type": "type is the type of metric source. It will be one of \"ContainerResource\", \"External\", \"Object\", \"Pods\" or \"Resource\", each corresponds to a matching field in the object. Note: \"ContainerResource\" type is available only when the feature-gate HPAContainerMetrics is enabled",
+ "object": "object refers to a metric describing a single kubernetes object (for example, hits-per-second on an Ingress object).",
+ "pods": "pods refers to a metric describing each pod in the current scale target (for example, transactions-processed-per-second). The values will be averaged together before being compared to the target value.",
+ "resource": "resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.",
+ "containerResource": "container resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing a single container in each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.",
+ "external": "external refers to a global metric that is not associated with any Kubernetes object. It allows autoscaling based on information coming from components running outside of cluster (for example length of queue in cloud messaging service, or QPS from loadbalancer running outside of cluster).",
+}
+
+func (MetricStatus) SwaggerDoc() map[string]string {
+ return map_MetricStatus
+}
+
+var map_MetricTarget = map[string]string{
+ "": "MetricTarget defines the target value, average value, or average utilization of a specific metric",
+ "type": "type represents whether the metric type is Utilization, Value, or AverageValue",
+ "value": "value is the target value of the metric (as a quantity).",
+ "averageValue": "averageValue is the target value of the average of the metric across all relevant pods (as a quantity)",
+ "averageUtilization": "averageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. Currently only valid for Resource metric source type",
+}
+}
+
+func (MetricTarget) SwaggerDoc() map[string]string {
+ return map_MetricTarget
+}
+
+var map_MetricValueStatus = map[string]string{
+ "": "MetricValueStatus holds the current value for a metric",
+ "value": "value is the current value of the metric (as a quantity).",
+ "averageValue": "averageValue is the current value of the average of the metric across all relevant pods (as a quantity)",
+ "averageUtilization": "averageUtilization is the current value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods.",
+}
+
+func (MetricValueStatus) SwaggerDoc() map[string]string {
+ return map_MetricValueStatus
+}
+
+var map_ObjectMetricSource = map[string]string{
+ "": "ObjectMetricSource indicates how to scale on a metric describing a kubernetes object (for example, hits-per-second on an Ingress object).",
+ "describedObject": "describedObject specifies the description of the referenced object, such as kind, name, and apiVersion",
+ "target": "target specifies the target value for the given metric",
+ "metric": "metric identifies the target metric by name and selector",
+}
+
+func (ObjectMetricSource) SwaggerDoc() map[string]string {
+ return map_ObjectMetricSource
+}
+
+var map_ObjectMetricStatus = map[string]string{
+ "": "ObjectMetricStatus indicates the current value of a metric describing a kubernetes object (for example, hits-per-second on an Ingress object).",
+ "metric": "metric identifies the target metric by name and selector",
+ "current": "current contains the current value for the given metric",
+ "describedObject": "DescribedObject specifies the description of the referenced object, such as kind, name, and apiVersion",
+}
+
+func (ObjectMetricStatus) SwaggerDoc() map[string]string {
+ return map_ObjectMetricStatus
+}
+
+var map_PodsMetricSource = map[string]string{
+ "": "PodsMetricSource indicates how to scale on a metric describing each pod in the current scale target (for example, transactions-processed-per-second). The values will be averaged together before being compared to the target value.",
+ "metric": "metric identifies the target metric by name and selector",
+ "target": "target specifies the target value for the given metric",
+}
+
+func (PodsMetricSource) SwaggerDoc() map[string]string {
+ return map_PodsMetricSource
+}
+
+var map_PodsMetricStatus = map[string]string{
+ "": "PodsMetricStatus indicates the current value of a metric describing each pod in the current scale target (for example, transactions-processed-per-second).",
+ "metric": "metric identifies the target metric by name and selector",
+ "current": "current contains the current value for the given metric",
+}
+
+func (PodsMetricStatus) SwaggerDoc() map[string]string {
+ return map_PodsMetricStatus
+}
+
+var map_ResourceMetricSource = map[string]string{
+ "": "ResourceMetricSource indicates how to scale on a resource metric known to Kubernetes, as specified in requests and limits, describing each pod in the current scale target (e.g. CPU or memory). The values will be averaged together before being compared to the target. Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.
Only one \"target\" type should be set.", + "name": "name is the name of the resource in question.", + "target": "target specifies the target value for the given metric", +} + +func (ResourceMetricSource) SwaggerDoc() map[string]string { + return map_ResourceMetricSource +} + +var map_ResourceMetricStatus = map[string]string{ + "": "ResourceMetricStatus indicates the current value of a resource metric known to Kubernetes, as specified in requests and limits, describing each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.", + "name": "Name is the name of the resource in question.", + "current": "current contains the current value for the given metric", +} + +func (ResourceMetricStatus) SwaggerDoc() map[string]string { + return map_ResourceMetricStatus +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/autoscaling/v2/zz_generated.deepcopy.go b/vendor/k8s.io/api/autoscaling/v2/zz_generated.deepcopy.go new file mode 100644 index 0000000000000..125708d6fdaef --- /dev/null +++ b/vendor/k8s.io/api/autoscaling/v2/zz_generated.deepcopy.go @@ -0,0 +1,610 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v2 + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ContainerResourceMetricSource) DeepCopyInto(out *ContainerResourceMetricSource) { + *out = *in + in.Target.DeepCopyInto(&out.Target) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerResourceMetricSource. +func (in *ContainerResourceMetricSource) DeepCopy() *ContainerResourceMetricSource { + if in == nil { + return nil + } + out := new(ContainerResourceMetricSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ContainerResourceMetricStatus) DeepCopyInto(out *ContainerResourceMetricStatus) { + *out = *in + in.Current.DeepCopyInto(&out.Current) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerResourceMetricStatus. +func (in *ContainerResourceMetricStatus) DeepCopy() *ContainerResourceMetricStatus { + if in == nil { + return nil + } + out := new(ContainerResourceMetricStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CrossVersionObjectReference) DeepCopyInto(out *CrossVersionObjectReference) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CrossVersionObjectReference. +func (in *CrossVersionObjectReference) DeepCopy() *CrossVersionObjectReference { + if in == nil { + return nil + } + out := new(CrossVersionObjectReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExternalMetricSource) DeepCopyInto(out *ExternalMetricSource) { + *out = *in + in.Metric.DeepCopyInto(&out.Metric) + in.Target.DeepCopyInto(&out.Target) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExternalMetricSource. +func (in *ExternalMetricSource) DeepCopy() *ExternalMetricSource { + if in == nil { + return nil + } + out := new(ExternalMetricSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExternalMetricStatus) DeepCopyInto(out *ExternalMetricStatus) { + *out = *in + in.Metric.DeepCopyInto(&out.Metric) + in.Current.DeepCopyInto(&out.Current) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExternalMetricStatus. +func (in *ExternalMetricStatus) DeepCopy() *ExternalMetricStatus { + if in == nil { + return nil + } + out := new(ExternalMetricStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HPAScalingPolicy) DeepCopyInto(out *HPAScalingPolicy) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HPAScalingPolicy. +func (in *HPAScalingPolicy) DeepCopy() *HPAScalingPolicy { + if in == nil { + return nil + } + out := new(HPAScalingPolicy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HPAScalingRules) DeepCopyInto(out *HPAScalingRules) { + *out = *in + if in.StabilizationWindowSeconds != nil { + in, out := &in.StabilizationWindowSeconds, &out.StabilizationWindowSeconds + *out = new(int32) + **out = **in + } + if in.SelectPolicy != nil { + in, out := &in.SelectPolicy, &out.SelectPolicy + *out = new(ScalingPolicySelect) + **out = **in + } + if in.Policies != nil { + in, out := &in.Policies, &out.Policies + *out = make([]HPAScalingPolicy, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HPAScalingRules. +func (in *HPAScalingRules) DeepCopy() *HPAScalingRules { + if in == nil { + return nil + } + out := new(HPAScalingRules) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HorizontalPodAutoscaler) DeepCopyInto(out *HorizontalPodAutoscaler) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HorizontalPodAutoscaler. 
+func (in *HorizontalPodAutoscaler) DeepCopy() *HorizontalPodAutoscaler { + if in == nil { + return nil + } + out := new(HorizontalPodAutoscaler) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *HorizontalPodAutoscaler) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HorizontalPodAutoscalerBehavior) DeepCopyInto(out *HorizontalPodAutoscalerBehavior) { + *out = *in + if in.ScaleUp != nil { + in, out := &in.ScaleUp, &out.ScaleUp + *out = new(HPAScalingRules) + (*in).DeepCopyInto(*out) + } + if in.ScaleDown != nil { + in, out := &in.ScaleDown, &out.ScaleDown + *out = new(HPAScalingRules) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HorizontalPodAutoscalerBehavior. +func (in *HorizontalPodAutoscalerBehavior) DeepCopy() *HorizontalPodAutoscalerBehavior { + if in == nil { + return nil + } + out := new(HorizontalPodAutoscalerBehavior) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HorizontalPodAutoscalerCondition) DeepCopyInto(out *HorizontalPodAutoscalerCondition) { + *out = *in + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HorizontalPodAutoscalerCondition. +func (in *HorizontalPodAutoscalerCondition) DeepCopy() *HorizontalPodAutoscalerCondition { + if in == nil { + return nil + } + out := new(HorizontalPodAutoscalerCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HorizontalPodAutoscalerList) DeepCopyInto(out *HorizontalPodAutoscalerList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]HorizontalPodAutoscaler, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HorizontalPodAutoscalerList. +func (in *HorizontalPodAutoscalerList) DeepCopy() *HorizontalPodAutoscalerList { + if in == nil { + return nil + } + out := new(HorizontalPodAutoscalerList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *HorizontalPodAutoscalerList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HorizontalPodAutoscalerSpec) DeepCopyInto(out *HorizontalPodAutoscalerSpec) { + *out = *in + out.ScaleTargetRef = in.ScaleTargetRef + if in.MinReplicas != nil { + in, out := &in.MinReplicas, &out.MinReplicas + *out = new(int32) + **out = **in + } + if in.Metrics != nil { + in, out := &in.Metrics, &out.Metrics + *out = make([]MetricSpec, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Behavior != nil { + in, out := &in.Behavior, &out.Behavior + *out = new(HorizontalPodAutoscalerBehavior) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HorizontalPodAutoscalerSpec. +func (in *HorizontalPodAutoscalerSpec) DeepCopy() *HorizontalPodAutoscalerSpec { + if in == nil { + return nil + } + out := new(HorizontalPodAutoscalerSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HorizontalPodAutoscalerStatus) DeepCopyInto(out *HorizontalPodAutoscalerStatus) { + *out = *in + if in.ObservedGeneration != nil { + in, out := &in.ObservedGeneration, &out.ObservedGeneration + *out = new(int64) + **out = **in + } + if in.LastScaleTime != nil { + in, out := &in.LastScaleTime, &out.LastScaleTime + *out = (*in).DeepCopy() + } + if in.CurrentMetrics != nil { + in, out := &in.CurrentMetrics, &out.CurrentMetrics + *out = make([]MetricStatus, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]HorizontalPodAutoscalerCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HorizontalPodAutoscalerStatus. +func (in *HorizontalPodAutoscalerStatus) DeepCopy() *HorizontalPodAutoscalerStatus { + if in == nil { + return nil + } + out := new(HorizontalPodAutoscalerStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MetricIdentifier) DeepCopyInto(out *MetricIdentifier) { + *out = *in + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + *out = new(v1.LabelSelector) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricIdentifier. +func (in *MetricIdentifier) DeepCopy() *MetricIdentifier { + if in == nil { + return nil + } + out := new(MetricIdentifier) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MetricSpec) DeepCopyInto(out *MetricSpec) { + *out = *in + if in.Object != nil { + in, out := &in.Object, &out.Object + *out = new(ObjectMetricSource) + (*in).DeepCopyInto(*out) + } + if in.Pods != nil { + in, out := &in.Pods, &out.Pods + *out = new(PodsMetricSource) + (*in).DeepCopyInto(*out) + } + if in.Resource != nil { + in, out := &in.Resource, &out.Resource + *out = new(ResourceMetricSource) + (*in).DeepCopyInto(*out) + } + if in.ContainerResource != nil { + in, out := &in.ContainerResource, &out.ContainerResource + *out = new(ContainerResourceMetricSource) + (*in).DeepCopyInto(*out) + } + if in.External != nil { + in, out := &in.External, &out.External + *out = new(ExternalMetricSource) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricSpec. +func (in *MetricSpec) DeepCopy() *MetricSpec { + if in == nil { + return nil + } + out := new(MetricSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MetricStatus) DeepCopyInto(out *MetricStatus) { + *out = *in + if in.Object != nil { + in, out := &in.Object, &out.Object + *out = new(ObjectMetricStatus) + (*in).DeepCopyInto(*out) + } + if in.Pods != nil { + in, out := &in.Pods, &out.Pods + *out = new(PodsMetricStatus) + (*in).DeepCopyInto(*out) + } + if in.Resource != nil { + in, out := &in.Resource, &out.Resource + *out = new(ResourceMetricStatus) + (*in).DeepCopyInto(*out) + } + if in.ContainerResource != nil { + in, out := &in.ContainerResource, &out.ContainerResource + *out = new(ContainerResourceMetricStatus) + (*in).DeepCopyInto(*out) + } + if in.External != nil { + in, out := &in.External, &out.External + *out = new(ExternalMetricStatus) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricStatus. +func (in *MetricStatus) DeepCopy() *MetricStatus { + if in == nil { + return nil + } + out := new(MetricStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MetricTarget) DeepCopyInto(out *MetricTarget) { + *out = *in + if in.Value != nil { + in, out := &in.Value, &out.Value + x := (*in).DeepCopy() + *out = &x + } + if in.AverageValue != nil { + in, out := &in.AverageValue, &out.AverageValue + x := (*in).DeepCopy() + *out = &x + } + if in.AverageUtilization != nil { + in, out := &in.AverageUtilization, &out.AverageUtilization + *out = new(int32) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricTarget. +func (in *MetricTarget) DeepCopy() *MetricTarget { + if in == nil { + return nil + } + out := new(MetricTarget) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MetricValueStatus) DeepCopyInto(out *MetricValueStatus) { + *out = *in + if in.Value != nil { + in, out := &in.Value, &out.Value + x := (*in).DeepCopy() + *out = &x + } + if in.AverageValue != nil { + in, out := &in.AverageValue, &out.AverageValue + x := (*in).DeepCopy() + *out = &x + } + if in.AverageUtilization != nil { + in, out := &in.AverageUtilization, &out.AverageUtilization + *out = new(int32) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricValueStatus. +func (in *MetricValueStatus) DeepCopy() *MetricValueStatus { + if in == nil { + return nil + } + out := new(MetricValueStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ObjectMetricSource) DeepCopyInto(out *ObjectMetricSource) { + *out = *in + out.DescribedObject = in.DescribedObject + in.Target.DeepCopyInto(&out.Target) + in.Metric.DeepCopyInto(&out.Metric) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectMetricSource. +func (in *ObjectMetricSource) DeepCopy() *ObjectMetricSource { + if in == nil { + return nil + } + out := new(ObjectMetricSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ObjectMetricStatus) DeepCopyInto(out *ObjectMetricStatus) { + *out = *in + in.Metric.DeepCopyInto(&out.Metric) + in.Current.DeepCopyInto(&out.Current) + out.DescribedObject = in.DescribedObject + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectMetricStatus. +func (in *ObjectMetricStatus) DeepCopy() *ObjectMetricStatus { + if in == nil { + return nil + } + out := new(ObjectMetricStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodsMetricSource) DeepCopyInto(out *PodsMetricSource) { + *out = *in + in.Metric.DeepCopyInto(&out.Metric) + in.Target.DeepCopyInto(&out.Target) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodsMetricSource. +func (in *PodsMetricSource) DeepCopy() *PodsMetricSource { + if in == nil { + return nil + } + out := new(PodsMetricSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodsMetricStatus) DeepCopyInto(out *PodsMetricStatus) { + *out = *in + in.Metric.DeepCopyInto(&out.Metric) + in.Current.DeepCopyInto(&out.Current) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodsMetricStatus. +func (in *PodsMetricStatus) DeepCopy() *PodsMetricStatus { + if in == nil { + return nil + } + out := new(PodsMetricStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceMetricSource) DeepCopyInto(out *ResourceMetricSource) { + *out = *in + in.Target.DeepCopyInto(&out.Target) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceMetricSource. 
+func (in *ResourceMetricSource) DeepCopy() *ResourceMetricSource { + if in == nil { + return nil + } + out := new(ResourceMetricSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceMetricStatus) DeepCopyInto(out *ResourceMetricStatus) { + *out = *in + in.Current.DeepCopyInto(&out.Current) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceMetricStatus. +func (in *ResourceMetricStatus) DeepCopy() *ResourceMetricStatus { + if in == nil { + return nil + } + out := new(ResourceMetricStatus) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/api/autoscaling/v2beta1/generated.proto b/vendor/k8s.io/api/autoscaling/v2beta1/generated.proto index 5ad55fa72779a..27f9ab45c4e84 100644 --- a/vendor/k8s.io/api/autoscaling/v2beta1/generated.proto +++ b/vendor/k8s.io/api/autoscaling/v2beta1/generated.proto @@ -248,6 +248,7 @@ message HorizontalPodAutoscalerStatus { // conditions is the set of conditions required for this autoscaler to scale its target, // and indicates whether or not those conditions are met. + // +optional repeated HorizontalPodAutoscalerCondition conditions = 6; } diff --git a/vendor/k8s.io/api/autoscaling/v2beta1/types.go b/vendor/k8s.io/api/autoscaling/v2beta1/types.go index 05023d9bcf436..22bb7699e9d14 100644 --- a/vendor/k8s.io/api/autoscaling/v2beta1/types.go +++ b/vendor/k8s.io/api/autoscaling/v2beta1/types.go @@ -264,6 +264,7 @@ type HorizontalPodAutoscalerStatus struct { // conditions is the set of conditions required for this autoscaler to scale its target, // and indicates whether or not those conditions are met. + // +optional Conditions []HorizontalPodAutoscalerCondition `json:"conditions" protobuf:"bytes,6,rep,name=conditions"` } @@ -448,7 +449,7 @@ type ExternalMetricStatus struct { // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +k8s:prerelease-lifecycle-gen:introduced=1.8 // +k8s:prerelease-lifecycle-gen:deprecated=1.22 -// +k8s:prerelease-lifecycle-gen:replacement=autoscaling,v2beta2,HorizontalPodAutoscaler +// +k8s:prerelease-lifecycle-gen:replacement=autoscaling,v2,HorizontalPodAutoscaler // HorizontalPodAutoscaler is the configuration for a horizontal pod // autoscaler, which automatically manages the replica count of any resource diff --git a/vendor/k8s.io/api/autoscaling/v2beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/autoscaling/v2beta1/zz_generated.deepcopy.go index f10c7884d0d05..610e81f81a7cc 100644 --- a/vendor/k8s.io/api/autoscaling/v2beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/autoscaling/v2beta1/zz_generated.deepcopy.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* diff --git a/vendor/k8s.io/api/autoscaling/v2beta1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/autoscaling/v2beta1/zz_generated.prerelease-lifecycle.go index f6baef6997547..3437454ee3efe 100644 --- a/vendor/k8s.io/api/autoscaling/v2beta1/zz_generated.prerelease-lifecycle.go +++ b/vendor/k8s.io/api/autoscaling/v2beta1/zz_generated.prerelease-lifecycle.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* @@ -39,7 +40,7 @@ func (in *HorizontalPodAutoscaler) APILifecycleDeprecated() (major, minor int) { // APILifecycleReplacement is an autogenerated function, returning the group, version, and kind that should be used instead of this deprecated type. 
// It is controlled by "k8s:prerelease-lifecycle-gen:replacement=,," tags in types.go. func (in *HorizontalPodAutoscaler) APILifecycleReplacement() schema.GroupVersionKind { - return schema.GroupVersionKind{Group: "autoscaling", Version: "v2beta2", Kind: "HorizontalPodAutoscaler"} + return schema.GroupVersionKind{Group: "autoscaling", Version: "v2", Kind: "HorizontalPodAutoscaler"} } // APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. diff --git a/vendor/k8s.io/api/autoscaling/v2beta2/generated.proto b/vendor/k8s.io/api/autoscaling/v2beta2/generated.proto index 77a6cb379eeb8..7dee1447040b0 100644 --- a/vendor/k8s.io/api/autoscaling/v2beta2/generated.proto +++ b/vendor/k8s.io/api/autoscaling/v2beta2/generated.proto @@ -272,6 +272,7 @@ message HorizontalPodAutoscalerStatus { // conditions is the set of conditions required for this autoscaler to scale its target, // and indicates whether or not those conditions are met. + // +optional repeated HorizontalPodAutoscalerCondition conditions = 6; } diff --git a/vendor/k8s.io/api/autoscaling/v2beta2/types.go b/vendor/k8s.io/api/autoscaling/v2beta2/types.go index ac6cb6769030a..10bdec5b94092 100644 --- a/vendor/k8s.io/api/autoscaling/v2beta2/types.go +++ b/vendor/k8s.io/api/autoscaling/v2beta2/types.go @@ -27,7 +27,8 @@ import ( // +genclient // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +k8s:prerelease-lifecycle-gen:introduced=1.12 -// +k8s:prerelease-lifecycle-gen:deprecated=1.22 +// +k8s:prerelease-lifecycle-gen:deprecated=1.23 +// +k8s:prerelease-lifecycle-gen:replacement=autoscaling,v2,HorizontalPodAutoscaler // HorizontalPodAutoscaler is the configuration for a horizontal pod // autoscaler, which automatically manages the replica count of any resource @@ -373,6 +374,7 @@ type HorizontalPodAutoscalerStatus struct { // conditions is the set of conditions required for this autoscaler to scale its target, // and indicates whether or not those conditions are met. + // +optional Conditions []HorizontalPodAutoscalerCondition `json:"conditions" protobuf:"bytes,6,rep,name=conditions"` } diff --git a/vendor/k8s.io/api/autoscaling/v2beta2/zz_generated.deepcopy.go b/vendor/k8s.io/api/autoscaling/v2beta2/zz_generated.deepcopy.go index 81642822a6a85..49838843c0619 100644 --- a/vendor/k8s.io/api/autoscaling/v2beta2/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/autoscaling/v2beta2/zz_generated.deepcopy.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* diff --git a/vendor/k8s.io/api/autoscaling/v2beta2/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/autoscaling/v2beta2/zz_generated.prerelease-lifecycle.go index 83926e3a0f9da..6d1c2504abf77 100644 --- a/vendor/k8s.io/api/autoscaling/v2beta2/zz_generated.prerelease-lifecycle.go +++ b/vendor/k8s.io/api/autoscaling/v2beta2/zz_generated.prerelease-lifecycle.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* @@ -20,6 +21,10 @@ limitations under the License. package v2beta2 +import ( + schema "k8s.io/apimachinery/pkg/runtime/schema" +) + // APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. // It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. 
func (in *HorizontalPodAutoscaler) APILifecycleIntroduced() (major, minor int) { @@ -29,13 +34,19 @@ func (in *HorizontalPodAutoscaler) APILifecycleIntroduced() (major, minor int) { // APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. // It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. func (in *HorizontalPodAutoscaler) APILifecycleDeprecated() (major, minor int) { - return 1, 22 + return 1, 23 +} + +// APILifecycleReplacement is an autogenerated function, returning the group, version, and kind that should be used instead of this deprecated type. +// It is controlled by "k8s:prerelease-lifecycle-gen:replacement=,," tags in types.go. +func (in *HorizontalPodAutoscaler) APILifecycleReplacement() schema.GroupVersionKind { + return schema.GroupVersionKind{Group: "autoscaling", Version: "v2", Kind: "HorizontalPodAutoscaler"} } // APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. // It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. func (in *HorizontalPodAutoscaler) APILifecycleRemoved() (major, minor int) { - return 1, 25 + return 1, 26 } // APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. diff --git a/vendor/k8s.io/api/batch/v1/generated.pb.go b/vendor/k8s.io/api/batch/v1/generated.pb.go index 1407caebc61fc..a33edf5b7f1f3 100644 --- a/vendor/k8s.io/api/batch/v1/generated.pb.go +++ b/vendor/k8s.io/api/batch/v1/generated.pb.go @@ -33,6 +33,8 @@ import ( math_bits "math/bits" reflect "reflect" strings "strings" + + k8s_io_apimachinery_pkg_types "k8s.io/apimachinery/pkg/types" ) // Reference imports to suppress errors if they are not otherwise used. 
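[Editor's note, not part of the patch: per the prerelease-lifecycle hunks above, both autoscaling/v2beta1 and v2beta2 HorizontalPodAutoscalers now report autoscaling/v2 as their replacement, with v2beta2 deprecated in 1.23 and removed in 1.26. A minimal sketch of querying those generated lifecycle hooks; illustrative only.]

package main

import (
	"fmt"

	autoscalingv2beta2 "k8s.io/api/autoscaling/v2beta2"
)

func main() {
	var hpa autoscalingv2beta2.HorizontalPodAutoscaler

	depMajor, depMinor := hpa.APILifecycleDeprecated() // 1, 23 after this patch
	remMajor, remMinor := hpa.APILifecycleRemoved()    // 1, 26 after this patch
	gvk := hpa.APILifecycleReplacement()               // autoscaling/v2 HorizontalPodAutoscaler

	fmt.Printf("deprecated in %d.%d, removed in %d.%d, replaced by %s\n",
		depMajor, depMinor, remMajor, remMinor, gvk)
}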
@@ -326,6 +328,34 @@ func (m *JobTemplateSpec) XXX_DiscardUnknown() { var xxx_messageInfo_JobTemplateSpec proto.InternalMessageInfo +func (m *UncountedTerminatedPods) Reset() { *m = UncountedTerminatedPods{} } +func (*UncountedTerminatedPods) ProtoMessage() {} +func (*UncountedTerminatedPods) Descriptor() ([]byte, []int) { + return fileDescriptor_3b52da57c93de713, []int{10} +} +func (m *UncountedTerminatedPods) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *UncountedTerminatedPods) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *UncountedTerminatedPods) XXX_Merge(src proto.Message) { + xxx_messageInfo_UncountedTerminatedPods.Merge(m, src) +} +func (m *UncountedTerminatedPods) XXX_Size() int { + return m.Size() +} +func (m *UncountedTerminatedPods) XXX_DiscardUnknown() { + xxx_messageInfo_UncountedTerminatedPods.DiscardUnknown(m) +} + +var xxx_messageInfo_UncountedTerminatedPods proto.InternalMessageInfo + func init() { proto.RegisterType((*CronJob)(nil), "k8s.io.api.batch.v1.CronJob") proto.RegisterType((*CronJobList)(nil), "k8s.io.api.batch.v1.CronJobList") @@ -337,6 +367,7 @@ func init() { proto.RegisterType((*JobSpec)(nil), "k8s.io.api.batch.v1.JobSpec") proto.RegisterType((*JobStatus)(nil), "k8s.io.api.batch.v1.JobStatus") proto.RegisterType((*JobTemplateSpec)(nil), "k8s.io.api.batch.v1.JobTemplateSpec") + proto.RegisterType((*UncountedTerminatedPods)(nil), "k8s.io.api.batch.v1.UncountedTerminatedPods") } func init() { @@ -344,89 +375,96 @@ func init() { } var fileDescriptor_3b52da57c93de713 = []byte{ - // 1304 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x56, 0x41, 0x6f, 0x1b, 0x45, - 0x14, 0xce, 0xc6, 0x71, 0x6c, 0x8f, 0x93, 0xd4, 0x9d, 0xd2, 0xd6, 0x98, 0xca, 0x1b, 0x4c, 0x41, - 0x01, 0xc1, 0x9a, 0x94, 0x08, 0x21, 0x04, 0x48, 0xd9, 0x54, 0x15, 0x0d, 0x8e, 0x1a, 0xc6, 0xa9, - 0x90, 0xa0, 0x20, 0xc6, 0xbb, 0x63, 0x67, 0x9b, 0xdd, 0x9d, 0xd5, 0xce, 0xd8, 0x22, 0x37, 0x7e, - 0x02, 0xbf, 0x02, 0x71, 0x42, 0x48, 0xdc, 0x39, 0xa2, 0x1e, 0x7b, 0xec, 0x69, 0x45, 0x97, 0x1b, - 0x17, 0xee, 0xe1, 0x82, 0x76, 0x76, 0xbc, 0xbb, 0xb6, 0x77, 0x43, 0xd3, 0x43, 0xc5, 0xcd, 0xfb, - 0xe6, 0xfb, 0xbe, 0x79, 0x7e, 0xef, 0xcd, 0x7b, 0x0f, 0x7c, 0x74, 0xf2, 0x01, 0xd3, 0x2c, 0xda, - 0x3d, 0x19, 0x0f, 0x88, 0xef, 0x12, 0x4e, 0x58, 0x77, 0x42, 0x5c, 0x93, 0xfa, 0x5d, 0x79, 0x80, - 0x3d, 0xab, 0x3b, 0xc0, 0xdc, 0x38, 0xee, 0x4e, 0xb6, 0xbb, 0x23, 0xe2, 0x12, 0x1f, 0x73, 0x62, - 0x6a, 0x9e, 0x4f, 0x39, 0x85, 0x57, 0x62, 0x90, 0x86, 0x3d, 0x4b, 0x13, 0x20, 0x6d, 0xb2, 0xdd, - 0x7a, 0x67, 0x64, 0xf1, 0xe3, 0xf1, 0x40, 0x33, 0xa8, 0xd3, 0x1d, 0xd1, 0x11, 0xed, 0x0a, 0xec, - 0x60, 0x3c, 0x14, 0x5f, 0xe2, 0x43, 0xfc, 0x8a, 0x35, 0x5a, 0x9d, 0xcc, 0x45, 0x06, 0xf5, 0x49, - 0xce, 0x3d, 0xad, 0x9d, 0x14, 0xe3, 0x60, 0xe3, 0xd8, 0x72, 0x89, 0x7f, 0xda, 0xf5, 0x4e, 0x46, - 0x91, 0x81, 0x75, 0x1d, 0xc2, 0x71, 0x1e, 0xab, 0x5b, 0xc4, 0xf2, 0xc7, 0x2e, 0xb7, 0x1c, 0xb2, - 0x40, 0x78, 0xff, 0xbf, 0x08, 0xcc, 0x38, 0x26, 0x0e, 0x9e, 0xe7, 0x75, 0xfe, 0x51, 0x40, 0x65, - 0xcf, 0xa7, 0xee, 0x3e, 0x1d, 0xc0, 0x6f, 0x41, 0x35, 0xf2, 0xc7, 0xc4, 0x1c, 0x37, 0x95, 0x4d, - 0x65, 0xab, 0x7e, 0xeb, 0x5d, 0x2d, 0x8d, 0x52, 0x22, 0xab, 0x79, 0x27, 0xa3, 0xc8, 0xc0, 0xb4, - 0x08, 0xad, 0x4d, 0xb6, 0xb5, 0x7b, 0x83, 0x87, 0xc4, 0xe0, 0x07, 0x84, 0x63, 0x1d, 0x3e, 0x0a, - 0xd4, 0xa5, 0x30, 0x50, 0x41, 0x6a, 0x43, 
0x89, 0x2a, 0xd4, 0xc1, 0x0a, 0xf3, 0x88, 0xd1, 0x5c, - 0x16, 0xea, 0x9b, 0x5a, 0x4e, 0x0e, 0x34, 0xe9, 0x4d, 0xdf, 0x23, 0x86, 0xbe, 0x26, 0xd5, 0x56, - 0xa2, 0x2f, 0x24, 0xb8, 0x70, 0x1f, 0xac, 0x32, 0x8e, 0xf9, 0x98, 0x35, 0x4b, 0x42, 0xa5, 0x73, - 0xae, 0x8a, 0x40, 0xea, 0x1b, 0x52, 0x67, 0x35, 0xfe, 0x46, 0x52, 0xa1, 0xf3, 0xb3, 0x02, 0xea, - 0x12, 0xd9, 0xb3, 0x18, 0x87, 0x0f, 0x16, 0x22, 0xa0, 0x3d, 0x5b, 0x04, 0x22, 0xb6, 0xf8, 0xff, - 0x0d, 0x79, 0x53, 0x75, 0x6a, 0xc9, 0xfc, 0xfb, 0x5d, 0x50, 0xb6, 0x38, 0x71, 0x58, 0x73, 0x79, - 0xb3, 0xb4, 0x55, 0xbf, 0x75, 0xe3, 0x3c, 0xc7, 0xf5, 0x75, 0x29, 0x54, 0xbe, 0x1b, 0x51, 0x50, - 0xcc, 0xec, 0xfc, 0xb4, 0x92, 0x38, 0x1c, 0x85, 0x04, 0xbe, 0x0d, 0xaa, 0x51, 0x62, 0xcd, 0xb1, - 0x4d, 0x84, 0xc3, 0xb5, 0xd4, 0x81, 0xbe, 0xb4, 0xa3, 0x04, 0x01, 0xef, 0x83, 0xeb, 0x8c, 0x63, - 0x9f, 0x5b, 0xee, 0xe8, 0x36, 0xc1, 0xa6, 0x6d, 0xb9, 0xa4, 0x4f, 0x0c, 0xea, 0x9a, 0x4c, 0x64, - 0xa4, 0xa4, 0xbf, 0x12, 0x06, 0xea, 0xf5, 0x7e, 0x3e, 0x04, 0x15, 0x71, 0xe1, 0x03, 0x70, 0xd9, - 0xa0, 0xae, 0x31, 0xf6, 0x7d, 0xe2, 0x1a, 0xa7, 0x87, 0xd4, 0xb6, 0x8c, 0x53, 0x91, 0x9c, 0x9a, - 0xae, 0x49, 0x6f, 0x2e, 0xef, 0xcd, 0x03, 0xce, 0xf2, 0x8c, 0x68, 0x51, 0x08, 0xbe, 0x0e, 0x2a, - 0x6c, 0xcc, 0x3c, 0xe2, 0x9a, 0xcd, 0x95, 0x4d, 0x65, 0xab, 0xaa, 0xd7, 0xc3, 0x40, 0xad, 0xf4, - 0x63, 0x13, 0x9a, 0x9e, 0xc1, 0xaf, 0x40, 0xfd, 0x21, 0x1d, 0x1c, 0x11, 0xc7, 0xb3, 0x31, 0x27, - 0xcd, 0xb2, 0xc8, 0xde, 0xcd, 0xdc, 0x10, 0xef, 0xa7, 0x38, 0x51, 0x65, 0x57, 0xa4, 0x93, 0xf5, - 0xcc, 0x01, 0xca, 0xaa, 0xc1, 0x6f, 0x40, 0x8b, 0x8d, 0x0d, 0x83, 0x30, 0x36, 0x1c, 0xdb, 0xfb, - 0x74, 0xc0, 0x3e, 0xb5, 0x18, 0xa7, 0xfe, 0x69, 0xcf, 0x72, 0x2c, 0xde, 0x5c, 0xdd, 0x54, 0xb6, - 0xca, 0x7a, 0x3b, 0x0c, 0xd4, 0x56, 0xbf, 0x10, 0x85, 0xce, 0x51, 0x80, 0x08, 0x5c, 0x1b, 0x62, - 0xcb, 0x26, 0xe6, 0x82, 0x76, 0x45, 0x68, 0xb7, 0xc2, 0x40, 0xbd, 0x76, 0x27, 0x17, 0x81, 0x0a, - 0x98, 0x9d, 0xdf, 0x96, 0xc1, 0xfa, 0xcc, 0x2b, 0x80, 0x9f, 0x81, 0x55, 0x6c, 0x70, 0x6b, 0x12, - 0x95, 0x4a, 0x54, 0x80, 0xaf, 0x65, 0xa3, 0x13, 0xf5, 0xaf, 0xf4, 0x2d, 0x23, 0x32, 0x24, 0x51, - 0x12, 0x48, 0xfa, 0x74, 0x76, 0x05, 0x15, 0x49, 0x09, 0x68, 0x83, 0x86, 0x8d, 0x19, 0x9f, 0x56, - 0xd9, 0x91, 0xe5, 0x10, 0x91, 0x9f, 0xfa, 0xad, 0xb7, 0x9e, 0xed, 0xc9, 0x44, 0x0c, 0xfd, 0xa5, - 0x30, 0x50, 0x1b, 0xbd, 0x39, 0x1d, 0xb4, 0xa0, 0x0c, 0x7d, 0x00, 0x85, 0x2d, 0x09, 0xa1, 0xb8, - 0xaf, 0x7c, 0xe1, 0xfb, 0xae, 0x85, 0x81, 0x0a, 0x7b, 0x0b, 0x4a, 0x28, 0x47, 0xbd, 0xf3, 0xb7, - 0x02, 0x4a, 0x2f, 0xa6, 0x2d, 0x7e, 0x32, 0xd3, 0x16, 0x6f, 0x14, 0x15, 0x6d, 0x61, 0x4b, 0xbc, - 0x33, 0xd7, 0x12, 0xdb, 0x85, 0x0a, 0xe7, 0xb7, 0xc3, 0xdf, 0x4b, 0x60, 0x6d, 0x9f, 0x0e, 0xf6, - 0xa8, 0x6b, 0x5a, 0xdc, 0xa2, 0x2e, 0xdc, 0x01, 0x2b, 0xfc, 0xd4, 0x9b, 0xb6, 0x96, 0xcd, 0xe9, - 0xd5, 0x47, 0xa7, 0x1e, 0x39, 0x0b, 0xd4, 0x46, 0x16, 0x1b, 0xd9, 0x90, 0x40, 0xc3, 0x5e, 0xe2, - 0xce, 0xb2, 0xe0, 0xed, 0xcc, 0x5e, 0x77, 0x16, 0xa8, 0x39, 0x83, 0x53, 0x4b, 0x94, 0x66, 0x9d, - 0x82, 0x23, 0xb0, 0x1e, 0x25, 0xe7, 0xd0, 0xa7, 0x83, 0xb8, 0xca, 0x4a, 0x17, 0xce, 0xfa, 0x55, - 0xe9, 0xc0, 0x7a, 0x2f, 0x2b, 0x84, 0x66, 0x75, 0xe1, 0x24, 0xae, 0xb1, 0x23, 0x1f, 0xbb, 0x2c, - 0xfe, 0x4b, 0xcf, 0x57, 0xd3, 0x2d, 0x79, 0x9b, 0xa8, 0xb3, 0x59, 0x35, 0x94, 0x73, 0x03, 0x7c, - 0x03, 0xac, 0xfa, 0x04, 0x33, 0xea, 0x8a, 0x7a, 0xae, 0xa5, 0xd9, 0x41, 0xc2, 0x8a, 0xe4, 0x29, - 0x7c, 0x13, 0x54, 0x1c, 0xc2, 0x18, 0x1e, 0x11, 0xd1, 0x71, 0x6a, 0xfa, 0x25, 0x09, 0xac, 0x1c, - 0xc4, 0x66, 0x34, 0x3d, 0xef, 0xfc, 0xa8, 0x80, 0xca, 0x8b, 0x99, 
0x69, 0x1f, 0xcf, 0xce, 0xb4, - 0x66, 0x51, 0xe5, 0x15, 0xcc, 0xb3, 0x5f, 0xca, 0xc2, 0x51, 0x31, 0xcb, 0xb6, 0x41, 0xdd, 0xc3, - 0x3e, 0xb6, 0x6d, 0x62, 0x5b, 0xcc, 0x11, 0xbe, 0x96, 0xf5, 0x4b, 0x51, 0x5f, 0x3e, 0x4c, 0xcd, - 0x28, 0x8b, 0x89, 0x28, 0x06, 0x75, 0x3c, 0x9b, 0x44, 0xc1, 0x8c, 0xcb, 0x4d, 0x52, 0xf6, 0x52, - 0x33, 0xca, 0x62, 0xe0, 0x3d, 0x70, 0x35, 0xee, 0x60, 0xf3, 0x13, 0xb0, 0x24, 0x26, 0xe0, 0xcb, - 0x61, 0xa0, 0x5e, 0xdd, 0xcd, 0x03, 0xa0, 0x7c, 0x1e, 0xdc, 0x01, 0x6b, 0x03, 0x6c, 0x9c, 0xd0, - 0xe1, 0x30, 0xdb, 0xb1, 0x1b, 0x61, 0xa0, 0xae, 0xe9, 0x19, 0x3b, 0x9a, 0x41, 0xc1, 0xaf, 0x41, - 0x95, 0x11, 0x9b, 0x18, 0x9c, 0xfa, 0xb2, 0xc4, 0xde, 0x7b, 0xc6, 0xac, 0xe0, 0x01, 0xb1, 0xfb, - 0x92, 0xaa, 0xaf, 0x89, 0x49, 0x2f, 0xbf, 0x50, 0x22, 0x09, 0x3f, 0x04, 0x1b, 0x0e, 0x76, 0xc7, - 0x38, 0x41, 0x8a, 0xda, 0xaa, 0xea, 0x30, 0x0c, 0xd4, 0x8d, 0x83, 0x99, 0x13, 0x34, 0x87, 0x84, - 0x9f, 0x83, 0x2a, 0x9f, 0x8e, 0xd1, 0x55, 0xe1, 0x5a, 0xee, 0xa0, 0x38, 0xa4, 0xe6, 0xcc, 0x14, - 0x4d, 0xaa, 0x24, 0x19, 0xa1, 0x89, 0x4c, 0xb4, 0x78, 0x70, 0x6e, 0xcb, 0x88, 0xed, 0x0e, 0x39, - 0xf1, 0xef, 0x58, 0xae, 0xc5, 0x8e, 0x89, 0xd9, 0xac, 0x8a, 0x70, 0x89, 0xc5, 0xe3, 0xe8, 0xa8, - 0x97, 0x07, 0x41, 0x45, 0x5c, 0xd8, 0x03, 0x1b, 0x69, 0x6a, 0x0f, 0xa8, 0x49, 0x9a, 0x35, 0xf1, - 0x30, 0x6e, 0x46, 0xff, 0x72, 0x6f, 0xe6, 0xe4, 0x6c, 0xc1, 0x82, 0xe6, 0xb8, 0xd9, 0x45, 0x03, - 0x14, 0x2f, 0x1a, 0x9d, 0xbf, 0x4a, 0xa0, 0x96, 0xce, 0xd4, 0xfb, 0x00, 0x18, 0xd3, 0xc6, 0xc5, - 0xe4, 0x5c, 0x7d, 0xb5, 0xe8, 0x11, 0x24, 0x2d, 0x2e, 0x9d, 0x07, 0x89, 0x89, 0xa1, 0x8c, 0x10, - 0xfc, 0x02, 0xd4, 0xc4, 0xb6, 0x25, 0x5a, 0xd0, 0xf2, 0x85, 0x5b, 0xd0, 0x7a, 0x18, 0xa8, 0xb5, - 0xfe, 0x54, 0x00, 0xa5, 0x5a, 0x70, 0x98, 0x0d, 0xd9, 0x73, 0xb6, 0x53, 0x38, 0x1b, 0x5e, 0x71, - 0xc5, 0x9c, 0x6a, 0xd4, 0xd4, 0xe4, 0xae, 0xb1, 0x22, 0x12, 0x5c, 0xb4, 0x46, 0x74, 0x41, 0x4d, - 0xec, 0x45, 0xc4, 0x24, 0xa6, 0xa8, 0xd1, 0xb2, 0x7e, 0x59, 0x42, 0x6b, 0xfd, 0xe9, 0x01, 0x4a, - 0x31, 0x91, 0x70, 0xbc, 0xf0, 0xc8, 0xb5, 0x2b, 0x11, 0x8e, 0xd7, 0x23, 0x24, 0x4f, 0xe1, 0x6d, - 0xd0, 0x90, 0x2e, 0x11, 0xf3, 0xae, 0x6b, 0x92, 0xef, 0x08, 0x13, 0x4f, 0xb3, 0xa6, 0x37, 0x25, - 0xa3, 0xb1, 0x37, 0x77, 0x8e, 0x16, 0x18, 0x9d, 0x5f, 0x15, 0x70, 0x69, 0x6e, 0x5d, 0xfc, 0xff, - 0xef, 0x03, 0xfa, 0xd6, 0xa3, 0xa7, 0xed, 0xa5, 0xc7, 0x4f, 0xdb, 0x4b, 0x4f, 0x9e, 0xb6, 0x97, - 0xbe, 0x0f, 0xdb, 0xca, 0xa3, 0xb0, 0xad, 0x3c, 0x0e, 0xdb, 0xca, 0x93, 0xb0, 0xad, 0xfc, 0x11, - 0xb6, 0x95, 0x1f, 0xfe, 0x6c, 0x2f, 0x7d, 0xb9, 0x3c, 0xd9, 0xfe, 0x37, 0x00, 0x00, 0xff, 0xff, - 0x5a, 0x54, 0xec, 0x5f, 0x44, 0x0f, 0x00, 0x00, + // 1413 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x57, 0x41, 0x6f, 0x1b, 0xc5, + 0x17, 0xcf, 0x26, 0x71, 0x62, 0x8f, 0x93, 0xd4, 0x9d, 0xfe, 0xdb, 0xfa, 0x6f, 0x2a, 0x6f, 0x6a, + 0x0a, 0x0a, 0xa8, 0xac, 0x49, 0x88, 0x10, 0x42, 0x80, 0x94, 0x4d, 0x55, 0x68, 0x70, 0xd4, 0x30, + 0x76, 0x84, 0x04, 0x05, 0xb1, 0xde, 0x1d, 0x3b, 0xdb, 0xec, 0xee, 0x58, 0x3b, 0x63, 0x0b, 0xdf, + 0x90, 0xf8, 0x02, 0xf0, 0x25, 0x10, 0x27, 0x84, 0x04, 0x67, 0x8e, 0xa8, 0xc7, 0x1e, 0x7b, 0x5a, + 0xd1, 0xe5, 0x03, 0x70, 0x0f, 0x17, 0x34, 0xb3, 0xe3, 0xdd, 0xb5, 0xbd, 0x1b, 0xd2, 0x1e, 0x2a, + 0x6e, 0xd9, 0x37, 0xbf, 0xf7, 0x9b, 0xe7, 0xf7, 0x7e, 0xf3, 0xde, 0x0b, 0x78, 0xef, 0xf4, 0x1d, + 0xaa, 0xd9, 0xa4, 0x79, 0x3a, 0xec, 0x62, 0xdf, 0xc3, 0x0c, 0xd3, 0xe6, 0x08, 0x7b, 0x16, 0xf1, + 0x9b, 0xf2, 0xc0, 0x18, 0xd8, 0xcd, 0xae, 0xc1, 0xcc, 0x93, 0xe6, 0x68, 0xbb, 0xd9, 0xc7, 
0x1e, + 0xf6, 0x0d, 0x86, 0x2d, 0x6d, 0xe0, 0x13, 0x46, 0xe0, 0x95, 0x08, 0xa4, 0x19, 0x03, 0x5b, 0x13, + 0x20, 0x6d, 0xb4, 0x5d, 0x7b, 0xa3, 0x6f, 0xb3, 0x93, 0x61, 0x57, 0x33, 0x89, 0xdb, 0xec, 0x93, + 0x3e, 0x69, 0x0a, 0x6c, 0x77, 0xd8, 0x13, 0x5f, 0xe2, 0x43, 0xfc, 0x15, 0x71, 0xd4, 0x1a, 0xa9, + 0x8b, 0x4c, 0xe2, 0xe3, 0x8c, 0x7b, 0x6a, 0xbb, 0x09, 0xc6, 0x35, 0xcc, 0x13, 0xdb, 0xc3, 0xfe, + 0xb8, 0x39, 0x38, 0xed, 0x73, 0x03, 0x6d, 0xba, 0x98, 0x19, 0x59, 0x5e, 0xcd, 0x3c, 0x2f, 0x7f, + 0xe8, 0x31, 0xdb, 0xc5, 0x73, 0x0e, 0x6f, 0xff, 0x9b, 0x03, 0x35, 0x4f, 0xb0, 0x6b, 0xcc, 0xfa, + 0x35, 0xfe, 0x56, 0xc0, 0xea, 0xbe, 0x4f, 0xbc, 0x03, 0xd2, 0x85, 0x5f, 0x81, 0x22, 0x8f, 0xc7, + 0x32, 0x98, 0x51, 0x55, 0x36, 0x95, 0xad, 0xf2, 0xce, 0x9b, 0x5a, 0x92, 0xa5, 0x98, 0x56, 0x1b, + 0x9c, 0xf6, 0xb9, 0x81, 0x6a, 0x1c, 0xad, 0x8d, 0xb6, 0xb5, 0xfb, 0xdd, 0x87, 0xd8, 0x64, 0x87, + 0x98, 0x19, 0x3a, 0x7c, 0x14, 0xa8, 0x0b, 0x61, 0xa0, 0x82, 0xc4, 0x86, 0x62, 0x56, 0xa8, 0x83, + 0x65, 0x3a, 0xc0, 0x66, 0x75, 0x51, 0xb0, 0x6f, 0x6a, 0x19, 0x35, 0xd0, 0x64, 0x34, 0xed, 0x01, + 0x36, 0xf5, 0x35, 0xc9, 0xb6, 0xcc, 0xbf, 0x90, 0xf0, 0x85, 0x07, 0x60, 0x85, 0x32, 0x83, 0x0d, + 0x69, 0x75, 0x49, 0xb0, 0x34, 0xce, 0x65, 0x11, 0x48, 0x7d, 0x43, 0xf2, 0xac, 0x44, 0xdf, 0x48, + 0x32, 0x34, 0x7e, 0x52, 0x40, 0x59, 0x22, 0x5b, 0x36, 0x65, 0xf0, 0xc1, 0x5c, 0x06, 0xb4, 0x8b, + 0x65, 0x80, 0x7b, 0x8b, 0xdf, 0x5f, 0x91, 0x37, 0x15, 0x27, 0x96, 0xd4, 0xaf, 0xdf, 0x03, 0x05, + 0x9b, 0x61, 0x97, 0x56, 0x17, 0x37, 0x97, 0xb6, 0xca, 0x3b, 0x37, 0xce, 0x0b, 0x5c, 0x5f, 0x97, + 0x44, 0x85, 0x7b, 0xdc, 0x05, 0x45, 0x9e, 0x8d, 0x1f, 0x97, 0xe3, 0x80, 0x79, 0x4a, 0xe0, 0x6d, + 0x50, 0xe4, 0x85, 0xb5, 0x86, 0x0e, 0x16, 0x01, 0x97, 0x92, 0x00, 0xda, 0xd2, 0x8e, 0x62, 0x04, + 0x3c, 0x06, 0xd7, 0x29, 0x33, 0x7c, 0x66, 0x7b, 0xfd, 0x3b, 0xd8, 0xb0, 0x1c, 0xdb, 0xc3, 0x6d, + 0x6c, 0x12, 0xcf, 0xa2, 0xa2, 0x22, 0x4b, 0xfa, 0x4b, 0x61, 0xa0, 0x5e, 0x6f, 0x67, 0x43, 0x50, + 0x9e, 0x2f, 0x7c, 0x00, 0x2e, 0x9b, 0xc4, 0x33, 0x87, 0xbe, 0x8f, 0x3d, 0x73, 0x7c, 0x44, 0x1c, + 0xdb, 0x1c, 0x8b, 0xe2, 0x94, 0x74, 0x4d, 0x46, 0x73, 0x79, 0x7f, 0x16, 0x70, 0x96, 0x65, 0x44, + 0xf3, 0x44, 0xf0, 0x15, 0xb0, 0x4a, 0x87, 0x74, 0x80, 0x3d, 0xab, 0xba, 0xbc, 0xa9, 0x6c, 0x15, + 0xf5, 0x72, 0x18, 0xa8, 0xab, 0xed, 0xc8, 0x84, 0x26, 0x67, 0xf0, 0x73, 0x50, 0x7e, 0x48, 0xba, + 0x1d, 0xec, 0x0e, 0x1c, 0x83, 0xe1, 0x6a, 0x41, 0x54, 0xef, 0x56, 0x66, 0x8a, 0x0f, 0x12, 0x9c, + 0x50, 0xd9, 0x15, 0x19, 0x64, 0x39, 0x75, 0x80, 0xd2, 0x6c, 0xf0, 0x4b, 0x50, 0xa3, 0x43, 0xd3, + 0xc4, 0x94, 0xf6, 0x86, 0xce, 0x01, 0xe9, 0xd2, 0x8f, 0x6c, 0xca, 0x88, 0x3f, 0x6e, 0xd9, 0xae, + 0xcd, 0xaa, 0x2b, 0x9b, 0xca, 0x56, 0x41, 0xaf, 0x87, 0x81, 0x5a, 0x6b, 0xe7, 0xa2, 0xd0, 0x39, + 0x0c, 0x10, 0x81, 0x6b, 0x3d, 0xc3, 0x76, 0xb0, 0x35, 0xc7, 0xbd, 0x2a, 0xb8, 0x6b, 0x61, 0xa0, + 0x5e, 0xbb, 0x9b, 0x89, 0x40, 0x39, 0x9e, 0x8d, 0xdf, 0x16, 0xc1, 0xfa, 0xd4, 0x2b, 0x80, 0x1f, + 0x83, 0x15, 0xc3, 0x64, 0xf6, 0x88, 0x4b, 0x85, 0x0b, 0xf0, 0xe5, 0x74, 0x76, 0x78, 0xff, 0x4a, + 0xde, 0x32, 0xc2, 0x3d, 0xcc, 0x8b, 0x80, 0x93, 0xa7, 0xb3, 0x27, 0x5c, 0x91, 0xa4, 0x80, 0x0e, + 0xa8, 0x38, 0x06, 0x65, 0x13, 0x95, 0x75, 0x6c, 0x17, 0x8b, 0xfa, 0x94, 0x77, 0x5e, 0xbf, 0xd8, + 0x93, 0xe1, 0x1e, 0xfa, 0xff, 0xc2, 0x40, 0xad, 0xb4, 0x66, 0x78, 0xd0, 0x1c, 0x33, 0xf4, 0x01, + 0x14, 0xb6, 0x38, 0x85, 0xe2, 0xbe, 0xc2, 0x33, 0xdf, 0x77, 0x2d, 0x0c, 0x54, 0xd8, 0x9a, 0x63, + 0x42, 0x19, 0xec, 0x8d, 0xbf, 0x14, 0xb0, 0xf4, 0x62, 0xda, 0xe2, 0x07, 0x53, 0x6d, 0xf1, 0x46, + 0x9e, 0x68, 0x73, 
0x5b, 0xe2, 0xdd, 0x99, 0x96, 0x58, 0xcf, 0x65, 0x38, 0xbf, 0x1d, 0xfe, 0xbe, + 0x04, 0xd6, 0x0e, 0x48, 0x77, 0x9f, 0x78, 0x96, 0xcd, 0x6c, 0xe2, 0xc1, 0x5d, 0xb0, 0xcc, 0xc6, + 0x83, 0x49, 0x6b, 0xd9, 0x9c, 0x5c, 0xdd, 0x19, 0x0f, 0xf0, 0x59, 0xa0, 0x56, 0xd2, 0x58, 0x6e, + 0x43, 0x02, 0x0d, 0x5b, 0x71, 0x38, 0x8b, 0xc2, 0x6f, 0x77, 0xfa, 0xba, 0xb3, 0x40, 0xcd, 0x18, + 0x9c, 0x5a, 0xcc, 0x34, 0x1d, 0x14, 0xec, 0x83, 0x75, 0x5e, 0x9c, 0x23, 0x9f, 0x74, 0x23, 0x95, + 0x2d, 0x3d, 0x73, 0xd5, 0xaf, 0xca, 0x00, 0xd6, 0x5b, 0x69, 0x22, 0x34, 0xcd, 0x0b, 0x47, 0x91, + 0xc6, 0x3a, 0xbe, 0xe1, 0xd1, 0xe8, 0x27, 0x3d, 0x9f, 0xa6, 0x6b, 0xf2, 0x36, 0xa1, 0xb3, 0x69, + 0x36, 0x94, 0x71, 0x03, 0x7c, 0x15, 0xac, 0xf8, 0xd8, 0xa0, 0xc4, 0x13, 0x7a, 0x2e, 0x25, 0xd5, + 0x41, 0xc2, 0x8a, 0xe4, 0x29, 0x7c, 0x0d, 0xac, 0xba, 0x98, 0x52, 0xa3, 0x8f, 0x45, 0xc7, 0x29, + 0xe9, 0x97, 0x24, 0x70, 0xf5, 0x30, 0x32, 0xa3, 0xc9, 0x79, 0xe3, 0x07, 0x05, 0xac, 0xbe, 0x98, + 0x99, 0xf6, 0xfe, 0xf4, 0x4c, 0xab, 0xe6, 0x29, 0x2f, 0x67, 0x9e, 0xfd, 0x5c, 0x10, 0x81, 0x8a, + 0x59, 0xb6, 0x0d, 0xca, 0x03, 0xc3, 0x37, 0x1c, 0x07, 0x3b, 0x36, 0x75, 0x45, 0xac, 0x05, 0xfd, + 0x12, 0xef, 0xcb, 0x47, 0x89, 0x19, 0xa5, 0x31, 0xdc, 0xc5, 0x24, 0xee, 0xc0, 0xc1, 0x3c, 0x99, + 0x91, 0xdc, 0xa4, 0xcb, 0x7e, 0x62, 0x46, 0x69, 0x0c, 0xbc, 0x0f, 0xae, 0x46, 0x1d, 0x6c, 0x76, + 0x02, 0x2e, 0x89, 0x09, 0xf8, 0xff, 0x30, 0x50, 0xaf, 0xee, 0x65, 0x01, 0x50, 0xb6, 0x1f, 0xdc, + 0x05, 0x6b, 0x5d, 0xc3, 0x3c, 0x25, 0xbd, 0x5e, 0xba, 0x63, 0x57, 0xc2, 0x40, 0x5d, 0xd3, 0x53, + 0x76, 0x34, 0x85, 0x82, 0x5f, 0x80, 0x22, 0xc5, 0x0e, 0x36, 0x19, 0xf1, 0xa5, 0xc4, 0xde, 0xba, + 0x60, 0x55, 0x8c, 0x2e, 0x76, 0xda, 0xd2, 0x55, 0x5f, 0x13, 0x93, 0x5e, 0x7e, 0xa1, 0x98, 0x12, + 0xbe, 0x0b, 0x36, 0x5c, 0xc3, 0x1b, 0x1a, 0x31, 0x52, 0x68, 0xab, 0xa8, 0xc3, 0x30, 0x50, 0x37, + 0x0e, 0xa7, 0x4e, 0xd0, 0x0c, 0x12, 0x7e, 0x02, 0x8a, 0x6c, 0x32, 0x46, 0x57, 0x44, 0x68, 0x99, + 0x83, 0xe2, 0x88, 0x58, 0x53, 0x53, 0x34, 0x56, 0x49, 0x3c, 0x42, 0x63, 0x1a, 0xbe, 0x78, 0x30, + 0xe6, 0xc8, 0x8c, 0xed, 0xf5, 0x18, 0xf6, 0xef, 0xda, 0x9e, 0x4d, 0x4f, 0xb0, 0x55, 0x2d, 0x8a, + 0x74, 0x89, 0xc5, 0xa3, 0xd3, 0x69, 0x65, 0x41, 0x50, 0x9e, 0x2f, 0x6c, 0x81, 0x8d, 0xa4, 0xb4, + 0x87, 0xc4, 0xc2, 0xd5, 0x92, 0x78, 0x18, 0xb7, 0xf8, 0xaf, 0xdc, 0x9f, 0x3a, 0x39, 0x9b, 0xb3, + 0xa0, 0x19, 0xdf, 0xf4, 0xa2, 0x01, 0xf2, 0x17, 0x8d, 0xc6, 0xf7, 0x05, 0x50, 0x4a, 0x66, 0xea, + 0x31, 0x00, 0xe6, 0xa4, 0x71, 0x51, 0x39, 0x57, 0x6f, 0xe6, 0x3d, 0x82, 0xb8, 0xc5, 0x25, 0xf3, + 0x20, 0x36, 0x51, 0x94, 0x22, 0x82, 0x9f, 0x82, 0x92, 0xd8, 0xb6, 0x44, 0x0b, 0x5a, 0x7c, 0xe6, + 0x16, 0xb4, 0x1e, 0x06, 0x6a, 0xa9, 0x3d, 0x21, 0x40, 0x09, 0x17, 0xec, 0xa5, 0x53, 0xf6, 0x9c, + 0xed, 0x14, 0x4e, 0xa7, 0x57, 0x5c, 0x31, 0xc3, 0xca, 0x9b, 0x9a, 0xdc, 0x35, 0x96, 0x45, 0x81, + 0xf3, 0xd6, 0x88, 0x26, 0x28, 0x89, 0xbd, 0x08, 0x5b, 0xd8, 0x12, 0x1a, 0x2d, 0xe8, 0x97, 0x25, + 0xb4, 0xd4, 0x9e, 0x1c, 0xa0, 0x04, 0xc3, 0x89, 0xa3, 0x85, 0x47, 0xae, 0x5d, 0x31, 0x71, 0xb4, + 0x1e, 0x21, 0x79, 0x0a, 0xef, 0x80, 0x8a, 0x0c, 0x09, 0x5b, 0xf7, 0x3c, 0x0b, 0x7f, 0x8d, 0xa9, + 0x78, 0x9a, 0x25, 0xbd, 0x2a, 0x3d, 0x2a, 0xfb, 0x33, 0xe7, 0x68, 0xce, 0x03, 0x7e, 0xab, 0x80, + 0xeb, 0x43, 0xcf, 0x24, 0x43, 0x8f, 0x61, 0xab, 0x83, 0x7d, 0xd7, 0xf6, 0xf8, 0x3f, 0x4f, 0x47, + 0xc4, 0xa2, 0x42, 0xb9, 0xe5, 0x9d, 0xdb, 0x99, 0xc5, 0x3e, 0xce, 0xf6, 0x89, 0x74, 0x9e, 0x73, + 0x88, 0xf2, 0x6e, 0x82, 0x2a, 0x28, 0xf8, 0xd8, 0xb0, 0xc6, 0x42, 0xde, 0x05, 0xbd, 0xc4, 0xdb, + 0x28, 0xe2, 0x06, 0x14, 0xd9, 0x1b, 0xbf, 
0x28, 0xe0, 0xd2, 0xcc, 0x56, 0xfb, 0xdf, 0x5f, 0x5b, + 0x1a, 0xbf, 0x2a, 0x20, 0x2f, 0x17, 0xf0, 0x28, 0xad, 0x0b, 0xfe, 0xac, 0x4a, 0xfa, 0xce, 0x94, + 0x26, 0xce, 0x02, 0xf5, 0x66, 0xde, 0xff, 0xbc, 0x7c, 0x0b, 0xa1, 0xda, 0xf1, 0xbd, 0x3b, 0x69, + 0xe1, 0x7c, 0x18, 0x0b, 0x67, 0x51, 0xd0, 0x35, 0x13, 0xd1, 0x5c, 0x8c, 0x4b, 0xba, 0xeb, 0x5b, + 0x8f, 0x9e, 0xd6, 0x17, 0x1e, 0x3f, 0xad, 0x2f, 0x3c, 0x79, 0x5a, 0x5f, 0xf8, 0x26, 0xac, 0x2b, + 0x8f, 0xc2, 0xba, 0xf2, 0x38, 0xac, 0x2b, 0x4f, 0xc2, 0xba, 0xf2, 0x47, 0x58, 0x57, 0xbe, 0xfb, + 0xb3, 0xbe, 0xf0, 0xd9, 0xe2, 0x68, 0xfb, 0x9f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x54, 0x31, 0x16, + 0xcf, 0xa2, 0x10, 0x00, 0x00, } func (m *CronJob) Marshal() (dAtA []byte, err error) { @@ -938,6 +976,23 @@ func (m *JobStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.Ready != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.Ready)) + i-- + dAtA[i] = 0x48 + } + if m.UncountedTerminatedPods != nil { + { + size, err := m.UncountedTerminatedPods.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x42 + } i -= len(m.CompletedIndexes) copy(dAtA[i:], m.CompletedIndexes) i = encodeVarintGenerated(dAtA, i, uint64(len(m.CompletedIndexes))) @@ -1036,6 +1091,47 @@ func (m *JobTemplateSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *UncountedTerminatedPods) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UncountedTerminatedPods) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *UncountedTerminatedPods) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Failed) > 0 { + for iNdEx := len(m.Failed) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Failed[iNdEx]) + copy(dAtA[i:], m.Failed[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Failed[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + if len(m.Succeeded) > 0 { + for iNdEx := len(m.Succeeded) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Succeeded[iNdEx]) + copy(dAtA[i:], m.Succeeded[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Succeeded[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { offset -= sovGenerated(v) base := offset @@ -1247,6 +1343,13 @@ func (m *JobStatus) Size() (n int) { n += 1 + sovGenerated(uint64(m.Failed)) l = len(m.CompletedIndexes) n += 1 + l + sovGenerated(uint64(l)) + if m.UncountedTerminatedPods != nil { + l = m.UncountedTerminatedPods.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Ready != nil { + n += 1 + sovGenerated(uint64(*m.Ready)) + } return n } @@ -1263,6 +1366,27 @@ func (m *JobTemplateSpec) Size() (n int) { return n } +func (m *UncountedTerminatedPods) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Succeeded) > 0 { + for _, s := range m.Succeeded { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Failed) > 0 { + for _, s := range m.Failed { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + func sovGenerated(x uint64) (n int) { return (math_bits.Len64(x|1) + 6) / 7 } @@ -1409,6 +1533,8 @@ func (this *JobStatus) String() string { `Succeeded:` + 
fmt.Sprintf("%v", this.Succeeded) + `,`, `Failed:` + fmt.Sprintf("%v", this.Failed) + `,`, `CompletedIndexes:` + fmt.Sprintf("%v", this.CompletedIndexes) + `,`, + `UncountedTerminatedPods:` + strings.Replace(this.UncountedTerminatedPods.String(), "UncountedTerminatedPods", "UncountedTerminatedPods", 1) + `,`, + `Ready:` + valueToStringGenerated(this.Ready) + `,`, `}`, }, "") return s @@ -1424,6 +1550,17 @@ func (this *JobTemplateSpec) String() string { }, "") return s } +func (this *UncountedTerminatedPods) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&UncountedTerminatedPods{`, + `Succeeded:` + fmt.Sprintf("%v", this.Succeeded) + `,`, + `Failed:` + fmt.Sprintf("%v", this.Failed) + `,`, + `}`, + }, "") + return s +} func valueToStringGenerated(v interface{}) string { rv := reflect.ValueOf(v) if rv.IsNil() { @@ -3110,6 +3247,62 @@ func (m *JobStatus) Unmarshal(dAtA []byte) error { } m.CompletedIndexes = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UncountedTerminatedPods", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.UncountedTerminatedPods == nil { + m.UncountedTerminatedPods = &UncountedTerminatedPods{} + } + if err := m.UncountedTerminatedPods.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Ready", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Ready = &v default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -3247,6 +3440,120 @@ func (m *JobTemplateSpec) Unmarshal(dAtA []byte) error { } return nil } +func (m *UncountedTerminatedPods) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: UncountedTerminatedPods: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: UncountedTerminatedPods: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Succeeded", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + 
postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Succeeded = append(m.Succeeded, k8s_io_apimachinery_pkg_types.UID(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Failed", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Failed = append(m.Failed, k8s_io_apimachinery_pkg_types.UID(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func skipGenerated(dAtA []byte) (n int, err error) { l := len(dAtA) iNdEx := 0 diff --git a/vendor/k8s.io/api/batch/v1/generated.proto b/vendor/k8s.io/api/batch/v1/generated.proto index 04f0e7ea7e690..161886029b278 100644 --- a/vendor/k8s.io/api/batch/v1/generated.proto +++ b/vendor/k8s.io/api/batch/v1/generated.proto @@ -227,8 +227,6 @@ message JobSpec { // guarantees (e.g. finalizers) will be honored. If this field is unset, // the Job won't be automatically deleted. If this field is set to zero, // the Job becomes eligible to be deleted immediately after it finishes. - // This field is alpha-level and is only honored by servers that enable the - // TTLAfterFinished feature. // +optional optional int32 ttlSecondsAfterFinished = 8; @@ -246,9 +244,11 @@ message JobSpec { // for each index. // When value is `Indexed`, .spec.completions must be specified and // `.spec.parallelism` must be less than or equal to 10^5. + // In addition, The Pod name takes the form + // `$(job-name)-$(index)-$(random-string)`, + // the Pod hostname takes the form `$(job-name)-$(index)`. // - // This field is alpha-level and is only honored by servers that enable the - // IndexedJob feature gate. More completion modes can be added in the future. + // This field is beta-level. More completion modes can be added in the future. // If the Job controller observes a mode that it doesn't recognize, the // controller skips updates for the Job. // +optional @@ -260,9 +260,11 @@ message JobSpec { // false to true), the Job controller will delete all active Pods associated // with this Job. Users must design their workload to gracefully handle this. // Suspending a Job will reset the StartTime field of the Job, effectively - // resetting the ActiveDeadlineSeconds timer too. This is an alpha field and - // requires the SuspendJob feature gate to be enabled; otherwise this field - // may not be set to true. Defaults to false. + // resetting the ActiveDeadlineSeconds timer too. Defaults to false. + // + // This field is beta-level, gated by SuspendJob feature flag (enabled by + // default). 
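[Editor's note, not part of the patch: the suspend field documented just above can be toggled with an ordinary merge patch. A sketch using client-go, assuming a reachable cluster, a Job named "example-job" in the "default" namespace (both hypothetical), and the default kubeconfig; illustrative only, not the controller's own code.]

package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	cs, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}

	// Setting spec.suspend=true makes the Job controller delete the Job's
	// active Pods and stop creating new ones, per the field docs above.
	patch := []byte(`{"spec":{"suspend":true}}`)
	job, err := cs.BatchV1().Jobs("default").Patch(
		context.TODO(), "example-job", types.MergePatchType, patch, metav1.PatchOptions{})
	if err != nil {
		panic(err)
	}
	fmt.Println("suspended:", job.Spec.Suspend != nil && *job.Spec.Suspend)
}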
+ // // +optional optional bool suspend = 10; } @@ -296,7 +298,7 @@ message JobStatus { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.Time completionTime = 3; - // The number of actively running pods. + // The number of pending and running pods. // +optional optional int32 active = 4; @@ -317,6 +319,32 @@ message JobStatus { // represented as "1,3-5,7". // +optional optional string completedIndexes = 7; + + // UncountedTerminatedPods holds the UIDs of Pods that have terminated but + // the job controller hasn't yet accounted for in the status counters. + // + // The job controller creates pods with a finalizer. When a pod terminates + // (succeeded or failed), the controller does three steps to account for it + // in the job status: + // (1) Add the pod UID to the arrays in this field. + // (2) Remove the pod finalizer. + // (3) Remove the pod UID from the arrays while increasing the corresponding + // counter. + // + // This field is beta-level. The job controller only makes use of this field + // when the feature gate JobTrackingWithFinalizers is enabled (enabled + // by default). + // Old jobs might not be tracked using this field, in which case the field + // remains null. + // +optional + optional UncountedTerminatedPods uncountedTerminatedPods = 8; + + // The number of pods which have a Ready condition. + // + // This field is alpha-level. The job controller populates the field when + // the feature gate JobReadyPods is enabled (disabled by default). + // +optional + optional int32 ready = 9; } // JobTemplateSpec describes the data a Job should have when created from a template @@ -332,3 +360,17 @@ message JobTemplateSpec { optional JobSpec spec = 2; } +// UncountedTerminatedPods holds UIDs of Pods that have terminated but haven't +// been accounted in Job status counters. +message UncountedTerminatedPods { + // Succeeded holds UIDs of succeeded Pods. + // +listType=set + // +optional + repeated string succeeded = 1; + + // Failed holds UIDs of failed Pods. + // +listType=set + // +optional + repeated string failed = 2; +} + diff --git a/vendor/k8s.io/api/batch/v1/types.go b/vendor/k8s.io/api/batch/v1/types.go index 12f4b04cbd9ad..13cebde3f1cfe 100644 --- a/vendor/k8s.io/api/batch/v1/types.go +++ b/vendor/k8s.io/api/batch/v1/types.go @@ -19,9 +19,20 @@ package v1 import ( "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" ) -const JobCompletionIndexAnnotationAlpha = "batch.kubernetes.io/job-completion-index" +const ( + JobCompletionIndexAnnotation = "batch.kubernetes.io/job-completion-index" + + // JobTrackingFinalizer is a finalizer for Job's pods. It prevents them from + // being deleted before being accounted in the Job status. + // The apiserver and job controller use this string as a Job annotation, to + // mark Jobs that are being tracked using pod finalizers. Two releases after + // the JobTrackingWithFinalizers graduates to GA, JobTrackingFinalizer will + // no longer be used as a Job annotation. + JobTrackingFinalizer = "batch.kubernetes.io/job-tracking" +) // +genclient // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object @@ -60,6 +71,7 @@ type JobList struct { } // CompletionMode specifies how Pod completions of a Job are tracked. +// +enum type CompletionMode string const ( @@ -143,8 +155,6 @@ type JobSpec struct { // guarantees (e.g. finalizers) will be honored. If this field is unset, // the Job won't be automatically deleted. 
If this field is set to zero, // the Job becomes eligible to be deleted immediately after it finishes. - // This field is alpha-level and is only honored by servers that enable the - // TTLAfterFinished feature. // +optional TTLSecondsAfterFinished *int32 `json:"ttlSecondsAfterFinished,omitempty" protobuf:"varint,8,opt,name=ttlSecondsAfterFinished"` @@ -162,9 +172,11 @@ type JobSpec struct { // for each index. // When value is `Indexed`, .spec.completions must be specified and // `.spec.parallelism` must be less than or equal to 10^5. + // In addition, The Pod name takes the form + // `$(job-name)-$(index)-$(random-string)`, + // the Pod hostname takes the form `$(job-name)-$(index)`. // - // This field is alpha-level and is only honored by servers that enable the - // IndexedJob feature gate. More completion modes can be added in the future. + // This field is beta-level. More completion modes can be added in the future. // If the Job controller observes a mode that it doesn't recognize, the // controller skips updates for the Job. // +optional @@ -176,9 +188,11 @@ type JobSpec struct { // false to true), the Job controller will delete all active Pods associated // with this Job. Users must design their workload to gracefully handle this. // Suspending a Job will reset the StartTime field of the Job, effectively - // resetting the ActiveDeadlineSeconds timer too. This is an alpha field and - // requires the SuspendJob feature gate to be enabled; otherwise this field - // may not be set to true. Defaults to false. + // resetting the ActiveDeadlineSeconds timer too. Defaults to false. + // + // This field is beta-level, gated by SuspendJob feature flag (enabled by + // default). + // // +optional Suspend *bool `json:"suspend,omitempty" protobuf:"varint,10,opt,name=suspend"` } @@ -212,7 +226,7 @@ type JobStatus struct { // +optional CompletionTime *metav1.Time `json:"completionTime,omitempty" protobuf:"bytes,3,opt,name=completionTime"` - // The number of actively running pods. + // The number of pending and running pods. // +optional Active int32 `json:"active,omitempty" protobuf:"varint,4,opt,name=active"` @@ -233,11 +247,51 @@ type JobStatus struct { // represented as "1,3-5,7". // +optional CompletedIndexes string `json:"completedIndexes,omitempty" protobuf:"bytes,7,opt,name=completedIndexes"` + + // UncountedTerminatedPods holds the UIDs of Pods that have terminated but + // the job controller hasn't yet accounted for in the status counters. + // + // The job controller creates pods with a finalizer. When a pod terminates + // (succeeded or failed), the controller does three steps to account for it + // in the job status: + // (1) Add the pod UID to the arrays in this field. + // (2) Remove the pod finalizer. + // (3) Remove the pod UID from the arrays while increasing the corresponding + // counter. + // + // This field is beta-level. The job controller only makes use of this field + // when the feature gate JobTrackingWithFinalizers is enabled (enabled + // by default). + // Old jobs might not be tracked using this field, in which case the field + // remains null. + // +optional + UncountedTerminatedPods *UncountedTerminatedPods `json:"uncountedTerminatedPods,omitempty" protobuf:"bytes,8,opt,name=uncountedTerminatedPods"` + + // The number of pods which have a Ready condition. + // + // This field is alpha-level. The job controller populates the field when + // the feature gate JobReadyPods is enabled (disabled by default). 
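[Editor's note, not part of the patch: the three-step accounting flow documented above — record UIDs, remove the pod finalizer, then fold the UIDs into the counters — is easy to see in miniature. A sketch of step (3) using the types this patch adds; illustrative only, the real controller also performs steps (1) and (2) and persists the status between them.]

package main

import (
	"fmt"

	batchv1 "k8s.io/api/batch/v1"
	"k8s.io/apimachinery/pkg/types"
)

// accountTerminatedPods folds the uncounted pod UIDs into the status counters
// and clears the arrays, mirroring step (3) of the flow described above.
func accountTerminatedPods(status *batchv1.JobStatus) {
	u := status.UncountedTerminatedPods
	if u == nil {
		return
	}
	status.Succeeded += int32(len(u.Succeeded))
	status.Failed += int32(len(u.Failed))
	u.Succeeded, u.Failed = nil, nil
}

func main() {
	st := &batchv1.JobStatus{
		UncountedTerminatedPods: &batchv1.UncountedTerminatedPods{
			Succeeded: []types.UID{"uid-a", "uid-b"},
			Failed:    []types.UID{"uid-c"},
		},
	}
	accountTerminatedPods(st)
	fmt.Println(st.Succeeded, st.Failed) // prints: 2 1
}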
+ // +optional + Ready *int32 `json:"ready,omitempty" protobuf:"varint,9,opt,name=ready"` +} + +// UncountedTerminatedPods holds UIDs of Pods that have terminated but haven't +// been accounted in Job status counters. +type UncountedTerminatedPods struct { + // Succeeded holds UIDs of succeeded Pods. + // +listType=set + // +optional + Succeeded []types.UID `json:"succeeded,omitempty" protobuf:"bytes,1,rep,name=succeeded,casttype=k8s.io/apimachinery/pkg/types.UID"` + + // Failed holds UIDs of failed Pods. + // +listType=set + // +optional + Failed []types.UID `json:"failed,omitempty" protobuf:"bytes,2,rep,name=failed,casttype=k8s.io/apimachinery/pkg/types.UID"` } type JobConditionType string -// These are valid conditions of a job. +// These are built-in conditions of a job. const ( // JobSuspended means the job has been suspended. JobSuspended JobConditionType = "Suspended" @@ -359,6 +413,7 @@ type CronJobSpec struct { // Only one of the following concurrent policies may be specified. // If none of the following policies is specified, the default one // is AllowConcurrent. +// +enum type ConcurrencyPolicy string const ( diff --git a/vendor/k8s.io/api/batch/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/batch/v1/types_swagger_doc_generated.go index d98c5edeb1e68..269021a9c04b9 100644 --- a/vendor/k8s.io/api/batch/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/batch/v1/types_swagger_doc_generated.go @@ -118,9 +118,9 @@ var map_JobSpec = map[string]string{ "selector": "A label query over pods that should match the pod count. Normally, the system sets this field for you. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors", "manualSelector": "manualSelector controls generation of pod labels and pod selectors. Leave `manualSelector` unset unless you are certain what you are doing. When false or unset, the system pick labels unique to this job and appends those labels to the pod template. When true, the user is responsible for picking unique labels and specifying the selector. Failure to pick a unique label may cause this and other jobs to not function correctly. However, You may see `manualSelector=true` in jobs that were created with the old `extensions/v1beta1` API. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/#specifying-your-own-pod-selector", "template": "Describes the pod that will be created when executing a job. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/", - "ttlSecondsAfterFinished": "ttlSecondsAfterFinished limits the lifetime of a Job that has finished execution (either Complete or Failed). If this field is set, ttlSecondsAfterFinished after the Job finishes, it is eligible to be automatically deleted. When the Job is being deleted, its lifecycle guarantees (e.g. finalizers) will be honored. If this field is unset, the Job won't be automatically deleted. If this field is set to zero, the Job becomes eligible to be deleted immediately after it finishes. This field is alpha-level and is only honored by servers that enable the TTLAfterFinished feature.", - "completionMode": "CompletionMode specifies how Pod completions are tracked. It can be `NonIndexed` (default) or `Indexed`.\n\n`NonIndexed` means that the Job is considered complete when there have been .spec.completions successfully completed Pods. 
Each Pod completion is homologous to each other.\n\n`Indexed` means that the Pods of a Job get an associated completion index from 0 to (.spec.completions - 1), available in the annotation batch.kubernetes.io/job-completion-index. The Job is considered complete when there is one successfully completed Pod for each index. When value is `Indexed`, .spec.completions must be specified and `.spec.parallelism` must be less than or equal to 10^5.\n\nThis field is alpha-level and is only honored by servers that enable the IndexedJob feature gate. More completion modes can be added in the future. If the Job controller observes a mode that it doesn't recognize, the controller skips updates for the Job.", - "suspend": "Suspend specifies whether the Job controller should create Pods or not. If a Job is created with suspend set to true, no Pods are created by the Job controller. If a Job is suspended after creation (i.e. the flag goes from false to true), the Job controller will delete all active Pods associated with this Job. Users must design their workload to gracefully handle this. Suspending a Job will reset the StartTime field of the Job, effectively resetting the ActiveDeadlineSeconds timer too. This is an alpha field and requires the SuspendJob feature gate to be enabled; otherwise this field may not be set to true. Defaults to false.", + "ttlSecondsAfterFinished": "ttlSecondsAfterFinished limits the lifetime of a Job that has finished execution (either Complete or Failed). If this field is set, ttlSecondsAfterFinished after the Job finishes, it is eligible to be automatically deleted. When the Job is being deleted, its lifecycle guarantees (e.g. finalizers) will be honored. If this field is unset, the Job won't be automatically deleted. If this field is set to zero, the Job becomes eligible to be deleted immediately after it finishes.", + "completionMode": "CompletionMode specifies how Pod completions are tracked. It can be `NonIndexed` (default) or `Indexed`.\n\n`NonIndexed` means that the Job is considered complete when there have been .spec.completions successfully completed Pods. Each Pod completion is homologous to each other.\n\n`Indexed` means that the Pods of a Job get an associated completion index from 0 to (.spec.completions - 1), available in the annotation batch.kubernetes.io/job-completion-index. The Job is considered complete when there is one successfully completed Pod for each index. When value is `Indexed`, .spec.completions must be specified and `.spec.parallelism` must be less than or equal to 10^5. In addition, The Pod name takes the form `$(job-name)-$(index)-$(random-string)`, the Pod hostname takes the form `$(job-name)-$(index)`.\n\nThis field is beta-level. More completion modes can be added in the future. If the Job controller observes a mode that it doesn't recognize, the controller skips updates for the Job.", + "suspend": "Suspend specifies whether the Job controller should create Pods or not. If a Job is created with suspend set to true, no Pods are created by the Job controller. If a Job is suspended after creation (i.e. the flag goes from false to true), the Job controller will delete all active Pods associated with this Job. Users must design their workload to gracefully handle this. Suspending a Job will reset the StartTime field of the Job, effectively resetting the ActiveDeadlineSeconds timer too. 
Defaults to false.\n\nThis field is beta-level, gated by SuspendJob feature flag (enabled by default).", } func (JobSpec) SwaggerDoc() map[string]string { @@ -128,14 +128,16 @@ func (JobSpec) SwaggerDoc() map[string]string { } var map_JobStatus = map[string]string{ - "": "JobStatus represents the current state of a Job.", - "conditions": "The latest available observations of an object's current state. When a Job fails, one of the conditions will have type \"Failed\" and status true. When a Job is suspended, one of the conditions will have type \"Suspended\" and status true; when the Job is resumed, the status of this condition will become false. When a Job is completed, one of the conditions will have type \"Complete\" and status true. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/", - "startTime": "Represents time when the job controller started processing a job. When a Job is created in the suspended state, this field is not set until the first time it is resumed. This field is reset every time a Job is resumed from suspension. It is represented in RFC3339 form and is in UTC.", - "completionTime": "Represents time when the job was completed. It is not guaranteed to be set in happens-before order across separate operations. It is represented in RFC3339 form and is in UTC. The completion time is only set when the job finishes successfully.", - "active": "The number of actively running pods.", - "succeeded": "The number of pods which reached phase Succeeded.", - "failed": "The number of pods which reached phase Failed.", - "completedIndexes": "CompletedIndexes holds the completed indexes when .spec.completionMode = \"Indexed\" in a text format. The indexes are represented as decimal integers separated by commas. The numbers are listed in increasing order. Three or more consecutive numbers are compressed and represented by the first and last element of the series, separated by a hyphen. For example, if the completed indexes are 1, 3, 4, 5 and 7, they are represented as \"1,3-5,7\".", + "": "JobStatus represents the current state of a Job.", + "conditions": "The latest available observations of an object's current state. When a Job fails, one of the conditions will have type \"Failed\" and status true. When a Job is suspended, one of the conditions will have type \"Suspended\" and status true; when the Job is resumed, the status of this condition will become false. When a Job is completed, one of the conditions will have type \"Complete\" and status true. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/", + "startTime": "Represents time when the job controller started processing a job. When a Job is created in the suspended state, this field is not set until the first time it is resumed. This field is reset every time a Job is resumed from suspension. It is represented in RFC3339 form and is in UTC.", + "completionTime": "Represents time when the job was completed. It is not guaranteed to be set in happens-before order across separate operations. It is represented in RFC3339 form and is in UTC. The completion time is only set when the job finishes successfully.", + "active": "The number of pending and running pods.", + "succeeded": "The number of pods which reached phase Succeeded.", + "failed": "The number of pods which reached phase Failed.", + "completedIndexes": "CompletedIndexes holds the completed indexes when .spec.completionMode = \"Indexed\" in a text format. 
The indexes are represented as decimal integers separated by commas. The numbers are listed in increasing order. Three or more consecutive numbers are compressed and represented by the first and last element of the series, separated by a hyphen. For example, if the completed indexes are 1, 3, 4, 5 and 7, they are represented as \"1,3-5,7\".", + "uncountedTerminatedPods": "UncountedTerminatedPods holds the UIDs of Pods that have terminated but the job controller hasn't yet accounted for in the status counters.\n\nThe job controller creates pods with a finalizer. When a pod terminates (succeeded or failed), the controller does three steps to account for it in the job status: (1) Add the pod UID to the arrays in this field. (2) Remove the pod finalizer. (3) Remove the pod UID from the arrays while increasing the corresponding\n counter.\n\nThis field is beta-level. The job controller only makes use of this field when the feature gate JobTrackingWithFinalizers is enabled (enabled by default). Old jobs might not be tracked using this field, in which case the field remains null.", + "ready": "The number of pods which have a Ready condition.\n\nThis field is alpha-level. The job controller populates the field when the feature gate JobReadyPods is enabled (disabled by default).", } func (JobStatus) SwaggerDoc() map[string]string { @@ -152,4 +154,14 @@ func (JobTemplateSpec) SwaggerDoc() map[string]string { return map_JobTemplateSpec } +var map_UncountedTerminatedPods = map[string]string{ + "": "UncountedTerminatedPods holds UIDs of Pods that have terminated but haven't been accounted in Job status counters.", + "succeeded": "Succeeded holds UIDs of succeeded Pods.", + "failed": "Failed holds UIDs of failed Pods.", +} + +func (UncountedTerminatedPods) SwaggerDoc() map[string]string { + return map_UncountedTerminatedPods +} + // AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/batch/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/batch/v1/zz_generated.deepcopy.go index 6018ad1d3c013..a9806a5024680 100644 --- a/vendor/k8s.io/api/batch/v1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/batch/v1/zz_generated.deepcopy.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* @@ -24,6 +25,7 @@ import ( corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" + types "k8s.io/apimachinery/pkg/types" ) // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. @@ -312,6 +314,16 @@ func (in *JobStatus) DeepCopyInto(out *JobStatus) { in, out := &in.CompletionTime, &out.CompletionTime *out = (*in).DeepCopy() } + if in.UncountedTerminatedPods != nil { + in, out := &in.UncountedTerminatedPods, &out.UncountedTerminatedPods + *out = new(UncountedTerminatedPods) + (*in).DeepCopyInto(*out) + } + if in.Ready != nil { + in, out := &in.Ready, &out.Ready + *out = new(int32) + **out = **in + } return } @@ -342,3 +354,29 @@ func (in *JobTemplateSpec) DeepCopy() *JobTemplateSpec { in.DeepCopyInto(out) return out } + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
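[Editor's note, not part of the patch: the "1,3-5,7" compression described for completedIndexes in the Swagger docs above is straightforward to reproduce. A sketch assuming sorted, de-duplicated input; illustrative only, not the controller's formatter.]

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// formatCompletedIndexes renders sorted, de-duplicated indexes in the
// compressed text form described above: runs of three or more consecutive
// numbers collapse to "first-last"; shorter runs are listed individually.
func formatCompletedIndexes(ix []int) string {
	var parts []string
	for i := 0; i < len(ix); {
		j := i
		for j+1 < len(ix) && ix[j+1] == ix[j]+1 {
			j++
		}
		if j-i >= 2 { // three or more consecutive values
			parts = append(parts, fmt.Sprintf("%d-%d", ix[i], ix[j]))
			i = j + 1
		} else {
			parts = append(parts, strconv.Itoa(ix[i]))
			i++
		}
	}
	return strings.Join(parts, ",")
}

func main() {
	fmt.Println(formatCompletedIndexes([]int{1, 3, 4, 5, 7})) // prints: 1,3-5,7
}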
+func (in *UncountedTerminatedPods) DeepCopyInto(out *UncountedTerminatedPods) { + *out = *in + if in.Succeeded != nil { + in, out := &in.Succeeded, &out.Succeeded + *out = make([]types.UID, len(*in)) + copy(*out, *in) + } + if in.Failed != nil { + in, out := &in.Failed, &out.Failed + *out = make([]types.UID, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UncountedTerminatedPods. +func (in *UncountedTerminatedPods) DeepCopy() *UncountedTerminatedPods { + if in == nil { + return nil + } + out := new(UncountedTerminatedPods) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/api/batch/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/batch/v1beta1/zz_generated.deepcopy.go index 77244485ba640..8c256848dc2dd 100644 --- a/vendor/k8s.io/api/batch/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/batch/v1beta1/zz_generated.deepcopy.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* diff --git a/vendor/k8s.io/api/batch/v1beta1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/batch/v1beta1/zz_generated.prerelease-lifecycle.go index 5379f896c19d3..2836b3b014f3e 100644 --- a/vendor/k8s.io/api/batch/v1beta1/zz_generated.prerelease-lifecycle.go +++ b/vendor/k8s.io/api/batch/v1beta1/zz_generated.prerelease-lifecycle.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* diff --git a/vendor/k8s.io/api/certificates/v1/generated.pb.go b/vendor/k8s.io/api/certificates/v1/generated.pb.go index fca7d21154129..3f06ac97a965d 100644 --- a/vendor/k8s.io/api/certificates/v1/generated.pb.go +++ b/vendor/k8s.io/api/certificates/v1/generated.pb.go @@ -229,62 +229,64 @@ func init() { } var fileDescriptor_17e045d0de66f3c7 = []byte{ - // 873 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x54, 0xcf, 0x6f, 0x1b, 0x45, - 0x14, 0xf6, 0xfa, 0x57, 0xec, 0x71, 0x49, 0xab, 0x11, 0xaa, 0x16, 0x4b, 0xdd, 0x8d, 0x56, 0x50, - 0x05, 0x04, 0xbb, 0x38, 0x2a, 0x10, 0x0a, 0xe2, 0xb0, 0x69, 0x85, 0x2a, 0x52, 0x90, 0x26, 0x09, - 0x87, 0xc2, 0xa1, 0x93, 0xf5, 0xeb, 0x66, 0xea, 0xee, 0x0f, 0x66, 0x66, 0x2d, 0x7c, 0xeb, 0x9f, - 0xc0, 0x91, 0x23, 0xff, 0x09, 0xd7, 0x1c, 0x7b, 0x2c, 0x12, 0xb2, 0x88, 0x7b, 0xe1, 0x6f, 0xc8, - 0x09, 0xcd, 0xec, 0x78, 0xed, 0xfc, 0x70, 0x5b, 0x72, 0xdb, 0xf9, 0xde, 0xf7, 0xbe, 0xef, 0xbd, - 0xb7, 0x6f, 0x06, 0xed, 0x8c, 0xb6, 0x85, 0xcf, 0xb2, 0x60, 0x54, 0x1c, 0x02, 0x4f, 0x41, 0x82, - 0x08, 0xc6, 0x90, 0x0e, 0x33, 0x1e, 0x98, 0x00, 0xcd, 0x59, 0x10, 0x01, 0x97, 0xec, 0x09, 0x8b, - 0xa8, 0x0e, 0x0f, 0x82, 0x18, 0x52, 0xe0, 0x54, 0xc2, 0xd0, 0xcf, 0x79, 0x26, 0x33, 0xdc, 0x2f, - 0xb9, 0x3e, 0xcd, 0x99, 0xbf, 0xcc, 0xf5, 0xc7, 0x83, 0xfe, 0x27, 0x31, 0x93, 0x47, 0xc5, 0xa1, - 0x1f, 0x65, 0x49, 0x10, 0x67, 0x71, 0x16, 0xe8, 0x94, 0xc3, 0xe2, 0x89, 0x3e, 0xe9, 0x83, 0xfe, - 0x2a, 0xa5, 0xfa, 0xde, 0xb2, 0x6d, 0xc6, 0xe1, 0x12, 0xbb, 0xfe, 0x9d, 0x05, 0x27, 0xa1, 0xd1, - 0x11, 0x4b, 0x81, 0x4f, 0x82, 0x7c, 0x14, 0x2b, 0x40, 0x04, 0x09, 0x48, 0x7a, 0x59, 0x56, 0xb0, - 0x2a, 0x8b, 0x17, 0xa9, 0x64, 0x09, 0x5c, 0x48, 0xf8, 0xfc, 0x4d, 0x09, 0x22, 0x3a, 0x82, 0x84, - 0x9e, 0xcf, 0xf3, 0xfe, 0xac, 0xa3, 0xf7, 0x76, 0x16, 0x53, 0xd8, 0x63, 0x71, 0xca, 0xd2, 0x98, - 0xc0, 0x2f, 0x05, 0x08, 0x89, 0x1f, 0xa3, 0x8e, 0xaa, 0x70, 0x48, 0x25, 0xb5, 0xad, 0x0d, 0x6b, - 0xb3, 0xb7, 0xf5, 0xa9, 0xbf, 0x18, 0x5f, 0x65, 0xe4, 0xe7, 0xa3, 0x58, 0x01, 0xc2, 0x57, 0x6c, - 0x7f, 0x3c, 0xf0, 
0x7f, 0x38, 0x7c, 0x0a, 0x91, 0x7c, 0x08, 0x92, 0x86, 0xf8, 0x78, 0xea, 0xd6, - 0x66, 0x53, 0x17, 0x2d, 0x30, 0x52, 0xa9, 0xe2, 0x9f, 0x50, 0x53, 0xe4, 0x10, 0xd9, 0x75, 0xad, - 0xfe, 0xa5, 0xbf, 0xfa, 0xe7, 0xf8, 0x2b, 0xcb, 0xdc, 0xcb, 0x21, 0x0a, 0xaf, 0x19, 0x9b, 0xa6, - 0x3a, 0x11, 0x2d, 0x8a, 0x23, 0xd4, 0x16, 0x92, 0xca, 0x42, 0xd8, 0x0d, 0x2d, 0xff, 0xd5, 0xd5, - 0xe4, 0xb5, 0x44, 0xb8, 0x6e, 0x0c, 0xda, 0xe5, 0x99, 0x18, 0x69, 0xef, 0x55, 0x03, 0x79, 0x2b, - 0x73, 0x77, 0xb2, 0x74, 0xc8, 0x24, 0xcb, 0x52, 0xbc, 0x8d, 0x9a, 0x72, 0x92, 0x83, 0x1e, 0x63, - 0x37, 0x7c, 0x7f, 0x5e, 0xed, 0xfe, 0x24, 0x87, 0xd3, 0xa9, 0xfb, 0xee, 0x79, 0xbe, 0xc2, 0x89, - 0xce, 0xc0, 0xbb, 0x55, 0x17, 0x6d, 0x9d, 0x7b, 0xe7, 0x6c, 0x21, 0xa7, 0x53, 0xf7, 0x92, 0x3d, - 0xf4, 0x2b, 0xa5, 0xb3, 0xe5, 0xe2, 0xdb, 0xa8, 0xcd, 0x81, 0x8a, 0x2c, 0xd5, 0x23, 0xef, 0x2e, - 0xda, 0x22, 0x1a, 0x25, 0x26, 0x8a, 0x3f, 0x44, 0x6b, 0x09, 0x08, 0x41, 0x63, 0xd0, 0xc3, 0xeb, - 0x86, 0xd7, 0x0d, 0x71, 0xed, 0x61, 0x09, 0x93, 0x79, 0x1c, 0x3f, 0x45, 0xeb, 0xcf, 0xa8, 0x90, - 0x07, 0xf9, 0x90, 0x4a, 0xd8, 0x67, 0x09, 0xd8, 0x4d, 0x3d, 0xee, 0x8f, 0xde, 0x6e, 0x57, 0x54, - 0x46, 0x78, 0xd3, 0xa8, 0xaf, 0xef, 0x9e, 0x51, 0x22, 0xe7, 0x94, 0xf1, 0x18, 0x61, 0x85, 0xec, - 0x73, 0x9a, 0x8a, 0x72, 0x50, 0xca, 0xaf, 0xf5, 0xbf, 0xfd, 0xfa, 0xc6, 0x0f, 0xef, 0x5e, 0x50, - 0x23, 0x97, 0x38, 0x78, 0x7f, 0x59, 0xe8, 0xd6, 0xca, 0xbf, 0xbc, 0xcb, 0x84, 0xc4, 0x3f, 0x5f, - 0xb8, 0x2b, 0xfe, 0xdb, 0xd5, 0xa3, 0xb2, 0xf5, 0x4d, 0xb9, 0x61, 0x6a, 0xea, 0xcc, 0x91, 0xa5, - 0x7b, 0xf2, 0x08, 0xb5, 0x98, 0x84, 0x44, 0xd8, 0xf5, 0x8d, 0xc6, 0x66, 0x6f, 0xeb, 0xb3, 0x2b, - 0x6d, 0x72, 0xf8, 0x8e, 0x71, 0x68, 0x3d, 0x50, 0x5a, 0xa4, 0x94, 0xf4, 0xfe, 0x6d, 0xbc, 0xa6, - 0x37, 0x75, 0x9d, 0xf0, 0x07, 0x68, 0x8d, 0x97, 0x47, 0xdd, 0xda, 0xb5, 0xb0, 0xa7, 0x16, 0xc1, - 0x30, 0xc8, 0x3c, 0x86, 0xb7, 0x10, 0x12, 0x2c, 0x4e, 0x81, 0x7f, 0x4f, 0x13, 0xb0, 0xd7, 0xf4, - 0xda, 0x54, 0xd7, 0x7f, 0xaf, 0x8a, 0x90, 0x25, 0x16, 0xf6, 0x51, 0xbb, 0x50, 0x5b, 0x24, 0xec, - 0xd6, 0x46, 0x63, 0xb3, 0x1b, 0xde, 0x54, 0xbb, 0x78, 0xa0, 0x91, 0xd3, 0xa9, 0xdb, 0xf9, 0x0e, - 0x26, 0xfa, 0x40, 0x0c, 0x0b, 0x7f, 0x8c, 0x3a, 0x85, 0x00, 0x9e, 0x2a, 0x87, 0x72, 0x83, 0xab, - 0xb1, 0x1d, 0x18, 0x9c, 0x54, 0x0c, 0x7c, 0x0b, 0x35, 0x0a, 0x36, 0x34, 0x1b, 0xdc, 0x33, 0xc4, - 0xc6, 0xc1, 0x83, 0x7b, 0x44, 0xe1, 0xd8, 0x43, 0xed, 0x98, 0x67, 0x45, 0x2e, 0xec, 0xa6, 0x36, - 0x47, 0xca, 0xfc, 0x5b, 0x8d, 0x10, 0x13, 0xc1, 0x0c, 0xb5, 0xe0, 0x57, 0xc9, 0xa9, 0xdd, 0xd6, - 0x93, 0xbf, 0x77, 0xe5, 0x27, 0xca, 0xbf, 0xaf, 0x64, 0xee, 0xa7, 0x92, 0x4f, 0x16, 0x3f, 0x42, - 0x63, 0xa4, 0x74, 0xe8, 0x3f, 0x46, 0x68, 0xc1, 0xc1, 0x37, 0x50, 0x63, 0x04, 0x93, 0xf2, 0xc1, - 0x20, 0xea, 0x13, 0x7f, 0x8d, 0x5a, 0x63, 0xfa, 0xac, 0x00, 0xf3, 0x5a, 0xde, 0x7e, 0x5d, 0x29, - 0x5a, 0xe8, 0x47, 0xc5, 0x26, 0x65, 0xd2, 0xdd, 0xfa, 0xb6, 0xe5, 0x1d, 0x5b, 0xc8, 0x7d, 0xc3, - 0x43, 0x87, 0x39, 0x42, 0xd1, 0xfc, 0xf1, 0x10, 0xb6, 0xa5, 0xbb, 0xfe, 0xe6, 0x4a, 0x5d, 0x57, - 0x6f, 0xd0, 0x62, 0x0b, 0x2a, 0x48, 0x90, 0x25, 0x17, 0x3c, 0x40, 0xbd, 0x25, 0x55, 0xdd, 0xdf, - 0xb5, 0xf0, 0xfa, 0x6c, 0xea, 0xf6, 0x96, 0xc4, 0xc9, 0x32, 0xc7, 0xfb, 0xc2, 0x0c, 0x4b, 0xf7, - 0x88, 0xdd, 0xf9, 0xfd, 0xb0, 0xf4, 0x8f, 0xec, 0x9e, 0x5f, 0xf2, 0xbb, 0x9d, 0xdf, 0xff, 0x70, - 0x6b, 0xcf, 0xff, 0xde, 0xa8, 0x85, 0x9b, 0xc7, 0x27, 0x4e, 0xed, 0xc5, 0x89, 0x53, 0x7b, 0x79, - 0xe2, 0xd4, 0x9e, 0xcf, 0x1c, 0xeb, 0x78, 0xe6, 0x58, 0x2f, 0x66, 0x8e, 0xf5, 0x72, 0xe6, 0x58, - 0xff, 0xcc, 0x1c, 0xeb, 0xb7, 0x57, 0x4e, 
0xed, 0x51, 0x7d, 0x3c, 0xf8, 0x2f, 0x00, 0x00, 0xff, - 0xff, 0x9d, 0x8f, 0x4c, 0xfa, 0x70, 0x08, 0x00, 0x00, + // 906 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x54, 0x4f, 0x6f, 0x1b, 0x45, + 0x14, 0xf7, 0xc6, 0x7f, 0x62, 0x8f, 0x43, 0xda, 0x8e, 0xa0, 0x5a, 0x2c, 0xd5, 0x6b, 0x59, 0x50, + 0x19, 0x04, 0xbb, 0x38, 0x2a, 0x10, 0x0a, 0xe2, 0xb0, 0x69, 0x84, 0x2a, 0x52, 0x90, 0x26, 0x09, + 0x87, 0xc2, 0xa1, 0x93, 0xf5, 0xeb, 0x66, 0xea, 0xee, 0x1f, 0x66, 0x66, 0xad, 0xfa, 0xd6, 0x8f, + 0xc0, 0x91, 0x23, 0x5f, 0x80, 0xcf, 0xc0, 0x35, 0xc7, 0x1e, 0x8b, 0x84, 0x2c, 0xe2, 0x7e, 0x8b, + 0x9c, 0xd0, 0xcc, 0x8e, 0xd7, 0x8e, 0x13, 0xb7, 0x25, 0xb7, 0x99, 0xf7, 0x7e, 0xef, 0xf7, 0x7b, + 0xef, 0xcd, 0x7b, 0x83, 0x76, 0x86, 0xdb, 0xc2, 0x65, 0x89, 0x37, 0xcc, 0x8e, 0x80, 0xc7, 0x20, + 0x41, 0x78, 0x23, 0x88, 0x07, 0x09, 0xf7, 0x8c, 0x83, 0xa6, 0xcc, 0x0b, 0x80, 0x4b, 0xf6, 0x98, + 0x05, 0x54, 0xbb, 0xfb, 0x5e, 0x08, 0x31, 0x70, 0x2a, 0x61, 0xe0, 0xa6, 0x3c, 0x91, 0x09, 0x6e, + 0xe5, 0x58, 0x97, 0xa6, 0xcc, 0x5d, 0xc4, 0xba, 0xa3, 0x7e, 0xeb, 0xd3, 0x90, 0xc9, 0xe3, 0xec, + 0xc8, 0x0d, 0x92, 0xc8, 0x0b, 0x93, 0x30, 0xf1, 0x74, 0xc8, 0x51, 0xf6, 0x58, 0xdf, 0xf4, 0x45, + 0x9f, 0x72, 0xaa, 0x56, 0x77, 0x51, 0x36, 0xe1, 0x70, 0x89, 0x5c, 0xeb, 0xce, 0x1c, 0x13, 0xd1, + 0xe0, 0x98, 0xc5, 0xc0, 0xc7, 0x5e, 0x3a, 0x0c, 0x95, 0x41, 0x78, 0x11, 0x48, 0x7a, 0x59, 0x94, + 0xb7, 0x2a, 0x8a, 0x67, 0xb1, 0x64, 0x11, 0x5c, 0x08, 0xf8, 0xe2, 0x4d, 0x01, 0x22, 0x38, 0x86, + 0x88, 0x2e, 0xc7, 0x75, 0xff, 0x5a, 0x43, 0xef, 0xef, 0xcc, 0xbb, 0xb0, 0xcf, 0xc2, 0x98, 0xc5, + 0x21, 0x81, 0x5f, 0x33, 0x10, 0x12, 0x3f, 0x42, 0x75, 0x95, 0xe1, 0x80, 0x4a, 0x6a, 0x5b, 0x1d, + 0xab, 0xd7, 0xdc, 0xfa, 0xcc, 0x9d, 0xb7, 0xaf, 0x10, 0x72, 0xd3, 0x61, 0xa8, 0x0c, 0xc2, 0x55, + 0x68, 0x77, 0xd4, 0x77, 0x7f, 0x3c, 0x7a, 0x02, 0x81, 0x7c, 0x00, 0x92, 0xfa, 0xf8, 0x64, 0xe2, + 0x94, 0xa6, 0x13, 0x07, 0xcd, 0x6d, 0xa4, 0x60, 0xc5, 0x3f, 0xa3, 0x8a, 0x48, 0x21, 0xb0, 0xd7, + 0x34, 0xfb, 0x57, 0xee, 0xea, 0xc7, 0x71, 0x57, 0xa6, 0xb9, 0x9f, 0x42, 0xe0, 0x6f, 0x18, 0x99, + 0x8a, 0xba, 0x11, 0x4d, 0x8a, 0x03, 0x54, 0x13, 0x92, 0xca, 0x4c, 0xd8, 0x65, 0x4d, 0xff, 0xf5, + 0xd5, 0xe8, 0x35, 0x85, 0xbf, 0x69, 0x04, 0x6a, 0xf9, 0x9d, 0x18, 0xea, 0xee, 0xab, 0x32, 0xea, + 0xae, 0x8c, 0xdd, 0x49, 0xe2, 0x01, 0x93, 0x2c, 0x89, 0xf1, 0x36, 0xaa, 0xc8, 0x71, 0x0a, 0xba, + 0x8d, 0x0d, 0xff, 0x83, 0x59, 0xb6, 0x07, 0xe3, 0x14, 0xce, 0x26, 0xce, 0xbb, 0xcb, 0x78, 0x65, + 0x27, 0x3a, 0x02, 0xef, 0x15, 0x55, 0xd4, 0x74, 0xec, 0x9d, 0xf3, 0x89, 0x9c, 0x4d, 0x9c, 0x4b, + 0xe6, 0xd0, 0x2d, 0x98, 0xce, 0xa7, 0x8b, 0x6f, 0xa3, 0x1a, 0x07, 0x2a, 0x92, 0x58, 0xb7, 0xbc, + 0x31, 0x2f, 0x8b, 0x68, 0x2b, 0x31, 0x5e, 0xfc, 0x11, 0x5a, 0x8f, 0x40, 0x08, 0x1a, 0x82, 0x6e, + 0x5e, 0xc3, 0xbf, 0x66, 0x80, 0xeb, 0x0f, 0x72, 0x33, 0x99, 0xf9, 0xf1, 0x13, 0xb4, 0xf9, 0x94, + 0x0a, 0x79, 0x98, 0x0e, 0xa8, 0x84, 0x03, 0x16, 0x81, 0x5d, 0xd1, 0xed, 0xfe, 0xf8, 0xed, 0x66, + 0x45, 0x45, 0xf8, 0x37, 0x0d, 0xfb, 0xe6, 0xde, 0x39, 0x26, 0xb2, 0xc4, 0x8c, 0x47, 0x08, 0x2b, + 0xcb, 0x01, 0xa7, 0xb1, 0xc8, 0x1b, 0xa5, 0xf4, 0xaa, 0xff, 0x5b, 0xaf, 0x65, 0xf4, 0xf0, 0xde, + 0x05, 0x36, 0x72, 0x89, 0x42, 0xf7, 0x6f, 0x0b, 0xdd, 0x5a, 0xf9, 0xca, 0x7b, 0x4c, 0x48, 0xfc, + 0xcb, 0x85, 0x5d, 0x71, 0xdf, 0x2e, 0x1f, 0x15, 0xad, 0x37, 0xe5, 0xba, 0xc9, 0xa9, 0x3e, 0xb3, + 0x2c, 0xec, 0xc9, 0x43, 0x54, 0x65, 0x12, 0x22, 0x61, 0xaf, 0x75, 0xca, 0xbd, 0xe6, 0xd6, 0xe7, + 0x57, 0x9a, 0x64, 0xff, 0x1d, 0xa3, 0x50, 0xbd, 0xaf, 0xb8, 
0x48, 0x4e, 0xd9, 0xfd, 0xb3, 0xf2, + 0x9a, 0xda, 0xd4, 0x3a, 0xe1, 0x0f, 0xd1, 0x3a, 0xcf, 0xaf, 0xba, 0xb4, 0x0d, 0xbf, 0xa9, 0x06, + 0xc1, 0x20, 0xc8, 0xcc, 0x87, 0xb7, 0x10, 0x12, 0x2c, 0x8c, 0x81, 0xff, 0x40, 0x23, 0xb0, 0xd7, + 0xf5, 0xd8, 0x14, 0xeb, 0xbf, 0x5f, 0x78, 0xc8, 0x02, 0x0a, 0xef, 0xa0, 0x1b, 0xf0, 0x2c, 0x65, + 0x9c, 0xea, 0x59, 0x85, 0x20, 0x89, 0x07, 0xc2, 0xae, 0x77, 0xac, 0x5e, 0xd5, 0x7f, 0x6f, 0x3a, + 0x71, 0x6e, 0xec, 0x2e, 0x3b, 0xc9, 0x45, 0x3c, 0x76, 0x51, 0x2d, 0x53, 0xa3, 0x28, 0xec, 0x6a, + 0xa7, 0xdc, 0x6b, 0xf8, 0x37, 0xd5, 0x40, 0x1f, 0x6a, 0xcb, 0xd9, 0xc4, 0xa9, 0x7f, 0x0f, 0x63, + 0x7d, 0x21, 0x06, 0x85, 0x3f, 0x41, 0xf5, 0x4c, 0x00, 0x8f, 0x55, 0x9a, 0xf9, 0x1a, 0x14, 0xbd, + 0x3f, 0x34, 0x76, 0x52, 0x20, 0xf0, 0x2d, 0x54, 0xce, 0xd8, 0xc0, 0xac, 0x41, 0xd3, 0x00, 0xcb, + 0x87, 0xf7, 0xef, 0x11, 0x65, 0xc7, 0x5d, 0x54, 0x0b, 0x79, 0x92, 0xa5, 0xc2, 0xae, 0x68, 0x71, + 0xa4, 0xc4, 0xbf, 0xd3, 0x16, 0x62, 0x3c, 0x98, 0xa1, 0x2a, 0x3c, 0x93, 0x9c, 0xda, 0x35, 0xfd, + 0x7c, 0xf7, 0xae, 0xfc, 0xcf, 0xb9, 0xbb, 0x8a, 0x66, 0x37, 0x96, 0x7c, 0x3c, 0x7f, 0x4d, 0x6d, + 0x23, 0xb9, 0x42, 0xeb, 0x11, 0x42, 0x73, 0x0c, 0xbe, 0x8e, 0xca, 0x43, 0x18, 0xe7, 0xbf, 0x0e, + 0x51, 0x47, 0xfc, 0x0d, 0xaa, 0x8e, 0xe8, 0xd3, 0x0c, 0xcc, 0x97, 0x7b, 0xfb, 0x75, 0xa9, 0x68, + 0xa2, 0x9f, 0x14, 0x9a, 0xe4, 0x41, 0x77, 0xd7, 0xb6, 0xad, 0xee, 0x89, 0x85, 0x9c, 0x37, 0xfc, + 0x96, 0x98, 0x23, 0x14, 0xcc, 0x7e, 0x20, 0x61, 0x5b, 0xba, 0xea, 0x6f, 0xaf, 0x54, 0x75, 0xf1, + 0x91, 0xcd, 0x47, 0xa9, 0x30, 0x09, 0xb2, 0xa0, 0x82, 0xfb, 0xa8, 0xb9, 0xc0, 0xaa, 0xeb, 0xdb, + 0xf0, 0xaf, 0x4d, 0x27, 0x4e, 0x73, 0x81, 0x9c, 0x2c, 0x62, 0xba, 0x5f, 0x9a, 0x66, 0xe9, 0x1a, + 0xb1, 0x33, 0x5b, 0x32, 0x4b, 0x3f, 0x64, 0x63, 0x79, 0x53, 0xee, 0xd6, 0x7f, 0xff, 0xc3, 0x29, + 0x3d, 0xff, 0xa7, 0x53, 0xf2, 0x7b, 0x27, 0xa7, 0xed, 0xd2, 0x8b, 0xd3, 0x76, 0xe9, 0xe5, 0x69, + 0xbb, 0xf4, 0x7c, 0xda, 0xb6, 0x4e, 0xa6, 0x6d, 0xeb, 0xc5, 0xb4, 0x6d, 0xbd, 0x9c, 0xb6, 0xad, + 0x7f, 0xa7, 0x6d, 0xeb, 0xb7, 0x57, 0xed, 0xd2, 0xc3, 0xb5, 0x51, 0xff, 0xbf, 0x00, 0x00, 0x00, + 0xff, 0xff, 0x88, 0x08, 0x6d, 0x53, 0xb5, 0x08, 0x00, 0x00, } func (m *CertificateSigningRequest) Marshal() (dAtA []byte, err error) { @@ -470,6 +472,11 @@ func (m *CertificateSigningRequestSpec) MarshalToSizedBuffer(dAtA []byte) (int, _ = i var l int _ = l + if m.ExpirationSeconds != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.ExpirationSeconds)) + i-- + dAtA[i] = 0x40 + } i -= len(m.SignerName) copy(dAtA[i:], m.SignerName) i = encodeVarintGenerated(dAtA, i, uint64(len(m.SignerName))) @@ -719,6 +726,9 @@ func (m *CertificateSigningRequestSpec) Size() (n int) { } l = len(m.SignerName) n += 1 + l + sovGenerated(uint64(l)) + if m.ExpirationSeconds != nil { + n += 1 + sovGenerated(uint64(*m.ExpirationSeconds)) + } return n } @@ -827,6 +837,7 @@ func (this *CertificateSigningRequestSpec) String() string { `Usages:` + fmt.Sprintf("%v", this.Usages) + `,`, `Extra:` + mapStringForExtra + `,`, `SignerName:` + fmt.Sprintf("%v", this.SignerName) + `,`, + `ExpirationSeconds:` + valueToStringGenerated(this.ExpirationSeconds) + `,`, `}`, }, "") return s @@ -1717,6 +1728,26 @@ func (m *CertificateSigningRequestSpec) Unmarshal(dAtA []byte) error { } m.SignerName = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ExpirationSeconds", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + 
if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.ExpirationSeconds = &v default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) diff --git a/vendor/k8s.io/api/certificates/v1/generated.proto b/vendor/k8s.io/api/certificates/v1/generated.proto index 839c1aa874fe2..1db9e324ebba2 100644 --- a/vendor/k8s.io/api/certificates/v1/generated.proto +++ b/vendor/k8s.io/api/certificates/v1/generated.proto @@ -44,7 +44,7 @@ message CertificateSigningRequest { optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // spec contains the certificate request, and is immutable after creation. - // Only the request, signerName, and usages fields can be set on creation. + // Only the request, signerName, expirationSeconds, and usages fields can be set on creation. // Other fields are derived by Kubernetes and cannot be modified by users. optional CertificateSigningRequestSpec spec = 2; @@ -135,6 +135,30 @@ message CertificateSigningRequestSpec { // 6. Whether or not requests for CA certificates are allowed. optional string signerName = 7; + // expirationSeconds is the requested duration of validity of the issued + // certificate. The certificate signer may issue a certificate with a different + // validity duration so a client must check the delta between the notBefore and + // and notAfter fields in the issued certificate to determine the actual duration. + // + // The v1.22+ in-tree implementations of the well-known Kubernetes signers will + // honor this field as long as the requested duration is not greater than the + // maximum duration they will honor per the --cluster-signing-duration CLI + // flag to the Kubernetes controller manager. + // + // Certificate signers may not honor this field for various reasons: + // + // 1. Old signer that is unaware of the field (such as the in-tree + // implementations prior to v1.22) + // 2. Signer whose configured maximum is shorter than the requested duration + // 3. Signer whose configured minimum is longer than the requested duration + // + // The minimum valid value for expirationSeconds is 600, i.e. 10 minutes. + // + // As of v1.22, this field is beta and is controlled via the CSRDuration feature gate. + // + // +optional + optional int32 expirationSeconds = 8; + // usages specifies a set of key usages requested in the issued certificate. // // Requests for TLS client certificates typically request: "digital signature", "key encipherment", "client auth". diff --git a/vendor/k8s.io/api/certificates/v1/types.go b/vendor/k8s.io/api/certificates/v1/types.go index 8d3b2305ecfc2..6403d4600961b 100644 --- a/vendor/k8s.io/api/certificates/v1/types.go +++ b/vendor/k8s.io/api/certificates/v1/types.go @@ -44,7 +44,7 @@ type CertificateSigningRequest struct { metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // spec contains the certificate request, and is immutable after creation. - // Only the request, signerName, and usages fields can be set on creation. + // Only the request, signerName, expirationSeconds, and usages fields can be set on creation. // Other fields are derived by Kubernetes and cannot be modified by users. Spec CertificateSigningRequestSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` @@ -84,6 +84,30 @@ type CertificateSigningRequestSpec struct { // 6. Whether or not requests for CA certificates are allowed. 
SignerName string `json:"signerName" protobuf:"bytes,7,opt,name=signerName"` + // expirationSeconds is the requested duration of validity of the issued + // certificate. The certificate signer may issue a certificate with a different + // validity duration so a client must check the delta between the notBefore and + // and notAfter fields in the issued certificate to determine the actual duration. + // + // The v1.22+ in-tree implementations of the well-known Kubernetes signers will + // honor this field as long as the requested duration is not greater than the + // maximum duration they will honor per the --cluster-signing-duration CLI + // flag to the Kubernetes controller manager. + // + // Certificate signers may not honor this field for various reasons: + // + // 1. Old signer that is unaware of the field (such as the in-tree + // implementations prior to v1.22) + // 2. Signer whose configured maximum is shorter than the requested duration + // 3. Signer whose configured minimum is longer than the requested duration + // + // The minimum valid value for expirationSeconds is 600, i.e. 10 minutes. + // + // As of v1.22, this field is beta and is controlled via the CSRDuration feature gate. + // + // +optional + ExpirationSeconds *int32 `json:"expirationSeconds,omitempty" protobuf:"varint,8,opt,name=expirationSeconds"` + // usages specifies a set of key usages requested in the issued certificate. // // Requests for TLS client certificates typically request: "digital signature", "key encipherment", "client auth". @@ -254,6 +278,7 @@ type CertificateSigningRequestList struct { // KeyUsage specifies valid usage contexts for keys. // See: https://tools.ietf.org/html/rfc5280#section-4.2.1.3 // https://tools.ietf.org/html/rfc5280#section-4.2.1.12 +// +enum type KeyUsage string // Valid key usages diff --git a/vendor/k8s.io/api/certificates/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/certificates/v1/types_swagger_doc_generated.go index 9a078fa0c456a..2714584eda534 100644 --- a/vendor/k8s.io/api/certificates/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/certificates/v1/types_swagger_doc_generated.go @@ -29,7 +29,7 @@ package v1 // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_CertificateSigningRequest = map[string]string{ "": "CertificateSigningRequest objects provide a mechanism to obtain x509 certificates by submitting a certificate signing request, and having it asynchronously approved and issued.\n\nKubelets use this API to obtain:\n 1. client certificates to authenticate to kube-apiserver (with the \"kubernetes.io/kube-apiserver-client-kubelet\" signerName).\n 2. serving certificates for TLS endpoints kube-apiserver can connect to securely (with the \"kubernetes.io/kubelet-serving\" signerName).\n\nThis API can be used to request client certificates to authenticate to kube-apiserver (with the \"kubernetes.io/kube-apiserver-client\" signerName), or to obtain certificates from custom non-Kubernetes signers.", - "spec": "spec contains the certificate request, and is immutable after creation. Only the request, signerName, and usages fields can be set on creation. Other fields are derived by Kubernetes and cannot be modified by users.", + "spec": "spec contains the certificate request, and is immutable after creation. Only the request, signerName, expirationSeconds, and usages fields can be set on creation. 
Other fields are derived by Kubernetes and cannot be modified by users.", "status": "status contains information about whether the request is approved or denied, and the certificate issued by the signer, or the failure condition indicating signer failure.", } @@ -61,14 +61,15 @@ func (CertificateSigningRequestList) SwaggerDoc() map[string]string { } var map_CertificateSigningRequestSpec = map[string]string{ - "": "CertificateSigningRequestSpec contains the certificate request.", - "request": "request contains an x509 certificate signing request encoded in a \"CERTIFICATE REQUEST\" PEM block. When serialized as JSON or YAML, the data is additionally base64-encoded.", - "signerName": "signerName indicates the requested signer, and is a qualified name.\n\nList/watch requests for CertificateSigningRequests can filter on this field using a \"spec.signerName=NAME\" fieldSelector.\n\nWell-known Kubernetes signers are:\n 1. \"kubernetes.io/kube-apiserver-client\": issues client certificates that can be used to authenticate to kube-apiserver.\n Requests for this signer are never auto-approved by kube-controller-manager, can be issued by the \"csrsigning\" controller in kube-controller-manager.\n 2. \"kubernetes.io/kube-apiserver-client-kubelet\": issues client certificates that kubelets use to authenticate to kube-apiserver.\n Requests for this signer can be auto-approved by the \"csrapproving\" controller in kube-controller-manager, and can be issued by the \"csrsigning\" controller in kube-controller-manager.\n 3. \"kubernetes.io/kubelet-serving\" issues serving certificates that kubelets use to serve TLS endpoints, which kube-apiserver can connect to securely.\n Requests for this signer are never auto-approved by kube-controller-manager, and can be issued by the \"csrsigning\" controller in kube-controller-manager.\n\nMore details are available at https://k8s.io/docs/reference/access-authn-authz/certificate-signing-requests/#kubernetes-signers\n\nCustom signerNames can also be specified. The signer defines:\n 1. Trust distribution: how trust (CA bundles) are distributed.\n 2. Permitted subjects: and behavior when a disallowed subject is requested.\n 3. Required, permitted, or forbidden x509 extensions in the request (including whether subjectAltNames are allowed, which types, restrictions on allowed values) and behavior when a disallowed extension is requested.\n 4. Required, permitted, or forbidden key usages / extended key usages.\n 5. Expiration/certificate lifetime: whether it is fixed by the signer, configurable by the admin.\n 6. Whether or not requests for CA certificates are allowed.", - "usages": "usages specifies a set of key usages requested in the issued certificate.\n\nRequests for TLS client certificates typically request: \"digital signature\", \"key encipherment\", \"client auth\".\n\nRequests for TLS serving certificates typically request: \"key encipherment\", \"digital signature\", \"server auth\".\n\nValid values are:\n \"signing\", \"digital signature\", \"content commitment\",\n \"key encipherment\", \"key agreement\", \"data encipherment\",\n \"cert sign\", \"crl sign\", \"encipher only\", \"decipher only\", \"any\",\n \"server auth\", \"client auth\",\n \"code signing\", \"email protection\", \"s/mime\",\n \"ipsec end system\", \"ipsec tunnel\", \"ipsec user\",\n \"timestamping\", \"ocsp signing\", \"microsoft sgc\", \"netscape sgc\"", - "username": "username contains the name of the user that created the CertificateSigningRequest. 
Populated by the API server on creation and immutable.", - "uid": "uid contains the uid of the user that created the CertificateSigningRequest. Populated by the API server on creation and immutable.", - "groups": "groups contains group membership of the user that created the CertificateSigningRequest. Populated by the API server on creation and immutable.", - "extra": "extra contains extra attributes of the user that created the CertificateSigningRequest. Populated by the API server on creation and immutable.", + "": "CertificateSigningRequestSpec contains the certificate request.", + "request": "request contains an x509 certificate signing request encoded in a \"CERTIFICATE REQUEST\" PEM block. When serialized as JSON or YAML, the data is additionally base64-encoded.", + "signerName": "signerName indicates the requested signer, and is a qualified name.\n\nList/watch requests for CertificateSigningRequests can filter on this field using a \"spec.signerName=NAME\" fieldSelector.\n\nWell-known Kubernetes signers are:\n 1. \"kubernetes.io/kube-apiserver-client\": issues client certificates that can be used to authenticate to kube-apiserver.\n Requests for this signer are never auto-approved by kube-controller-manager, can be issued by the \"csrsigning\" controller in kube-controller-manager.\n 2. \"kubernetes.io/kube-apiserver-client-kubelet\": issues client certificates that kubelets use to authenticate to kube-apiserver.\n Requests for this signer can be auto-approved by the \"csrapproving\" controller in kube-controller-manager, and can be issued by the \"csrsigning\" controller in kube-controller-manager.\n 3. \"kubernetes.io/kubelet-serving\" issues serving certificates that kubelets use to serve TLS endpoints, which kube-apiserver can connect to securely.\n Requests for this signer are never auto-approved by kube-controller-manager, and can be issued by the \"csrsigning\" controller in kube-controller-manager.\n\nMore details are available at https://k8s.io/docs/reference/access-authn-authz/certificate-signing-requests/#kubernetes-signers\n\nCustom signerNames can also be specified. The signer defines:\n 1. Trust distribution: how trust (CA bundles) are distributed.\n 2. Permitted subjects: and behavior when a disallowed subject is requested.\n 3. Required, permitted, or forbidden x509 extensions in the request (including whether subjectAltNames are allowed, which types, restrictions on allowed values) and behavior when a disallowed extension is requested.\n 4. Required, permitted, or forbidden key usages / extended key usages.\n 5. Expiration/certificate lifetime: whether it is fixed by the signer, configurable by the admin.\n 6. Whether or not requests for CA certificates are allowed.", + "expirationSeconds": "expirationSeconds is the requested duration of validity of the issued certificate. The certificate signer may issue a certificate with a different validity duration so a client must check the delta between the notBefore and and notAfter fields in the issued certificate to determine the actual duration.\n\nThe v1.22+ in-tree implementations of the well-known Kubernetes signers will honor this field as long as the requested duration is not greater than the maximum duration they will honor per the --cluster-signing-duration CLI flag to the Kubernetes controller manager.\n\nCertificate signers may not honor this field for various reasons:\n\n 1. Old signer that is unaware of the field (such as the in-tree\n implementations prior to v1.22)\n 2. 
Signer whose configured maximum is shorter than the requested duration\n 3. Signer whose configured minimum is longer than the requested duration\n\nThe minimum valid value for expirationSeconds is 600, i.e. 10 minutes.\n\nAs of v1.22, this field is beta and is controlled via the CSRDuration feature gate.", + "usages": "usages specifies a set of key usages requested in the issued certificate.\n\nRequests for TLS client certificates typically request: \"digital signature\", \"key encipherment\", \"client auth\".\n\nRequests for TLS serving certificates typically request: \"key encipherment\", \"digital signature\", \"server auth\".\n\nValid values are:\n \"signing\", \"digital signature\", \"content commitment\",\n \"key encipherment\", \"key agreement\", \"data encipherment\",\n \"cert sign\", \"crl sign\", \"encipher only\", \"decipher only\", \"any\",\n \"server auth\", \"client auth\",\n \"code signing\", \"email protection\", \"s/mime\",\n \"ipsec end system\", \"ipsec tunnel\", \"ipsec user\",\n \"timestamping\", \"ocsp signing\", \"microsoft sgc\", \"netscape sgc\"", + "username": "username contains the name of the user that created the CertificateSigningRequest. Populated by the API server on creation and immutable.", + "uid": "uid contains the uid of the user that created the CertificateSigningRequest. Populated by the API server on creation and immutable.", + "groups": "groups contains group membership of the user that created the CertificateSigningRequest. Populated by the API server on creation and immutable.", + "extra": "extra contains extra attributes of the user that created the CertificateSigningRequest. Populated by the API server on creation and immutable.", } func (CertificateSigningRequestSpec) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/certificates/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/certificates/v1/zz_generated.deepcopy.go index cc6a60be78156..aefa6041174a8 100644 --- a/vendor/k8s.io/api/certificates/v1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/certificates/v1/zz_generated.deepcopy.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* @@ -111,6 +112,11 @@ func (in *CertificateSigningRequestSpec) DeepCopyInto(out *CertificateSigningReq *out = make([]byte, len(*in)) copy(*out, *in) } + if in.ExpirationSeconds != nil { + in, out := &in.ExpirationSeconds, &out.ExpirationSeconds + *out = new(int32) + **out = **in + } if in.Usages != nil { in, out := &in.Usages, &out.Usages *out = make([]KeyUsage, len(*in)) diff --git a/vendor/k8s.io/api/certificates/v1beta1/generated.pb.go b/vendor/k8s.io/api/certificates/v1beta1/generated.pb.go index f21256f484492..d9a29764418dc 100644 --- a/vendor/k8s.io/api/certificates/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/certificates/v1beta1/generated.pb.go @@ -229,62 +229,64 @@ func init() { } var fileDescriptor_09d156762b8218ef = []byte{ - // 878 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x55, 0x4b, 0x6f, 0x1c, 0x45, - 0x10, 0xde, 0xf1, 0xbe, 0x7b, 0x8d, 0x13, 0xb5, 0x50, 0x34, 0xac, 0x94, 0x19, 0x6b, 0x04, 0xc8, - 0x3c, 0xd2, 0x83, 0xa3, 0x08, 0x2c, 0x1f, 0x10, 0x8c, 0x89, 0xc0, 0xc2, 0x01, 0xa9, 0x6d, 0x73, - 0x40, 0x48, 0xa4, 0x77, 0xb6, 0x32, 0xee, 0x6c, 0xe6, 0xc1, 0x74, 0xcf, 0xc2, 0xde, 0xf2, 0x13, - 0x38, 0x72, 0xe4, 0xe7, 0x98, 0x03, 0x52, 0x8e, 0x39, 0xa0, 0x15, 0xde, 0xdc, 0xf9, 0x01, 0x3e, - 0xa1, 0xee, 0xe9, 0x9d, 0x5d, 0xbf, 0x70, 0x48, 0x6e, 0xdb, 0x5f, 0xd7, 0xf7, 0x7d, 0x55, 0x35, - 
0xd5, 0xb5, 0xe8, 0xab, 0xd1, 0x96, 0x20, 0x3c, 0xf5, 0x47, 0xc5, 0x00, 0xf2, 0x04, 0x24, 0x08, - 0x7f, 0x0c, 0xc9, 0x30, 0xcd, 0x7d, 0x73, 0xc1, 0x32, 0xee, 0x87, 0x90, 0x4b, 0xfe, 0x88, 0x87, - 0x4c, 0x5f, 0x6f, 0x0e, 0x40, 0xb2, 0x4d, 0x3f, 0x82, 0x04, 0x72, 0x26, 0x61, 0x48, 0xb2, 0x3c, - 0x95, 0x29, 0x76, 0x4b, 0x02, 0x61, 0x19, 0x27, 0xcb, 0x04, 0x62, 0x08, 0xfd, 0x3b, 0x11, 0x97, - 0x47, 0xc5, 0x80, 0x84, 0x69, 0xec, 0x47, 0x69, 0x94, 0xfa, 0x9a, 0x37, 0x28, 0x1e, 0xe9, 0x93, - 0x3e, 0xe8, 0x5f, 0xa5, 0x5e, 0xdf, 0x5b, 0x4e, 0x20, 0xcd, 0xc1, 0x1f, 0x5f, 0xf0, 0xec, 0xdf, - 0x5b, 0xc4, 0xc4, 0x2c, 0x3c, 0xe2, 0x09, 0xe4, 0x13, 0x3f, 0x1b, 0x45, 0x0a, 0x10, 0x7e, 0x0c, - 0x92, 0x5d, 0xc6, 0xf2, 0xaf, 0x62, 0xe5, 0x45, 0x22, 0x79, 0x0c, 0x17, 0x08, 0x1f, 0x5f, 0x47, - 0x10, 0xe1, 0x11, 0xc4, 0xec, 0x3c, 0xcf, 0xfb, 0x63, 0x05, 0xbd, 0xb5, 0xb3, 0x68, 0xc5, 0x3e, - 0x8f, 0x12, 0x9e, 0x44, 0x14, 0x7e, 0x2a, 0x40, 0x48, 0xfc, 0x10, 0x75, 0x54, 0x86, 0x43, 0x26, - 0x99, 0x6d, 0xad, 0x5b, 0x1b, 0xbd, 0xbb, 0x1f, 0x91, 0x45, 0x0f, 0x2b, 0x23, 0x92, 0x8d, 0x22, - 0x05, 0x08, 0xa2, 0xa2, 0xc9, 0x78, 0x93, 0x7c, 0x3b, 0x78, 0x0c, 0xa1, 0x7c, 0x00, 0x92, 0x05, - 0xf8, 0x78, 0xea, 0xd6, 0x66, 0x53, 0x17, 0x2d, 0x30, 0x5a, 0xa9, 0xe2, 0x87, 0xa8, 0x21, 0x32, - 0x08, 0xed, 0x15, 0xad, 0xfe, 0x29, 0xb9, 0xe6, 0x0b, 0x91, 0x2b, 0x73, 0xdd, 0xcf, 0x20, 0x0c, - 0x56, 0x8d, 0x57, 0x43, 0x9d, 0xa8, 0x56, 0xc6, 0x47, 0xa8, 0x25, 0x24, 0x93, 0x85, 0xb0, 0xeb, - 0xda, 0xe3, 0xb3, 0xd7, 0xf0, 0xd0, 0x3a, 0xc1, 0x9a, 0x71, 0x69, 0x95, 0x67, 0x6a, 0xf4, 0xbd, - 0x17, 0x75, 0xe4, 0x5d, 0xc9, 0xdd, 0x49, 0x93, 0x21, 0x97, 0x3c, 0x4d, 0xf0, 0x16, 0x6a, 0xc8, - 0x49, 0x06, 0xba, 0xa1, 0xdd, 0xe0, 0xed, 0x79, 0xca, 0x07, 0x93, 0x0c, 0x4e, 0xa7, 0xee, 0x9b, - 0xe7, 0xe3, 0x15, 0x4e, 0x35, 0x03, 0xef, 0x55, 0xa5, 0xb4, 0x34, 0xf7, 0xde, 0xd9, 0x44, 0x4e, - 0xa7, 0xee, 0x25, 0x13, 0x49, 0x2a, 0xa5, 0xb3, 0xe9, 0xe2, 0x77, 0x51, 0x2b, 0x07, 0x26, 0xd2, - 0x44, 0x37, 0xbf, 0xbb, 0x28, 0x8b, 0x6a, 0x94, 0x9a, 0x5b, 0xfc, 0x1e, 0x6a, 0xc7, 0x20, 0x04, - 0x8b, 0x40, 0x77, 0xb0, 0x1b, 0xdc, 0x30, 0x81, 0xed, 0x07, 0x25, 0x4c, 0xe7, 0xf7, 0xf8, 0x31, - 0x5a, 0x7b, 0xc2, 0x84, 0x3c, 0xcc, 0x86, 0x4c, 0xc2, 0x01, 0x8f, 0xc1, 0x6e, 0xe8, 0x9e, 0xbf, - 0xff, 0x72, 0x53, 0xa3, 0x18, 0xc1, 0x2d, 0xa3, 0xbe, 0xb6, 0x77, 0x46, 0x89, 0x9e, 0x53, 0xc6, - 0x63, 0x84, 0x15, 0x72, 0x90, 0xb3, 0x44, 0x94, 0x8d, 0x52, 0x7e, 0xcd, 0xff, 0xed, 0xd7, 0x37, - 0x7e, 0x78, 0xef, 0x82, 0x1a, 0xbd, 0xc4, 0xc1, 0x9b, 0x5a, 0xe8, 0xf6, 0x95, 0x5f, 0x79, 0x8f, - 0x0b, 0x89, 0x7f, 0xb8, 0xf0, 0x6a, 0xc8, 0xcb, 0xe5, 0xa3, 0xd8, 0xfa, 0xcd, 0xdc, 0x34, 0x39, - 0x75, 0xe6, 0xc8, 0xd2, 0x8b, 0xf9, 0x11, 0x35, 0xb9, 0x84, 0x58, 0xd8, 0x2b, 0xeb, 0xf5, 0x8d, - 0xde, 0xdd, 0xed, 0x57, 0x1f, 0xe7, 0xe0, 0x0d, 0x63, 0xd3, 0xdc, 0x55, 0x82, 0xb4, 0xd4, 0xf5, - 0xfe, 0xa9, 0xff, 0x47, 0x81, 0xea, 0x61, 0xe1, 0x77, 0x50, 0x3b, 0x2f, 0x8f, 0xba, 0xbe, 0xd5, - 0xa0, 0xa7, 0xa6, 0xc1, 0x44, 0xd0, 0xf9, 0x1d, 0x26, 0x08, 0x09, 0x1e, 0x25, 0x90, 0x7f, 0xc3, - 0x62, 0xb0, 0xdb, 0xe5, 0x90, 0xa9, 0x4d, 0xb0, 0x5f, 0xa1, 0x74, 0x29, 0x02, 0x13, 0xd4, 0x2a, - 0xd4, 0x18, 0x09, 0xbb, 0xb9, 0x5e, 0xdf, 0xe8, 0x06, 0xb7, 0xd4, 0x30, 0x1e, 0x6a, 0xe4, 0x74, - 0xea, 0x76, 0xbe, 0x86, 0x89, 0x3e, 0x50, 0x13, 0x85, 0x3f, 0x44, 0x9d, 0x42, 0x40, 0x9e, 0x28, - 0xf5, 0x72, 0x84, 0xab, 0xbe, 0x1d, 0x1a, 0x9c, 0x56, 0x11, 0xf8, 0x36, 0xaa, 0x17, 0x7c, 0x68, - 0x46, 0xb8, 0x67, 0x02, 0xeb, 0x87, 0xbb, 0x5f, 0x50, 0x85, 0x63, 0x0f, 0xb5, 0xa2, 0x3c, 0x2d, - 0x32, 0x61, 0x37, 0xb4, 
0x39, 0x52, 0xe6, 0x5f, 0x6a, 0x84, 0x9a, 0x1b, 0x9c, 0xa0, 0x26, 0xfc, - 0x22, 0x73, 0x66, 0xb7, 0x74, 0xeb, 0x77, 0x5f, 0x6f, 0x5b, 0x91, 0xfb, 0x4a, 0xeb, 0x7e, 0x22, - 0xf3, 0xc9, 0xe2, 0x4b, 0x68, 0x8c, 0x96, 0x36, 0x7d, 0x40, 0x68, 0x11, 0x83, 0x6f, 0xa2, 0xfa, - 0x08, 0x26, 0xe5, 0xda, 0xa0, 0xea, 0x27, 0xfe, 0x1c, 0x35, 0xc7, 0xec, 0x49, 0x01, 0x66, 0x7b, - 0x7e, 0x70, 0x6d, 0x3e, 0x5a, 0xed, 0x3b, 0x45, 0xa1, 0x25, 0x73, 0x7b, 0x65, 0xcb, 0xf2, 0xfe, - 0xb4, 0x90, 0x7b, 0xcd, 0xce, 0xc3, 0x3f, 0x23, 0x14, 0xce, 0xf7, 0x88, 0xb0, 0x2d, 0x5d, 0xff, - 0xce, 0xab, 0xd7, 0x5f, 0xed, 0xa4, 0xc5, 0xdf, 0x43, 0x05, 0x09, 0xba, 0x64, 0x85, 0x37, 0x51, - 0x6f, 0x49, 0x5a, 0x57, 0xba, 0x1a, 0xdc, 0x98, 0x4d, 0xdd, 0xde, 0x92, 0x38, 0x5d, 0x8e, 0xf1, - 0x3e, 0x31, 0x6d, 0xd3, 0x85, 0x62, 0x77, 0xfe, 0x5e, 0x2c, 0xfd, 0x5d, 0xbb, 0xe7, 0xe7, 0x7d, - 0xbb, 0xf3, 0xdb, 0xef, 0x6e, 0xed, 0xe9, 0x5f, 0xeb, 0xb5, 0xe0, 0xce, 0xf1, 0x89, 0x53, 0x7b, - 0x76, 0xe2, 0xd4, 0x9e, 0x9f, 0x38, 0xb5, 0xa7, 0x33, 0xc7, 0x3a, 0x9e, 0x39, 0xd6, 0xb3, 0x99, - 0x63, 0x3d, 0x9f, 0x39, 0xd6, 0xdf, 0x33, 0xc7, 0xfa, 0xf5, 0x85, 0x53, 0xfb, 0xbe, 0x6d, 0xaa, - 0xfb, 0x37, 0x00, 0x00, 0xff, 0xff, 0x21, 0x97, 0x54, 0xe9, 0x99, 0x08, 0x00, 0x00, + // 912 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x55, 0xdd, 0x6e, 0x1b, 0x45, + 0x14, 0xf6, 0xc6, 0xff, 0xe3, 0x92, 0xb6, 0x23, 0xa8, 0x16, 0x4b, 0xf5, 0x5a, 0x16, 0xa0, 0xf0, + 0xd3, 0x59, 0x52, 0x55, 0x10, 0xe5, 0x02, 0xc1, 0x86, 0x08, 0x22, 0x52, 0x90, 0x26, 0x09, 0x17, + 0x08, 0x89, 0x8e, 0xd7, 0xa7, 0x9b, 0xa9, 0xbb, 0x3f, 0xec, 0xcc, 0x9a, 0xfa, 0xae, 0x8f, 0xc0, + 0x25, 0x97, 0xbc, 0x03, 0x2f, 0x11, 0x2e, 0x90, 0x7a, 0xd9, 0x0b, 0x64, 0x11, 0xf7, 0x2d, 0x72, + 0x85, 0x66, 0x76, 0xbc, 0x76, 0xec, 0x84, 0x94, 0xf6, 0xce, 0xf3, 0xcd, 0xf9, 0xbe, 0xef, 0x9c, + 0xb3, 0xe7, 0x8c, 0xd1, 0xd7, 0xc3, 0x2d, 0x41, 0x78, 0xec, 0x0e, 0xb3, 0x3e, 0xa4, 0x11, 0x48, + 0x10, 0xee, 0x08, 0xa2, 0x41, 0x9c, 0xba, 0xe6, 0x82, 0x25, 0xdc, 0xf5, 0x21, 0x95, 0xfc, 0x21, + 0xf7, 0x99, 0xbe, 0xde, 0xec, 0x83, 0x64, 0x9b, 0x6e, 0x00, 0x11, 0xa4, 0x4c, 0xc2, 0x80, 0x24, + 0x69, 0x2c, 0x63, 0xec, 0xe4, 0x04, 0xc2, 0x12, 0x4e, 0x16, 0x09, 0xc4, 0x10, 0xda, 0x77, 0x02, + 0x2e, 0x8f, 0xb3, 0x3e, 0xf1, 0xe3, 0xd0, 0x0d, 0xe2, 0x20, 0x76, 0x35, 0xaf, 0x9f, 0x3d, 0xd4, + 0x27, 0x7d, 0xd0, 0xbf, 0x72, 0xbd, 0x76, 0x6f, 0x31, 0x81, 0x38, 0x05, 0x77, 0xb4, 0xe2, 0xd9, + 0xbe, 0x37, 0x8f, 0x09, 0x99, 0x7f, 0xcc, 0x23, 0x48, 0xc7, 0x6e, 0x32, 0x0c, 0x14, 0x20, 0xdc, + 0x10, 0x24, 0xbb, 0x88, 0xe5, 0x5e, 0xc6, 0x4a, 0xb3, 0x48, 0xf2, 0x10, 0x56, 0x08, 0x9f, 0x5c, + 0x45, 0x10, 0xfe, 0x31, 0x84, 0x6c, 0x99, 0xd7, 0xfb, 0x73, 0x0d, 0xbd, 0xbd, 0x33, 0x6f, 0xc5, + 0x01, 0x0f, 0x22, 0x1e, 0x05, 0x14, 0x7e, 0xce, 0x40, 0x48, 0xfc, 0x00, 0x35, 0x54, 0x86, 0x03, + 0x26, 0x99, 0x6d, 0x75, 0xad, 0x8d, 0xd6, 0xdd, 0x8f, 0xc9, 0xbc, 0x87, 0x85, 0x11, 0x49, 0x86, + 0x81, 0x02, 0x04, 0x51, 0xd1, 0x64, 0xb4, 0x49, 0xbe, 0xeb, 0x3f, 0x02, 0x5f, 0xde, 0x07, 0xc9, + 0x3c, 0x7c, 0x32, 0x71, 0x4a, 0xd3, 0x89, 0x83, 0xe6, 0x18, 0x2d, 0x54, 0xf1, 0x03, 0x54, 0x11, + 0x09, 0xf8, 0xf6, 0x9a, 0x56, 0xff, 0x8c, 0x5c, 0xf1, 0x85, 0xc8, 0xa5, 0xb9, 0x1e, 0x24, 0xe0, + 0x7b, 0xd7, 0x8c, 0x57, 0x45, 0x9d, 0xa8, 0x56, 0xc6, 0xc7, 0xa8, 0x26, 0x24, 0x93, 0x99, 0xb0, + 0xcb, 0xda, 0xe3, 0xf3, 0xd7, 0xf0, 0xd0, 0x3a, 0xde, 0xba, 0x71, 0xa9, 0xe5, 0x67, 0x6a, 0xf4, + 0x7b, 0x2f, 0xca, 0xa8, 0x77, 0x29, 0x77, 0x27, 0x8e, 0x06, 0x5c, 0xf2, 0x38, 0xc2, 0x5b, 0xa8, + 0x22, 0xc7, 
0x09, 0xe8, 0x86, 0x36, 0xbd, 0x77, 0x66, 0x29, 0x1f, 0x8e, 0x13, 0x38, 0x9b, 0x38, + 0x6f, 0x2e, 0xc7, 0x2b, 0x9c, 0x6a, 0x06, 0xde, 0x2f, 0x4a, 0xa9, 0x69, 0xee, 0xbd, 0xf3, 0x89, + 0x9c, 0x4d, 0x9c, 0x0b, 0x26, 0x92, 0x14, 0x4a, 0xe7, 0xd3, 0xc5, 0xef, 0xa1, 0x5a, 0x0a, 0x4c, + 0xc4, 0x91, 0x6e, 0x7e, 0x73, 0x5e, 0x16, 0xd5, 0x28, 0x35, 0xb7, 0xf8, 0x7d, 0x54, 0x0f, 0x41, + 0x08, 0x16, 0x80, 0xee, 0x60, 0xd3, 0xbb, 0x6e, 0x02, 0xeb, 0xf7, 0x73, 0x98, 0xce, 0xee, 0xf1, + 0x23, 0xb4, 0xfe, 0x98, 0x09, 0x79, 0x94, 0x0c, 0x98, 0x84, 0x43, 0x1e, 0x82, 0x5d, 0xd1, 0x3d, + 0xff, 0xe0, 0xe5, 0xa6, 0x46, 0x31, 0xbc, 0x5b, 0x46, 0x7d, 0x7d, 0xff, 0x9c, 0x12, 0x5d, 0x52, + 0xc6, 0x23, 0x84, 0x15, 0x72, 0x98, 0xb2, 0x48, 0xe4, 0x8d, 0x52, 0x7e, 0xd5, 0xff, 0xed, 0xd7, + 0x36, 0x7e, 0x78, 0x7f, 0x45, 0x8d, 0x5e, 0xe0, 0xd0, 0x9b, 0x58, 0xe8, 0xf6, 0xa5, 0x5f, 0x79, + 0x9f, 0x0b, 0x89, 0x7f, 0x5c, 0xd9, 0x1a, 0xf2, 0x72, 0xf9, 0x28, 0xb6, 0xde, 0x99, 0x1b, 0x26, + 0xa7, 0xc6, 0x0c, 0x59, 0xd8, 0x98, 0x9f, 0x50, 0x95, 0x4b, 0x08, 0x85, 0xbd, 0xd6, 0x2d, 0x6f, + 0xb4, 0xee, 0x6e, 0xbf, 0xfa, 0x38, 0x7b, 0x6f, 0x18, 0x9b, 0xea, 0x9e, 0x12, 0xa4, 0xb9, 0x6e, + 0xef, 0x8f, 0xca, 0x7f, 0x14, 0xa8, 0x16, 0x0b, 0xbf, 0x8b, 0xea, 0x69, 0x7e, 0xd4, 0xf5, 0x5d, + 0xf3, 0x5a, 0x6a, 0x1a, 0x4c, 0x04, 0x9d, 0xdd, 0x61, 0x82, 0x90, 0xe0, 0x41, 0x04, 0xe9, 0xb7, + 0x2c, 0x04, 0xbb, 0x9e, 0x0f, 0x99, 0x7a, 0x09, 0x0e, 0x0a, 0x94, 0x2e, 0x44, 0xe0, 0x1d, 0x74, + 0x13, 0x9e, 0x24, 0x3c, 0x65, 0x7a, 0x58, 0xc1, 0x8f, 0xa3, 0x81, 0xb0, 0x1b, 0x5d, 0x6b, 0xa3, + 0xea, 0xbd, 0x35, 0x9d, 0x38, 0x37, 0x77, 0x97, 0x2f, 0xe9, 0x6a, 0x3c, 0x26, 0xa8, 0x96, 0xa9, + 0x59, 0x14, 0x76, 0xb5, 0x5b, 0xde, 0x68, 0x7a, 0xb7, 0xd4, 0x44, 0x1f, 0x69, 0xe4, 0x6c, 0xe2, + 0x34, 0xbe, 0x81, 0xb1, 0x3e, 0x50, 0x13, 0x85, 0x3f, 0x42, 0x8d, 0x4c, 0x40, 0x1a, 0xa9, 0x14, + 0xf3, 0x3d, 0x28, 0x9a, 0x7f, 0x64, 0x70, 0x5a, 0x44, 0xe0, 0xdb, 0xa8, 0x9c, 0xf1, 0x81, 0xd9, + 0x83, 0x96, 0x09, 0x2c, 0x1f, 0xed, 0x7d, 0x49, 0x15, 0x8e, 0x7b, 0xa8, 0x16, 0xa4, 0x71, 0x96, + 0x08, 0xbb, 0xa2, 0xcd, 0x91, 0x32, 0xff, 0x4a, 0x23, 0xd4, 0xdc, 0xe0, 0x08, 0x55, 0xe1, 0x89, + 0x4c, 0x99, 0x5d, 0xd3, 0xdf, 0x6f, 0xef, 0xf5, 0x9e, 0x3c, 0xb2, 0xab, 0xb4, 0x76, 0x23, 0x99, + 0x8e, 0xe7, 0x9f, 0x53, 0x63, 0x34, 0xb7, 0x69, 0x03, 0x42, 0xf3, 0x18, 0x7c, 0x03, 0x95, 0x87, + 0x30, 0xce, 0xdf, 0x1e, 0xaa, 0x7e, 0xe2, 0x2f, 0x50, 0x75, 0xc4, 0x1e, 0x67, 0x60, 0x9e, 0xe0, + 0x0f, 0xaf, 0xcc, 0x47, 0xab, 0x7d, 0xaf, 0x28, 0x34, 0x67, 0x6e, 0xaf, 0x6d, 0x59, 0xbd, 0xbf, + 0x2c, 0xe4, 0x5c, 0xf1, 0x70, 0xe2, 0x5f, 0x10, 0xf2, 0x67, 0x8f, 0x91, 0xb0, 0x2d, 0x5d, 0xff, + 0xce, 0xab, 0xd7, 0x5f, 0x3c, 0x6c, 0xf3, 0xff, 0x98, 0x02, 0x12, 0x74, 0xc1, 0x0a, 0x6f, 0xa2, + 0xd6, 0x82, 0xb4, 0xae, 0xf4, 0x9a, 0x77, 0x7d, 0x3a, 0x71, 0x5a, 0x0b, 0xe2, 0x74, 0x31, 0xa6, + 0xf7, 0xa9, 0x69, 0x9b, 0x2e, 0x14, 0x3b, 0xb3, 0xa5, 0xb3, 0xf4, 0x77, 0x6d, 0x2e, 0x2f, 0xcd, + 0x76, 0xe3, 0xb7, 0xdf, 0x9d, 0xd2, 0xd3, 0xbf, 0xbb, 0x25, 0xef, 0xce, 0xc9, 0x69, 0xa7, 0xf4, + 0xec, 0xb4, 0x53, 0x7a, 0x7e, 0xda, 0x29, 0x3d, 0x9d, 0x76, 0xac, 0x93, 0x69, 0xc7, 0x7a, 0x36, + 0xed, 0x58, 0xcf, 0xa7, 0x1d, 0xeb, 0x9f, 0x69, 0xc7, 0xfa, 0xf5, 0x45, 0xa7, 0xf4, 0x43, 0xdd, + 0x54, 0xf7, 0x6f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x95, 0x4b, 0xd3, 0xe2, 0xde, 0x08, 0x00, 0x00, } func (m *CertificateSigningRequest) Marshal() (dAtA []byte, err error) { @@ -470,6 +472,11 @@ func (m *CertificateSigningRequestSpec) MarshalToSizedBuffer(dAtA []byte) (int, _ = i var l int _ = l + if m.ExpirationSeconds != nil { + i 
= encodeVarintGenerated(dAtA, i, uint64(*m.ExpirationSeconds)) + i-- + dAtA[i] = 0x40 + } if m.SignerName != nil { i -= len(*m.SignerName) copy(dAtA[i:], *m.SignerName) @@ -723,6 +730,9 @@ func (m *CertificateSigningRequestSpec) Size() (n int) { l = len(*m.SignerName) n += 1 + l + sovGenerated(uint64(l)) } + if m.ExpirationSeconds != nil { + n += 1 + sovGenerated(uint64(*m.ExpirationSeconds)) + } return n } @@ -831,6 +841,7 @@ func (this *CertificateSigningRequestSpec) String() string { `Usages:` + fmt.Sprintf("%v", this.Usages) + `,`, `Extra:` + mapStringForExtra + `,`, `SignerName:` + valueToStringGenerated(this.SignerName) + `,`, + `ExpirationSeconds:` + valueToStringGenerated(this.ExpirationSeconds) + `,`, `}`, }, "") return s @@ -1722,6 +1733,26 @@ func (m *CertificateSigningRequestSpec) Unmarshal(dAtA []byte) error { s := string(dAtA[iNdEx:postIndex]) m.SignerName = &s iNdEx = postIndex + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ExpirationSeconds", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.ExpirationSeconds = &v default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) diff --git a/vendor/k8s.io/api/certificates/v1beta1/generated.proto b/vendor/k8s.io/api/certificates/v1beta1/generated.proto index 73631fb77c622..904e9520733cb 100644 --- a/vendor/k8s.io/api/certificates/v1beta1/generated.proto +++ b/vendor/k8s.io/api/certificates/v1beta1/generated.proto @@ -34,8 +34,9 @@ message CertificateSigningRequest { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // The certificate request itself and any additional information. - // +optional + // spec contains the certificate request, and is immutable after creation. + // Only the request, signerName, expirationSeconds, and usages fields can be set on creation. + // Other fields are derived by Kubernetes and cannot be modified by users. optional CertificateSigningRequestSpec spec = 2; // Derived information about the request. @@ -80,9 +81,7 @@ message CertificateSigningRequestList { repeated CertificateSigningRequest items = 2; } -// This information is immutable after the request is created. Only the Request -// and Usages fields can be set on creation, other fields are derived by -// Kubernetes and cannot be modified by users. +// CertificateSigningRequestSpec contains the certificate request. message CertificateSigningRequestSpec { // Base64-encoded PKCS#10 CSR data // +listType=atomic @@ -101,6 +100,30 @@ message CertificateSigningRequestSpec { // +optional optional string signerName = 7; + // expirationSeconds is the requested duration of validity of the issued + // certificate. The certificate signer may issue a certificate with a different + // validity duration so a client must check the delta between the notBefore and + // and notAfter fields in the issued certificate to determine the actual duration. + // + // The v1.22+ in-tree implementations of the well-known Kubernetes signers will + // honor this field as long as the requested duration is not greater than the + // maximum duration they will honor per the --cluster-signing-duration CLI + // flag to the Kubernetes controller manager. + // + // Certificate signers may not honor this field for various reasons: + // + // 1. 
Old signer that is unaware of the field (such as the in-tree + // implementations prior to v1.22) + // 2. Signer whose configured maximum is shorter than the requested duration + // 3. Signer whose configured minimum is longer than the requested duration + // + // The minimum valid value for expirationSeconds is 600, i.e. 10 minutes. + // + // As of v1.22, this field is beta and is controlled via the CSRDuration feature gate. + // + // +optional + optional int32 expirationSeconds = 8; + // allowedUsages specifies a set of usage contexts the key will be // valid for. // See: https://tools.ietf.org/html/rfc5280#section-4.2.1.3 diff --git a/vendor/k8s.io/api/certificates/v1beta1/types.go b/vendor/k8s.io/api/certificates/v1beta1/types.go index 9e61c67ff4d17..031ef77550ba9 100644 --- a/vendor/k8s.io/api/certificates/v1beta1/types.go +++ b/vendor/k8s.io/api/certificates/v1beta1/types.go @@ -36,18 +36,17 @@ type CertificateSigningRequest struct { // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // The certificate request itself and any additional information. - // +optional - Spec CertificateSigningRequestSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` + // spec contains the certificate request, and is immutable after creation. + // Only the request, signerName, expirationSeconds, and usages fields can be set on creation. + // Other fields are derived by Kubernetes and cannot be modified by users. + Spec CertificateSigningRequestSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` // Derived information about the request. // +optional Status CertificateSigningRequestStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` } -// This information is immutable after the request is created. Only the Request -// and Usages fields can be set on creation, other fields are derived by -// Kubernetes and cannot be modified by users. +// CertificateSigningRequestSpec contains the certificate request. type CertificateSigningRequestSpec struct { // Base64-encoded PKCS#10 CSR data // +listType=atomic @@ -66,6 +65,30 @@ type CertificateSigningRequestSpec struct { // +optional SignerName *string `json:"signerName,omitempty" protobuf:"bytes,7,opt,name=signerName"` + // expirationSeconds is the requested duration of validity of the issued + // certificate. The certificate signer may issue a certificate with a different + // validity duration so a client must check the delta between the notBefore and + // and notAfter fields in the issued certificate to determine the actual duration. + // + // The v1.22+ in-tree implementations of the well-known Kubernetes signers will + // honor this field as long as the requested duration is not greater than the + // maximum duration they will honor per the --cluster-signing-duration CLI + // flag to the Kubernetes controller manager. + // + // Certificate signers may not honor this field for various reasons: + // + // 1. Old signer that is unaware of the field (such as the in-tree + // implementations prior to v1.22) + // 2. Signer whose configured maximum is shorter than the requested duration + // 3. Signer whose configured minimum is longer than the requested duration + // + // The minimum valid value for expirationSeconds is 600, i.e. 10 minutes. + // + // As of v1.22, this field is beta and is controlled via the CSRDuration feature gate. 
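+	//
+	// Illustrative sketch (an editorial example, not generated content): a client
+	// requesting a certificate valid for roughly one hour could set, e.g.
+	// (csrSpec below is a hypothetical CertificateSigningRequestSpec value):
+	//
+	//	seconds := int32(3600) // 60*60; values below the 600-second minimum are invalid
+	//	csrSpec.ExpirationSeconds = &seconds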
+ // + // +optional + ExpirationSeconds *int32 `json:"expirationSeconds,omitempty" protobuf:"varint,8,opt,name=expirationSeconds"` + // allowedUsages specifies a set of usage contexts the key will be // valid for. // See: https://tools.ietf.org/html/rfc5280#section-4.2.1.3 diff --git a/vendor/k8s.io/api/certificates/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/certificates/v1beta1/types_swagger_doc_generated.go index 396fee0bf6183..816a618cb9e2f 100644 --- a/vendor/k8s.io/api/certificates/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/certificates/v1beta1/types_swagger_doc_generated.go @@ -29,7 +29,7 @@ package v1beta1 // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_CertificateSigningRequest = map[string]string{ "": "Describes a certificate signing request", - "spec": "The certificate request itself and any additional information.", + "spec": "spec contains the certificate request, and is immutable after creation. Only the request, signerName, expirationSeconds, and usages fields can be set on creation. Other fields are derived by Kubernetes and cannot be modified by users.", "status": "Derived information about the request.", } @@ -51,14 +51,15 @@ func (CertificateSigningRequestCondition) SwaggerDoc() map[string]string { } var map_CertificateSigningRequestSpec = map[string]string{ - "": "This information is immutable after the request is created. Only the Request and Usages fields can be set on creation, other fields are derived by Kubernetes and cannot be modified by users.", - "request": "Base64-encoded PKCS#10 CSR data", - "signerName": "Requested signer for the request. It is a qualified name in the form: `scope-hostname.io/name`. If empty, it will be defaulted:\n 1. If it's a kubelet client certificate, it is assigned\n \"kubernetes.io/kube-apiserver-client-kubelet\".\n 2. If it's a kubelet serving certificate, it is assigned\n \"kubernetes.io/kubelet-serving\".\n 3. Otherwise, it is assigned \"kubernetes.io/legacy-unknown\".\nDistribution of trust for signers happens out of band. You can select on this field using `spec.signerName`.", - "usages": "allowedUsages specifies a set of usage contexts the key will be valid for. See: https://tools.ietf.org/html/rfc5280#section-4.2.1.3\n https://tools.ietf.org/html/rfc5280#section-4.2.1.12\nValid values are:\n \"signing\",\n \"digital signature\",\n \"content commitment\",\n \"key encipherment\",\n \"key agreement\",\n \"data encipherment\",\n \"cert sign\",\n \"crl sign\",\n \"encipher only\",\n \"decipher only\",\n \"any\",\n \"server auth\",\n \"client auth\",\n \"code signing\",\n \"email protection\",\n \"s/mime\",\n \"ipsec end system\",\n \"ipsec tunnel\",\n \"ipsec user\",\n \"timestamping\",\n \"ocsp signing\",\n \"microsoft sgc\",\n \"netscape sgc\"", - "username": "Information about the requesting user. See user.Info interface for details.", - "uid": "UID information about the requesting user. See user.Info interface for details.", - "groups": "Group information about the requesting user. See user.Info interface for details.", - "extra": "Extra information about the requesting user. See user.Info interface for details.", + "": "CertificateSigningRequestSpec contains the certificate request.", + "request": "Base64-encoded PKCS#10 CSR data", + "signerName": "Requested signer for the request. It is a qualified name in the form: `scope-hostname.io/name`. If empty, it will be defaulted:\n 1. 
If it's a kubelet client certificate, it is assigned\n \"kubernetes.io/kube-apiserver-client-kubelet\".\n 2. If it's a kubelet serving certificate, it is assigned\n \"kubernetes.io/kubelet-serving\".\n 3. Otherwise, it is assigned \"kubernetes.io/legacy-unknown\".\nDistribution of trust for signers happens out of band. You can select on this field using `spec.signerName`.", + "expirationSeconds": "expirationSeconds is the requested duration of validity of the issued certificate. The certificate signer may issue a certificate with a different validity duration so a client must check the delta between the notBefore and and notAfter fields in the issued certificate to determine the actual duration.\n\nThe v1.22+ in-tree implementations of the well-known Kubernetes signers will honor this field as long as the requested duration is not greater than the maximum duration they will honor per the --cluster-signing-duration CLI flag to the Kubernetes controller manager.\n\nCertificate signers may not honor this field for various reasons:\n\n 1. Old signer that is unaware of the field (such as the in-tree\n implementations prior to v1.22)\n 2. Signer whose configured maximum is shorter than the requested duration\n 3. Signer whose configured minimum is longer than the requested duration\n\nThe minimum valid value for expirationSeconds is 600, i.e. 10 minutes.\n\nAs of v1.22, this field is beta and is controlled via the CSRDuration feature gate.", + "usages": "allowedUsages specifies a set of usage contexts the key will be valid for. See: https://tools.ietf.org/html/rfc5280#section-4.2.1.3\n https://tools.ietf.org/html/rfc5280#section-4.2.1.12\nValid values are:\n \"signing\",\n \"digital signature\",\n \"content commitment\",\n \"key encipherment\",\n \"key agreement\",\n \"data encipherment\",\n \"cert sign\",\n \"crl sign\",\n \"encipher only\",\n \"decipher only\",\n \"any\",\n \"server auth\",\n \"client auth\",\n \"code signing\",\n \"email protection\",\n \"s/mime\",\n \"ipsec end system\",\n \"ipsec tunnel\",\n \"ipsec user\",\n \"timestamping\",\n \"ocsp signing\",\n \"microsoft sgc\",\n \"netscape sgc\"", + "username": "Information about the requesting user. See user.Info interface for details.", + "uid": "UID information about the requesting user. See user.Info interface for details.", + "groups": "Group information about the requesting user. See user.Info interface for details.", + "extra": "Extra information about the requesting user. 
See user.Info interface for details.", } func (CertificateSigningRequestSpec) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/certificates/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/certificates/v1beta1/zz_generated.deepcopy.go index 0463f5bb0c417..a315e2ac60513 100644 --- a/vendor/k8s.io/api/certificates/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/certificates/v1beta1/zz_generated.deepcopy.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* @@ -116,6 +117,11 @@ func (in *CertificateSigningRequestSpec) DeepCopyInto(out *CertificateSigningReq *out = new(string) **out = **in } + if in.ExpirationSeconds != nil { + in, out := &in.ExpirationSeconds, &out.ExpirationSeconds + *out = new(int32) + **out = **in + } if in.Usages != nil { in, out := &in.Usages, &out.Usages *out = make([]KeyUsage, len(*in)) diff --git a/vendor/k8s.io/api/certificates/v1beta1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/certificates/v1beta1/zz_generated.prerelease-lifecycle.go index 6ccebfe6fbeda..480a3293613c6 100644 --- a/vendor/k8s.io/api/certificates/v1beta1/zz_generated.prerelease-lifecycle.go +++ b/vendor/k8s.io/api/certificates/v1beta1/zz_generated.prerelease-lifecycle.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* diff --git a/vendor/k8s.io/api/coordination/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/coordination/v1/zz_generated.deepcopy.go index 2dd7eddbcdb2f..99f6b0be7ac4e 100644 --- a/vendor/k8s.io/api/coordination/v1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/coordination/v1/zz_generated.deepcopy.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* diff --git a/vendor/k8s.io/api/coordination/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/coordination/v1beta1/zz_generated.deepcopy.go index de6962137e289..3adfd8720384a 100644 --- a/vendor/k8s.io/api/coordination/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/coordination/v1beta1/zz_generated.deepcopy.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* diff --git a/vendor/k8s.io/api/coordination/v1beta1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/coordination/v1beta1/zz_generated.prerelease-lifecycle.go index 9d18b9225a096..18926aa1080cd 100644 --- a/vendor/k8s.io/api/coordination/v1beta1/zz_generated.prerelease-lifecycle.go +++ b/vendor/k8s.io/api/coordination/v1beta1/zz_generated.prerelease-lifecycle.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* diff --git a/vendor/k8s.io/api/core/v1/annotation_key_constants.go b/vendor/k8s.io/api/core/v1/annotation_key_constants.go index 612f6aa747a61..eb9517e1dd978 100644 --- a/vendor/k8s.io/api/core/v1/annotation_key_constants.go +++ b/vendor/k8s.io/api/core/v1/annotation_key_constants.go @@ -23,9 +23,6 @@ const ( // webhook backend fails. 
ImagePolicyFailedOpenKey string = "alpha.image-policy.k8s.io/failed-open" - // PodPresetOptOutAnnotationKey represents the annotation key for a pod to exempt itself from pod preset manipulation - PodPresetOptOutAnnotationKey string = "podpreset.admission.kubernetes.io/exclude" - // MirrorAnnotationKey represents the annotation key set by kubelets when creating mirror pods MirrorPodAnnotationKey string = "kubernetes.io/config.mirror" @@ -124,7 +121,7 @@ const ( EndpointsLastChangeTriggerTime = "endpoints.kubernetes.io/last-change-trigger-time" // EndpointsOverCapacity will be set on an Endpoints resource when it - // exceeds the maximum capacity of 1000 addresses. Inititially the Endpoints + // exceeds the maximum capacity of 1000 addresses. Initially the Endpoints // controller will set this annotation with a value of "warning". In a // future release, the controller may set this annotation with a value of // "truncated" to indicate that any addresses exceeding the limit of 1000 @@ -144,11 +141,11 @@ const ( // pod deletion order. // The implicit deletion cost for pods that don't set the annotation is 0, negative values are permitted. // - // This annotation is alpha-level and is only honored when PodDeletionCost feature is enabled. + // This annotation is beta-level and is only honored when PodDeletionCost feature is enabled. PodDeletionCost = "controller.kubernetes.io/pod-deletion-cost" // AnnotationTopologyAwareHints can be used to enable or disable Topology - // Aware Hints for a Service. This may be set to "auto" or "disabled". Any - // other value is treated as "disabled". + // Aware Hints for a Service. This may be set to "Auto" or "Disabled". Any + // other value is treated as "Disabled". AnnotationTopologyAwareHints = "service.kubernetes.io/topology-aware-hints" ) diff --git a/vendor/k8s.io/api/core/v1/generated.pb.go b/vendor/k8s.io/api/core/v1/generated.pb.go index 286a82ecd633b..0418699e63346 100644 --- a/vendor/k8s.io/api/core/v1/generated.pb.go +++ b/vendor/k8s.io/api/core/v1/generated.pb.go @@ -1421,38 +1421,10 @@ func (m *EphemeralContainerCommon) XXX_DiscardUnknown() { var xxx_messageInfo_EphemeralContainerCommon proto.InternalMessageInfo -func (m *EphemeralContainers) Reset() { *m = EphemeralContainers{} } -func (*EphemeralContainers) ProtoMessage() {} -func (*EphemeralContainers) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{49} -} -func (m *EphemeralContainers) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *EphemeralContainers) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *EphemeralContainers) XXX_Merge(src proto.Message) { - xxx_messageInfo_EphemeralContainers.Merge(m, src) -} -func (m *EphemeralContainers) XXX_Size() int { - return m.Size() -} -func (m *EphemeralContainers) XXX_DiscardUnknown() { - xxx_messageInfo_EphemeralContainers.DiscardUnknown(m) -} - -var xxx_messageInfo_EphemeralContainers proto.InternalMessageInfo - func (m *EphemeralVolumeSource) Reset() { *m = EphemeralVolumeSource{} } func (*EphemeralVolumeSource) ProtoMessage() {} func (*EphemeralVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{50} + return fileDescriptor_83c10c24ec417dc9, []int{49} } func (m *EphemeralVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1480,7 +1452,7 @@ var xxx_messageInfo_EphemeralVolumeSource 
proto.InternalMessageInfo func (m *Event) Reset() { *m = Event{} } func (*Event) ProtoMessage() {} func (*Event) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{51} + return fileDescriptor_83c10c24ec417dc9, []int{50} } func (m *Event) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1508,7 +1480,7 @@ var xxx_messageInfo_Event proto.InternalMessageInfo func (m *EventList) Reset() { *m = EventList{} } func (*EventList) ProtoMessage() {} func (*EventList) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{52} + return fileDescriptor_83c10c24ec417dc9, []int{51} } func (m *EventList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1536,7 +1508,7 @@ var xxx_messageInfo_EventList proto.InternalMessageInfo func (m *EventSeries) Reset() { *m = EventSeries{} } func (*EventSeries) ProtoMessage() {} func (*EventSeries) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{53} + return fileDescriptor_83c10c24ec417dc9, []int{52} } func (m *EventSeries) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1564,7 +1536,7 @@ var xxx_messageInfo_EventSeries proto.InternalMessageInfo func (m *EventSource) Reset() { *m = EventSource{} } func (*EventSource) ProtoMessage() {} func (*EventSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{54} + return fileDescriptor_83c10c24ec417dc9, []int{53} } func (m *EventSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1592,7 +1564,7 @@ var xxx_messageInfo_EventSource proto.InternalMessageInfo func (m *ExecAction) Reset() { *m = ExecAction{} } func (*ExecAction) ProtoMessage() {} func (*ExecAction) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{55} + return fileDescriptor_83c10c24ec417dc9, []int{54} } func (m *ExecAction) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1620,7 +1592,7 @@ var xxx_messageInfo_ExecAction proto.InternalMessageInfo func (m *FCVolumeSource) Reset() { *m = FCVolumeSource{} } func (*FCVolumeSource) ProtoMessage() {} func (*FCVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{56} + return fileDescriptor_83c10c24ec417dc9, []int{55} } func (m *FCVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1648,7 +1620,7 @@ var xxx_messageInfo_FCVolumeSource proto.InternalMessageInfo func (m *FlexPersistentVolumeSource) Reset() { *m = FlexPersistentVolumeSource{} } func (*FlexPersistentVolumeSource) ProtoMessage() {} func (*FlexPersistentVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{57} + return fileDescriptor_83c10c24ec417dc9, []int{56} } func (m *FlexPersistentVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1676,7 +1648,7 @@ var xxx_messageInfo_FlexPersistentVolumeSource proto.InternalMessageInfo func (m *FlexVolumeSource) Reset() { *m = FlexVolumeSource{} } func (*FlexVolumeSource) ProtoMessage() {} func (*FlexVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{58} + return fileDescriptor_83c10c24ec417dc9, []int{57} } func (m *FlexVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1704,7 +1676,7 @@ var xxx_messageInfo_FlexVolumeSource proto.InternalMessageInfo func (m *FlockerVolumeSource) Reset() { *m = FlockerVolumeSource{} } func (*FlockerVolumeSource) ProtoMessage() {} func (*FlockerVolumeSource) Descriptor() ([]byte, []int) { - return 
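The identical XXX_* method blocks repeated for every message above are gogo/protobuf plumbing; the only thing the generator varies per type is the descriptor index. One detail worth knowing when reading them: MarshalToSizedBuffer fills the buffer from the end toward the front, which is why XXX_Marshal first extends `b` to its full capacity. A toy sketch of that backward-writing convention, with an invented one-byte length prefix standing in for the real protobuf wire format.

package main

import "fmt"

// marshalToSizedBuffer writes fields from the end of a pre-sized
// buffer toward the front, so the finished encoding occupies b[i:].
// The length-prefix wire format here is invented for illustration.
func marshalToSizedBuffer(b []byte, payload []byte) (int, error) {
	i := len(b)
	i -= len(payload)
	copy(b[i:], payload)
	i--
	b[i] = byte(len(payload))
	return len(b) - i, nil
}

func main() {
	msg := []byte("hello")
	buf := make([]byte, len(msg)+1)
	n, _ := marshalToSizedBuffer(buf, msg)
	fmt.Printf("wrote %d bytes: %q\n", n, buf[len(buf)-n:])
}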
fileDescriptor_83c10c24ec417dc9, []int{59} + return fileDescriptor_83c10c24ec417dc9, []int{58} } func (m *FlockerVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1732,7 +1704,7 @@ var xxx_messageInfo_FlockerVolumeSource proto.InternalMessageInfo func (m *GCEPersistentDiskVolumeSource) Reset() { *m = GCEPersistentDiskVolumeSource{} } func (*GCEPersistentDiskVolumeSource) ProtoMessage() {} func (*GCEPersistentDiskVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{60} + return fileDescriptor_83c10c24ec417dc9, []int{59} } func (m *GCEPersistentDiskVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1757,6 +1729,34 @@ func (m *GCEPersistentDiskVolumeSource) XXX_DiscardUnknown() { var xxx_messageInfo_GCEPersistentDiskVolumeSource proto.InternalMessageInfo +func (m *GRPCAction) Reset() { *m = GRPCAction{} } +func (*GRPCAction) ProtoMessage() {} +func (*GRPCAction) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{60} +} +func (m *GRPCAction) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GRPCAction) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *GRPCAction) XXX_Merge(src proto.Message) { + xxx_messageInfo_GRPCAction.Merge(m, src) +} +func (m *GRPCAction) XXX_Size() int { + return m.Size() +} +func (m *GRPCAction) XXX_DiscardUnknown() { + xxx_messageInfo_GRPCAction.DiscardUnknown(m) +} + +var xxx_messageInfo_GRPCAction proto.InternalMessageInfo + func (m *GitRepoVolumeSource) Reset() { *m = GitRepoVolumeSource{} } func (*GitRepoVolumeSource) ProtoMessage() {} func (*GitRepoVolumeSource) Descriptor() ([]byte, []int) { @@ -1897,38 +1897,10 @@ func (m *HTTPHeader) XXX_DiscardUnknown() { var xxx_messageInfo_HTTPHeader proto.InternalMessageInfo -func (m *Handler) Reset() { *m = Handler{} } -func (*Handler) ProtoMessage() {} -func (*Handler) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{66} -} -func (m *Handler) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Handler) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *Handler) XXX_Merge(src proto.Message) { - xxx_messageInfo_Handler.Merge(m, src) -} -func (m *Handler) XXX_Size() int { - return m.Size() -} -func (m *Handler) XXX_DiscardUnknown() { - xxx_messageInfo_Handler.DiscardUnknown(m) -} - -var xxx_messageInfo_Handler proto.InternalMessageInfo - func (m *HostAlias) Reset() { *m = HostAlias{} } func (*HostAlias) ProtoMessage() {} func (*HostAlias) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{67} + return fileDescriptor_83c10c24ec417dc9, []int{66} } func (m *HostAlias) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1956,7 +1928,7 @@ var xxx_messageInfo_HostAlias proto.InternalMessageInfo func (m *HostPathVolumeSource) Reset() { *m = HostPathVolumeSource{} } func (*HostPathVolumeSource) ProtoMessage() {} func (*HostPathVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{68} + return fileDescriptor_83c10c24ec417dc9, []int{67} } func (m *HostPathVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1984,7 +1956,7 @@ var xxx_messageInfo_HostPathVolumeSource proto.InternalMessageInfo func (m 
*ISCSIPersistentVolumeSource) Reset() { *m = ISCSIPersistentVolumeSource{} } func (*ISCSIPersistentVolumeSource) ProtoMessage() {} func (*ISCSIPersistentVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{69} + return fileDescriptor_83c10c24ec417dc9, []int{68} } func (m *ISCSIPersistentVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2012,7 +1984,7 @@ var xxx_messageInfo_ISCSIPersistentVolumeSource proto.InternalMessageInfo func (m *ISCSIVolumeSource) Reset() { *m = ISCSIVolumeSource{} } func (*ISCSIVolumeSource) ProtoMessage() {} func (*ISCSIVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{70} + return fileDescriptor_83c10c24ec417dc9, []int{69} } func (m *ISCSIVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2040,7 +2012,7 @@ var xxx_messageInfo_ISCSIVolumeSource proto.InternalMessageInfo func (m *KeyToPath) Reset() { *m = KeyToPath{} } func (*KeyToPath) ProtoMessage() {} func (*KeyToPath) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{71} + return fileDescriptor_83c10c24ec417dc9, []int{70} } func (m *KeyToPath) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2068,7 +2040,7 @@ var xxx_messageInfo_KeyToPath proto.InternalMessageInfo func (m *Lifecycle) Reset() { *m = Lifecycle{} } func (*Lifecycle) ProtoMessage() {} func (*Lifecycle) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{72} + return fileDescriptor_83c10c24ec417dc9, []int{71} } func (m *Lifecycle) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2093,6 +2065,34 @@ func (m *Lifecycle) XXX_DiscardUnknown() { var xxx_messageInfo_Lifecycle proto.InternalMessageInfo +func (m *LifecycleHandler) Reset() { *m = LifecycleHandler{} } +func (*LifecycleHandler) ProtoMessage() {} +func (*LifecycleHandler) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{72} +} +func (m *LifecycleHandler) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *LifecycleHandler) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *LifecycleHandler) XXX_Merge(src proto.Message) { + xxx_messageInfo_LifecycleHandler.Merge(m, src) +} +func (m *LifecycleHandler) XXX_Size() int { + return m.Size() +} +func (m *LifecycleHandler) XXX_DiscardUnknown() { + xxx_messageInfo_LifecycleHandler.DiscardUnknown(m) +} + +var xxx_messageInfo_LifecycleHandler proto.InternalMessageInfo + func (m *LimitRange) Reset() { *m = LimitRange{} } func (*LimitRange) ProtoMessage() {} func (*LimitRange) Descriptor() ([]byte, []int) { @@ -3717,10 +3717,38 @@ func (m *PodLogOptions) XXX_DiscardUnknown() { var xxx_messageInfo_PodLogOptions proto.InternalMessageInfo +func (m *PodOS) Reset() { *m = PodOS{} } +func (*PodOS) ProtoMessage() {} +func (*PodOS) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{131} +} +func (m *PodOS) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodOS) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PodOS) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodOS.Merge(m, src) +} +func (m *PodOS) XXX_Size() int { + return m.Size() +} +func (m *PodOS) XXX_DiscardUnknown() { + 
xxx_messageInfo_PodOS.DiscardUnknown(m) +} + +var xxx_messageInfo_PodOS proto.InternalMessageInfo + func (m *PodPortForwardOptions) Reset() { *m = PodPortForwardOptions{} } func (*PodPortForwardOptions) ProtoMessage() {} func (*PodPortForwardOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{131} + return fileDescriptor_83c10c24ec417dc9, []int{132} } func (m *PodPortForwardOptions) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3748,7 +3776,7 @@ var xxx_messageInfo_PodPortForwardOptions proto.InternalMessageInfo func (m *PodProxyOptions) Reset() { *m = PodProxyOptions{} } func (*PodProxyOptions) ProtoMessage() {} func (*PodProxyOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{132} + return fileDescriptor_83c10c24ec417dc9, []int{133} } func (m *PodProxyOptions) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3776,7 +3804,7 @@ var xxx_messageInfo_PodProxyOptions proto.InternalMessageInfo func (m *PodReadinessGate) Reset() { *m = PodReadinessGate{} } func (*PodReadinessGate) ProtoMessage() {} func (*PodReadinessGate) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{133} + return fileDescriptor_83c10c24ec417dc9, []int{134} } func (m *PodReadinessGate) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3804,7 +3832,7 @@ var xxx_messageInfo_PodReadinessGate proto.InternalMessageInfo func (m *PodSecurityContext) Reset() { *m = PodSecurityContext{} } func (*PodSecurityContext) ProtoMessage() {} func (*PodSecurityContext) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{134} + return fileDescriptor_83c10c24ec417dc9, []int{135} } func (m *PodSecurityContext) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3832,7 +3860,7 @@ var xxx_messageInfo_PodSecurityContext proto.InternalMessageInfo func (m *PodSignature) Reset() { *m = PodSignature{} } func (*PodSignature) ProtoMessage() {} func (*PodSignature) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{135} + return fileDescriptor_83c10c24ec417dc9, []int{136} } func (m *PodSignature) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3860,7 +3888,7 @@ var xxx_messageInfo_PodSignature proto.InternalMessageInfo func (m *PodSpec) Reset() { *m = PodSpec{} } func (*PodSpec) ProtoMessage() {} func (*PodSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{136} + return fileDescriptor_83c10c24ec417dc9, []int{137} } func (m *PodSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3888,7 +3916,7 @@ var xxx_messageInfo_PodSpec proto.InternalMessageInfo func (m *PodStatus) Reset() { *m = PodStatus{} } func (*PodStatus) ProtoMessage() {} func (*PodStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{137} + return fileDescriptor_83c10c24ec417dc9, []int{138} } func (m *PodStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3916,7 +3944,7 @@ var xxx_messageInfo_PodStatus proto.InternalMessageInfo func (m *PodStatusResult) Reset() { *m = PodStatusResult{} } func (*PodStatusResult) ProtoMessage() {} func (*PodStatusResult) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{138} + return fileDescriptor_83c10c24ec417dc9, []int{139} } func (m *PodStatusResult) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3944,7 +3972,7 @@ var xxx_messageInfo_PodStatusResult proto.InternalMessageInfo func (m *PodTemplate) Reset() { *m = PodTemplate{} } 
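PodOS, registered above at descriptor index 131 (which pushes every later message up by one), is small enough to sketch in full. The field and constant shapes below match upstream Kubernetes as of this vendor bump, but treat them as an assumption rather than part of this patch.

package main

import "fmt"

// OSName identifies a pod's target operating system.
type OSName string

const (
	Linux   OSName = "linux"
	Windows OSName = "windows"
)

// PodOS carries the single Name field that the new message encodes.
type PodOS struct {
	Name OSName
}

func main() {
	os := PodOS{Name: Linux}
	fmt.Println("pod targets", os.Name)
}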
func (*PodTemplate) ProtoMessage() {} func (*PodTemplate) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{139} + return fileDescriptor_83c10c24ec417dc9, []int{140} } func (m *PodTemplate) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3972,7 +4000,7 @@ var xxx_messageInfo_PodTemplate proto.InternalMessageInfo func (m *PodTemplateList) Reset() { *m = PodTemplateList{} } func (*PodTemplateList) ProtoMessage() {} func (*PodTemplateList) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{140} + return fileDescriptor_83c10c24ec417dc9, []int{141} } func (m *PodTemplateList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4000,7 +4028,7 @@ var xxx_messageInfo_PodTemplateList proto.InternalMessageInfo func (m *PodTemplateSpec) Reset() { *m = PodTemplateSpec{} } func (*PodTemplateSpec) ProtoMessage() {} func (*PodTemplateSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{141} + return fileDescriptor_83c10c24ec417dc9, []int{142} } func (m *PodTemplateSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4028,7 +4056,7 @@ var xxx_messageInfo_PodTemplateSpec proto.InternalMessageInfo func (m *PortStatus) Reset() { *m = PortStatus{} } func (*PortStatus) ProtoMessage() {} func (*PortStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{142} + return fileDescriptor_83c10c24ec417dc9, []int{143} } func (m *PortStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4056,7 +4084,7 @@ var xxx_messageInfo_PortStatus proto.InternalMessageInfo func (m *PortworxVolumeSource) Reset() { *m = PortworxVolumeSource{} } func (*PortworxVolumeSource) ProtoMessage() {} func (*PortworxVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{143} + return fileDescriptor_83c10c24ec417dc9, []int{144} } func (m *PortworxVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4084,7 +4112,7 @@ var xxx_messageInfo_PortworxVolumeSource proto.InternalMessageInfo func (m *Preconditions) Reset() { *m = Preconditions{} } func (*Preconditions) ProtoMessage() {} func (*Preconditions) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{144} + return fileDescriptor_83c10c24ec417dc9, []int{145} } func (m *Preconditions) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4112,7 +4140,7 @@ var xxx_messageInfo_Preconditions proto.InternalMessageInfo func (m *PreferAvoidPodsEntry) Reset() { *m = PreferAvoidPodsEntry{} } func (*PreferAvoidPodsEntry) ProtoMessage() {} func (*PreferAvoidPodsEntry) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{145} + return fileDescriptor_83c10c24ec417dc9, []int{146} } func (m *PreferAvoidPodsEntry) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4140,7 +4168,7 @@ var xxx_messageInfo_PreferAvoidPodsEntry proto.InternalMessageInfo func (m *PreferredSchedulingTerm) Reset() { *m = PreferredSchedulingTerm{} } func (*PreferredSchedulingTerm) ProtoMessage() {} func (*PreferredSchedulingTerm) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{146} + return fileDescriptor_83c10c24ec417dc9, []int{147} } func (m *PreferredSchedulingTerm) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4168,7 +4196,7 @@ var xxx_messageInfo_PreferredSchedulingTerm proto.InternalMessageInfo func (m *Probe) Reset() { *m = Probe{} } func (*Probe) ProtoMessage() {} func (*Probe) Descriptor() ([]byte, []int) { - 
return fileDescriptor_83c10c24ec417dc9, []int{147} + return fileDescriptor_83c10c24ec417dc9, []int{148} } func (m *Probe) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4193,10 +4221,38 @@ func (m *Probe) XXX_DiscardUnknown() { var xxx_messageInfo_Probe proto.InternalMessageInfo +func (m *ProbeHandler) Reset() { *m = ProbeHandler{} } +func (*ProbeHandler) ProtoMessage() {} +func (*ProbeHandler) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{149} +} +func (m *ProbeHandler) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ProbeHandler) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ProbeHandler) XXX_Merge(src proto.Message) { + xxx_messageInfo_ProbeHandler.Merge(m, src) +} +func (m *ProbeHandler) XXX_Size() int { + return m.Size() +} +func (m *ProbeHandler) XXX_DiscardUnknown() { + xxx_messageInfo_ProbeHandler.DiscardUnknown(m) +} + +var xxx_messageInfo_ProbeHandler proto.InternalMessageInfo + func (m *ProjectedVolumeSource) Reset() { *m = ProjectedVolumeSource{} } func (*ProjectedVolumeSource) ProtoMessage() {} func (*ProjectedVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{148} + return fileDescriptor_83c10c24ec417dc9, []int{150} } func (m *ProjectedVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4224,7 +4280,7 @@ var xxx_messageInfo_ProjectedVolumeSource proto.InternalMessageInfo func (m *QuobyteVolumeSource) Reset() { *m = QuobyteVolumeSource{} } func (*QuobyteVolumeSource) ProtoMessage() {} func (*QuobyteVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{149} + return fileDescriptor_83c10c24ec417dc9, []int{151} } func (m *QuobyteVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4252,7 +4308,7 @@ var xxx_messageInfo_QuobyteVolumeSource proto.InternalMessageInfo func (m *RBDPersistentVolumeSource) Reset() { *m = RBDPersistentVolumeSource{} } func (*RBDPersistentVolumeSource) ProtoMessage() {} func (*RBDPersistentVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{150} + return fileDescriptor_83c10c24ec417dc9, []int{152} } func (m *RBDPersistentVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4280,7 +4336,7 @@ var xxx_messageInfo_RBDPersistentVolumeSource proto.InternalMessageInfo func (m *RBDVolumeSource) Reset() { *m = RBDVolumeSource{} } func (*RBDVolumeSource) ProtoMessage() {} func (*RBDVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{151} + return fileDescriptor_83c10c24ec417dc9, []int{153} } func (m *RBDVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4308,7 +4364,7 @@ var xxx_messageInfo_RBDVolumeSource proto.InternalMessageInfo func (m *RangeAllocation) Reset() { *m = RangeAllocation{} } func (*RangeAllocation) ProtoMessage() {} func (*RangeAllocation) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{152} + return fileDescriptor_83c10c24ec417dc9, []int{154} } func (m *RangeAllocation) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4336,7 +4392,7 @@ var xxx_messageInfo_RangeAllocation proto.InternalMessageInfo func (m *ReplicationController) Reset() { *m = ReplicationController{} } func (*ReplicationController) ProtoMessage() {} func (*ReplicationController) Descriptor() ([]byte, 
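The addition of ProbeHandler here, together with LifecycleHandler earlier and the removal of the shared Handler type, reflects the upstream split of probe actions from lifecycle-hook actions, with the new GRPCAction available only on probes. A hedged, heavily reduced sketch of the resulting shapes; the field sets are trimmed, though `Port int32` and `Service *string` do match the upstream GRPCAction.

package main

import "fmt"

type ExecAction struct{ Command []string }

// GRPCAction is the new probe mechanism added in this diff.
type GRPCAction struct {
	Port    int32
	Service *string
}

// LifecycleHandler replaces Handler for postStart/preStop hooks.
type LifecycleHandler struct {
	Exec *ExecAction
	// HTTPGet and TCPSocket elided for brevity.
}

// ProbeHandler replaces Handler inside Probe and additionally
// carries GRPC, which lifecycle hooks do not get.
type ProbeHandler struct {
	Exec *ExecAction
	GRPC *GRPCAction
	// HTTPGet and TCPSocket elided for brevity.
}

func main() {
	svc := "readiness"
	p := ProbeHandler{GRPC: &GRPCAction{Port: 8080, Service: &svc}}
	lh := LifecycleHandler{Exec: &ExecAction{Command: []string{"/bin/sh", "-c", "echo bye"}}}
	fmt.Println(p.GRPC.Port, *p.GRPC.Service, lh.Exec.Command)
}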
[]int) { - return fileDescriptor_83c10c24ec417dc9, []int{153} + return fileDescriptor_83c10c24ec417dc9, []int{155} } func (m *ReplicationController) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4364,7 +4420,7 @@ var xxx_messageInfo_ReplicationController proto.InternalMessageInfo func (m *ReplicationControllerCondition) Reset() { *m = ReplicationControllerCondition{} } func (*ReplicationControllerCondition) ProtoMessage() {} func (*ReplicationControllerCondition) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{154} + return fileDescriptor_83c10c24ec417dc9, []int{156} } func (m *ReplicationControllerCondition) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4392,7 +4448,7 @@ var xxx_messageInfo_ReplicationControllerCondition proto.InternalMessageInfo func (m *ReplicationControllerList) Reset() { *m = ReplicationControllerList{} } func (*ReplicationControllerList) ProtoMessage() {} func (*ReplicationControllerList) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{155} + return fileDescriptor_83c10c24ec417dc9, []int{157} } func (m *ReplicationControllerList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4420,7 +4476,7 @@ var xxx_messageInfo_ReplicationControllerList proto.InternalMessageInfo func (m *ReplicationControllerSpec) Reset() { *m = ReplicationControllerSpec{} } func (*ReplicationControllerSpec) ProtoMessage() {} func (*ReplicationControllerSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{156} + return fileDescriptor_83c10c24ec417dc9, []int{158} } func (m *ReplicationControllerSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4448,7 +4504,7 @@ var xxx_messageInfo_ReplicationControllerSpec proto.InternalMessageInfo func (m *ReplicationControllerStatus) Reset() { *m = ReplicationControllerStatus{} } func (*ReplicationControllerStatus) ProtoMessage() {} func (*ReplicationControllerStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{157} + return fileDescriptor_83c10c24ec417dc9, []int{159} } func (m *ReplicationControllerStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4476,7 +4532,7 @@ var xxx_messageInfo_ReplicationControllerStatus proto.InternalMessageInfo func (m *ResourceFieldSelector) Reset() { *m = ResourceFieldSelector{} } func (*ResourceFieldSelector) ProtoMessage() {} func (*ResourceFieldSelector) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{158} + return fileDescriptor_83c10c24ec417dc9, []int{160} } func (m *ResourceFieldSelector) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4504,7 +4560,7 @@ var xxx_messageInfo_ResourceFieldSelector proto.InternalMessageInfo func (m *ResourceQuota) Reset() { *m = ResourceQuota{} } func (*ResourceQuota) ProtoMessage() {} func (*ResourceQuota) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{159} + return fileDescriptor_83c10c24ec417dc9, []int{161} } func (m *ResourceQuota) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4532,7 +4588,7 @@ var xxx_messageInfo_ResourceQuota proto.InternalMessageInfo func (m *ResourceQuotaList) Reset() { *m = ResourceQuotaList{} } func (*ResourceQuotaList) ProtoMessage() {} func (*ResourceQuotaList) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{160} + return fileDescriptor_83c10c24ec417dc9, []int{162} } func (m *ResourceQuotaList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4560,7 +4616,7 
@@ var xxx_messageInfo_ResourceQuotaList proto.InternalMessageInfo func (m *ResourceQuotaSpec) Reset() { *m = ResourceQuotaSpec{} } func (*ResourceQuotaSpec) ProtoMessage() {} func (*ResourceQuotaSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{161} + return fileDescriptor_83c10c24ec417dc9, []int{163} } func (m *ResourceQuotaSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4588,7 +4644,7 @@ var xxx_messageInfo_ResourceQuotaSpec proto.InternalMessageInfo func (m *ResourceQuotaStatus) Reset() { *m = ResourceQuotaStatus{} } func (*ResourceQuotaStatus) ProtoMessage() {} func (*ResourceQuotaStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{162} + return fileDescriptor_83c10c24ec417dc9, []int{164} } func (m *ResourceQuotaStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4616,7 +4672,7 @@ var xxx_messageInfo_ResourceQuotaStatus proto.InternalMessageInfo func (m *ResourceRequirements) Reset() { *m = ResourceRequirements{} } func (*ResourceRequirements) ProtoMessage() {} func (*ResourceRequirements) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{163} + return fileDescriptor_83c10c24ec417dc9, []int{165} } func (m *ResourceRequirements) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4644,7 +4700,7 @@ var xxx_messageInfo_ResourceRequirements proto.InternalMessageInfo func (m *SELinuxOptions) Reset() { *m = SELinuxOptions{} } func (*SELinuxOptions) ProtoMessage() {} func (*SELinuxOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{164} + return fileDescriptor_83c10c24ec417dc9, []int{166} } func (m *SELinuxOptions) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4672,7 +4728,7 @@ var xxx_messageInfo_SELinuxOptions proto.InternalMessageInfo func (m *ScaleIOPersistentVolumeSource) Reset() { *m = ScaleIOPersistentVolumeSource{} } func (*ScaleIOPersistentVolumeSource) ProtoMessage() {} func (*ScaleIOPersistentVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{165} + return fileDescriptor_83c10c24ec417dc9, []int{167} } func (m *ScaleIOPersistentVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4700,7 +4756,7 @@ var xxx_messageInfo_ScaleIOPersistentVolumeSource proto.InternalMessageInfo func (m *ScaleIOVolumeSource) Reset() { *m = ScaleIOVolumeSource{} } func (*ScaleIOVolumeSource) ProtoMessage() {} func (*ScaleIOVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{166} + return fileDescriptor_83c10c24ec417dc9, []int{168} } func (m *ScaleIOVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4728,7 +4784,7 @@ var xxx_messageInfo_ScaleIOVolumeSource proto.InternalMessageInfo func (m *ScopeSelector) Reset() { *m = ScopeSelector{} } func (*ScopeSelector) ProtoMessage() {} func (*ScopeSelector) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{167} + return fileDescriptor_83c10c24ec417dc9, []int{169} } func (m *ScopeSelector) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4756,7 +4812,7 @@ var xxx_messageInfo_ScopeSelector proto.InternalMessageInfo func (m *ScopedResourceSelectorRequirement) Reset() { *m = ScopedResourceSelectorRequirement{} } func (*ScopedResourceSelectorRequirement) ProtoMessage() {} func (*ScopedResourceSelectorRequirement) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{168} + return 
fileDescriptor_83c10c24ec417dc9, []int{170} } func (m *ScopedResourceSelectorRequirement) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4784,7 +4840,7 @@ var xxx_messageInfo_ScopedResourceSelectorRequirement proto.InternalMessageInfo func (m *SeccompProfile) Reset() { *m = SeccompProfile{} } func (*SeccompProfile) ProtoMessage() {} func (*SeccompProfile) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{169} + return fileDescriptor_83c10c24ec417dc9, []int{171} } func (m *SeccompProfile) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4812,7 +4868,7 @@ var xxx_messageInfo_SeccompProfile proto.InternalMessageInfo func (m *Secret) Reset() { *m = Secret{} } func (*Secret) ProtoMessage() {} func (*Secret) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{170} + return fileDescriptor_83c10c24ec417dc9, []int{172} } func (m *Secret) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4840,7 +4896,7 @@ var xxx_messageInfo_Secret proto.InternalMessageInfo func (m *SecretEnvSource) Reset() { *m = SecretEnvSource{} } func (*SecretEnvSource) ProtoMessage() {} func (*SecretEnvSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{171} + return fileDescriptor_83c10c24ec417dc9, []int{173} } func (m *SecretEnvSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4868,7 +4924,7 @@ var xxx_messageInfo_SecretEnvSource proto.InternalMessageInfo func (m *SecretKeySelector) Reset() { *m = SecretKeySelector{} } func (*SecretKeySelector) ProtoMessage() {} func (*SecretKeySelector) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{172} + return fileDescriptor_83c10c24ec417dc9, []int{174} } func (m *SecretKeySelector) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4896,7 +4952,7 @@ var xxx_messageInfo_SecretKeySelector proto.InternalMessageInfo func (m *SecretList) Reset() { *m = SecretList{} } func (*SecretList) ProtoMessage() {} func (*SecretList) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{173} + return fileDescriptor_83c10c24ec417dc9, []int{175} } func (m *SecretList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4924,7 +4980,7 @@ var xxx_messageInfo_SecretList proto.InternalMessageInfo func (m *SecretProjection) Reset() { *m = SecretProjection{} } func (*SecretProjection) ProtoMessage() {} func (*SecretProjection) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{174} + return fileDescriptor_83c10c24ec417dc9, []int{176} } func (m *SecretProjection) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4952,7 +5008,7 @@ var xxx_messageInfo_SecretProjection proto.InternalMessageInfo func (m *SecretReference) Reset() { *m = SecretReference{} } func (*SecretReference) ProtoMessage() {} func (*SecretReference) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{175} + return fileDescriptor_83c10c24ec417dc9, []int{177} } func (m *SecretReference) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4980,7 +5036,7 @@ var xxx_messageInfo_SecretReference proto.InternalMessageInfo func (m *SecretVolumeSource) Reset() { *m = SecretVolumeSource{} } func (*SecretVolumeSource) ProtoMessage() {} func (*SecretVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{176} + return fileDescriptor_83c10c24ec417dc9, []int{178} } func (m *SecretVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ 
-5008,7 +5064,7 @@ var xxx_messageInfo_SecretVolumeSource proto.InternalMessageInfo func (m *SecurityContext) Reset() { *m = SecurityContext{} } func (*SecurityContext) ProtoMessage() {} func (*SecurityContext) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{177} + return fileDescriptor_83c10c24ec417dc9, []int{179} } func (m *SecurityContext) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5036,7 +5092,7 @@ var xxx_messageInfo_SecurityContext proto.InternalMessageInfo func (m *SerializedReference) Reset() { *m = SerializedReference{} } func (*SerializedReference) ProtoMessage() {} func (*SerializedReference) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{178} + return fileDescriptor_83c10c24ec417dc9, []int{180} } func (m *SerializedReference) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5064,7 +5120,7 @@ var xxx_messageInfo_SerializedReference proto.InternalMessageInfo func (m *Service) Reset() { *m = Service{} } func (*Service) ProtoMessage() {} func (*Service) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{179} + return fileDescriptor_83c10c24ec417dc9, []int{181} } func (m *Service) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5092,7 +5148,7 @@ var xxx_messageInfo_Service proto.InternalMessageInfo func (m *ServiceAccount) Reset() { *m = ServiceAccount{} } func (*ServiceAccount) ProtoMessage() {} func (*ServiceAccount) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{180} + return fileDescriptor_83c10c24ec417dc9, []int{182} } func (m *ServiceAccount) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5120,7 +5176,7 @@ var xxx_messageInfo_ServiceAccount proto.InternalMessageInfo func (m *ServiceAccountList) Reset() { *m = ServiceAccountList{} } func (*ServiceAccountList) ProtoMessage() {} func (*ServiceAccountList) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{181} + return fileDescriptor_83c10c24ec417dc9, []int{183} } func (m *ServiceAccountList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5148,7 +5204,7 @@ var xxx_messageInfo_ServiceAccountList proto.InternalMessageInfo func (m *ServiceAccountTokenProjection) Reset() { *m = ServiceAccountTokenProjection{} } func (*ServiceAccountTokenProjection) ProtoMessage() {} func (*ServiceAccountTokenProjection) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{182} + return fileDescriptor_83c10c24ec417dc9, []int{184} } func (m *ServiceAccountTokenProjection) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5176,7 +5232,7 @@ var xxx_messageInfo_ServiceAccountTokenProjection proto.InternalMessageInfo func (m *ServiceList) Reset() { *m = ServiceList{} } func (*ServiceList) ProtoMessage() {} func (*ServiceList) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{183} + return fileDescriptor_83c10c24ec417dc9, []int{185} } func (m *ServiceList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5204,7 +5260,7 @@ var xxx_messageInfo_ServiceList proto.InternalMessageInfo func (m *ServicePort) Reset() { *m = ServicePort{} } func (*ServicePort) ProtoMessage() {} func (*ServicePort) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{184} + return fileDescriptor_83c10c24ec417dc9, []int{186} } func (m *ServicePort) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5232,7 +5288,7 @@ var xxx_messageInfo_ServicePort proto.InternalMessageInfo func 
(m *ServiceProxyOptions) Reset() { *m = ServiceProxyOptions{} } func (*ServiceProxyOptions) ProtoMessage() {} func (*ServiceProxyOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{185} + return fileDescriptor_83c10c24ec417dc9, []int{187} } func (m *ServiceProxyOptions) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5260,7 +5316,7 @@ var xxx_messageInfo_ServiceProxyOptions proto.InternalMessageInfo func (m *ServiceSpec) Reset() { *m = ServiceSpec{} } func (*ServiceSpec) ProtoMessage() {} func (*ServiceSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{186} + return fileDescriptor_83c10c24ec417dc9, []int{188} } func (m *ServiceSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5288,7 +5344,7 @@ var xxx_messageInfo_ServiceSpec proto.InternalMessageInfo func (m *ServiceStatus) Reset() { *m = ServiceStatus{} } func (*ServiceStatus) ProtoMessage() {} func (*ServiceStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{187} + return fileDescriptor_83c10c24ec417dc9, []int{189} } func (m *ServiceStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5316,7 +5372,7 @@ var xxx_messageInfo_ServiceStatus proto.InternalMessageInfo func (m *SessionAffinityConfig) Reset() { *m = SessionAffinityConfig{} } func (*SessionAffinityConfig) ProtoMessage() {} func (*SessionAffinityConfig) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{188} + return fileDescriptor_83c10c24ec417dc9, []int{190} } func (m *SessionAffinityConfig) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5344,7 +5400,7 @@ var xxx_messageInfo_SessionAffinityConfig proto.InternalMessageInfo func (m *StorageOSPersistentVolumeSource) Reset() { *m = StorageOSPersistentVolumeSource{} } func (*StorageOSPersistentVolumeSource) ProtoMessage() {} func (*StorageOSPersistentVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{189} + return fileDescriptor_83c10c24ec417dc9, []int{191} } func (m *StorageOSPersistentVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5372,7 +5428,7 @@ var xxx_messageInfo_StorageOSPersistentVolumeSource proto.InternalMessageInfo func (m *StorageOSVolumeSource) Reset() { *m = StorageOSVolumeSource{} } func (*StorageOSVolumeSource) ProtoMessage() {} func (*StorageOSVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{190} + return fileDescriptor_83c10c24ec417dc9, []int{192} } func (m *StorageOSVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5400,7 +5456,7 @@ var xxx_messageInfo_StorageOSVolumeSource proto.InternalMessageInfo func (m *Sysctl) Reset() { *m = Sysctl{} } func (*Sysctl) ProtoMessage() {} func (*Sysctl) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{191} + return fileDescriptor_83c10c24ec417dc9, []int{193} } func (m *Sysctl) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5428,7 +5484,7 @@ var xxx_messageInfo_Sysctl proto.InternalMessageInfo func (m *TCPSocketAction) Reset() { *m = TCPSocketAction{} } func (*TCPSocketAction) ProtoMessage() {} func (*TCPSocketAction) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{192} + return fileDescriptor_83c10c24ec417dc9, []int{194} } func (m *TCPSocketAction) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5456,7 +5512,7 @@ var xxx_messageInfo_TCPSocketAction proto.InternalMessageInfo func (m *Taint) 
Reset() { *m = Taint{} } func (*Taint) ProtoMessage() {} func (*Taint) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{193} + return fileDescriptor_83c10c24ec417dc9, []int{195} } func (m *Taint) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5484,7 +5540,7 @@ var xxx_messageInfo_Taint proto.InternalMessageInfo func (m *Toleration) Reset() { *m = Toleration{} } func (*Toleration) ProtoMessage() {} func (*Toleration) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{194} + return fileDescriptor_83c10c24ec417dc9, []int{196} } func (m *Toleration) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5512,7 +5568,7 @@ var xxx_messageInfo_Toleration proto.InternalMessageInfo func (m *TopologySelectorLabelRequirement) Reset() { *m = TopologySelectorLabelRequirement{} } func (*TopologySelectorLabelRequirement) ProtoMessage() {} func (*TopologySelectorLabelRequirement) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{195} + return fileDescriptor_83c10c24ec417dc9, []int{197} } func (m *TopologySelectorLabelRequirement) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5540,7 +5596,7 @@ var xxx_messageInfo_TopologySelectorLabelRequirement proto.InternalMessageInfo func (m *TopologySelectorTerm) Reset() { *m = TopologySelectorTerm{} } func (*TopologySelectorTerm) ProtoMessage() {} func (*TopologySelectorTerm) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{196} + return fileDescriptor_83c10c24ec417dc9, []int{198} } func (m *TopologySelectorTerm) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5568,7 +5624,7 @@ var xxx_messageInfo_TopologySelectorTerm proto.InternalMessageInfo func (m *TopologySpreadConstraint) Reset() { *m = TopologySpreadConstraint{} } func (*TopologySpreadConstraint) ProtoMessage() {} func (*TopologySpreadConstraint) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{197} + return fileDescriptor_83c10c24ec417dc9, []int{199} } func (m *TopologySpreadConstraint) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5596,7 +5652,7 @@ var xxx_messageInfo_TopologySpreadConstraint proto.InternalMessageInfo func (m *TypedLocalObjectReference) Reset() { *m = TypedLocalObjectReference{} } func (*TypedLocalObjectReference) ProtoMessage() {} func (*TypedLocalObjectReference) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{198} + return fileDescriptor_83c10c24ec417dc9, []int{200} } func (m *TypedLocalObjectReference) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5624,7 +5680,7 @@ var xxx_messageInfo_TypedLocalObjectReference proto.InternalMessageInfo func (m *Volume) Reset() { *m = Volume{} } func (*Volume) ProtoMessage() {} func (*Volume) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{199} + return fileDescriptor_83c10c24ec417dc9, []int{201} } func (m *Volume) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5652,7 +5708,7 @@ var xxx_messageInfo_Volume proto.InternalMessageInfo func (m *VolumeDevice) Reset() { *m = VolumeDevice{} } func (*VolumeDevice) ProtoMessage() {} func (*VolumeDevice) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{200} + return fileDescriptor_83c10c24ec417dc9, []int{202} } func (m *VolumeDevice) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5680,7 +5736,7 @@ var xxx_messageInfo_VolumeDevice proto.InternalMessageInfo func (m *VolumeMount) Reset() { *m = 
VolumeMount{} } func (*VolumeMount) ProtoMessage() {} func (*VolumeMount) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{201} + return fileDescriptor_83c10c24ec417dc9, []int{203} } func (m *VolumeMount) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5708,7 +5764,7 @@ var xxx_messageInfo_VolumeMount proto.InternalMessageInfo func (m *VolumeNodeAffinity) Reset() { *m = VolumeNodeAffinity{} } func (*VolumeNodeAffinity) ProtoMessage() {} func (*VolumeNodeAffinity) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{202} + return fileDescriptor_83c10c24ec417dc9, []int{204} } func (m *VolumeNodeAffinity) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5736,7 +5792,7 @@ var xxx_messageInfo_VolumeNodeAffinity proto.InternalMessageInfo func (m *VolumeProjection) Reset() { *m = VolumeProjection{} } func (*VolumeProjection) ProtoMessage() {} func (*VolumeProjection) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{203} + return fileDescriptor_83c10c24ec417dc9, []int{205} } func (m *VolumeProjection) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5764,7 +5820,7 @@ var xxx_messageInfo_VolumeProjection proto.InternalMessageInfo func (m *VolumeSource) Reset() { *m = VolumeSource{} } func (*VolumeSource) ProtoMessage() {} func (*VolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{204} + return fileDescriptor_83c10c24ec417dc9, []int{206} } func (m *VolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5792,7 +5848,7 @@ var xxx_messageInfo_VolumeSource proto.InternalMessageInfo func (m *VsphereVirtualDiskVolumeSource) Reset() { *m = VsphereVirtualDiskVolumeSource{} } func (*VsphereVirtualDiskVolumeSource) ProtoMessage() {} func (*VsphereVirtualDiskVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{205} + return fileDescriptor_83c10c24ec417dc9, []int{207} } func (m *VsphereVirtualDiskVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5820,7 +5876,7 @@ var xxx_messageInfo_VsphereVirtualDiskVolumeSource proto.InternalMessageInfo func (m *WeightedPodAffinityTerm) Reset() { *m = WeightedPodAffinityTerm{} } func (*WeightedPodAffinityTerm) ProtoMessage() {} func (*WeightedPodAffinityTerm) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{206} + return fileDescriptor_83c10c24ec417dc9, []int{208} } func (m *WeightedPodAffinityTerm) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5848,7 +5904,7 @@ var xxx_messageInfo_WeightedPodAffinityTerm proto.InternalMessageInfo func (m *WindowsSecurityContextOptions) Reset() { *m = WindowsSecurityContextOptions{} } func (*WindowsSecurityContextOptions) ProtoMessage() {} func (*WindowsSecurityContextOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{207} + return fileDescriptor_83c10c24ec417dc9, []int{209} } func (m *WindowsSecurityContextOptions) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5927,7 +5983,6 @@ func init() { proto.RegisterType((*EnvVarSource)(nil), "k8s.io.api.core.v1.EnvVarSource") proto.RegisterType((*EphemeralContainer)(nil), "k8s.io.api.core.v1.EphemeralContainer") proto.RegisterType((*EphemeralContainerCommon)(nil), "k8s.io.api.core.v1.EphemeralContainerCommon") - proto.RegisterType((*EphemeralContainers)(nil), "k8s.io.api.core.v1.EphemeralContainers") proto.RegisterType((*EphemeralVolumeSource)(nil), 
"k8s.io.api.core.v1.EphemeralVolumeSource") proto.RegisterType((*Event)(nil), "k8s.io.api.core.v1.Event") proto.RegisterType((*EventList)(nil), "k8s.io.api.core.v1.EventList") @@ -5941,18 +5996,19 @@ func init() { proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.core.v1.FlexVolumeSource.OptionsEntry") proto.RegisterType((*FlockerVolumeSource)(nil), "k8s.io.api.core.v1.FlockerVolumeSource") proto.RegisterType((*GCEPersistentDiskVolumeSource)(nil), "k8s.io.api.core.v1.GCEPersistentDiskVolumeSource") + proto.RegisterType((*GRPCAction)(nil), "k8s.io.api.core.v1.GRPCAction") proto.RegisterType((*GitRepoVolumeSource)(nil), "k8s.io.api.core.v1.GitRepoVolumeSource") proto.RegisterType((*GlusterfsPersistentVolumeSource)(nil), "k8s.io.api.core.v1.GlusterfsPersistentVolumeSource") proto.RegisterType((*GlusterfsVolumeSource)(nil), "k8s.io.api.core.v1.GlusterfsVolumeSource") proto.RegisterType((*HTTPGetAction)(nil), "k8s.io.api.core.v1.HTTPGetAction") proto.RegisterType((*HTTPHeader)(nil), "k8s.io.api.core.v1.HTTPHeader") - proto.RegisterType((*Handler)(nil), "k8s.io.api.core.v1.Handler") proto.RegisterType((*HostAlias)(nil), "k8s.io.api.core.v1.HostAlias") proto.RegisterType((*HostPathVolumeSource)(nil), "k8s.io.api.core.v1.HostPathVolumeSource") proto.RegisterType((*ISCSIPersistentVolumeSource)(nil), "k8s.io.api.core.v1.ISCSIPersistentVolumeSource") proto.RegisterType((*ISCSIVolumeSource)(nil), "k8s.io.api.core.v1.ISCSIVolumeSource") proto.RegisterType((*KeyToPath)(nil), "k8s.io.api.core.v1.KeyToPath") proto.RegisterType((*Lifecycle)(nil), "k8s.io.api.core.v1.Lifecycle") + proto.RegisterType((*LifecycleHandler)(nil), "k8s.io.api.core.v1.LifecycleHandler") proto.RegisterType((*LimitRange)(nil), "k8s.io.api.core.v1.LimitRange") proto.RegisterType((*LimitRangeItem)(nil), "k8s.io.api.core.v1.LimitRangeItem") proto.RegisterMapType((ResourceList)(nil), "k8s.io.api.core.v1.LimitRangeItem.DefaultEntry") @@ -6000,6 +6056,7 @@ func init() { proto.RegisterType((*PersistentVolumeClaimList)(nil), "k8s.io.api.core.v1.PersistentVolumeClaimList") proto.RegisterType((*PersistentVolumeClaimSpec)(nil), "k8s.io.api.core.v1.PersistentVolumeClaimSpec") proto.RegisterType((*PersistentVolumeClaimStatus)(nil), "k8s.io.api.core.v1.PersistentVolumeClaimStatus") + proto.RegisterMapType((ResourceList)(nil), "k8s.io.api.core.v1.PersistentVolumeClaimStatus.AllocatedResourcesEntry") proto.RegisterMapType((ResourceList)(nil), "k8s.io.api.core.v1.PersistentVolumeClaimStatus.CapacityEntry") proto.RegisterType((*PersistentVolumeClaimTemplate)(nil), "k8s.io.api.core.v1.PersistentVolumeClaimTemplate") proto.RegisterType((*PersistentVolumeClaimVolumeSource)(nil), "k8s.io.api.core.v1.PersistentVolumeClaimVolumeSource") @@ -6021,6 +6078,7 @@ func init() { proto.RegisterType((*PodIP)(nil), "k8s.io.api.core.v1.PodIP") proto.RegisterType((*PodList)(nil), "k8s.io.api.core.v1.PodList") proto.RegisterType((*PodLogOptions)(nil), "k8s.io.api.core.v1.PodLogOptions") + proto.RegisterType((*PodOS)(nil), "k8s.io.api.core.v1.PodOS") proto.RegisterType((*PodPortForwardOptions)(nil), "k8s.io.api.core.v1.PodPortForwardOptions") proto.RegisterType((*PodProxyOptions)(nil), "k8s.io.api.core.v1.PodProxyOptions") proto.RegisterType((*PodReadinessGate)(nil), "k8s.io.api.core.v1.PodReadinessGate") @@ -6040,6 +6098,7 @@ func init() { proto.RegisterType((*PreferAvoidPodsEntry)(nil), "k8s.io.api.core.v1.PreferAvoidPodsEntry") proto.RegisterType((*PreferredSchedulingTerm)(nil), "k8s.io.api.core.v1.PreferredSchedulingTerm") proto.RegisterType((*Probe)(nil), 
"k8s.io.api.core.v1.Probe") + proto.RegisterType((*ProbeHandler)(nil), "k8s.io.api.core.v1.ProbeHandler") proto.RegisterType((*ProjectedVolumeSource)(nil), "k8s.io.api.core.v1.ProjectedVolumeSource") proto.RegisterType((*QuobyteVolumeSource)(nil), "k8s.io.api.core.v1.QuobyteVolumeSource") proto.RegisterType((*RBDPersistentVolumeSource)(nil), "k8s.io.api.core.v1.RBDPersistentVolumeSource") @@ -6116,887 +6175,897 @@ func init() { } var fileDescriptor_83c10c24ec417dc9 = []byte{ - // 14066 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0xbd, 0x6b, 0x70, 0x1c, 0xd9, - 0x75, 0x18, 0xac, 0x9e, 0xc1, 0x6b, 0x0e, 0xde, 0x17, 0x24, 0x17, 0xc4, 0x2e, 0x09, 0x6e, 0x53, - 0xe2, 0x72, 0xb5, 0xbb, 0xa0, 0xb8, 0x0f, 0x69, 0xbd, 0x2b, 0xad, 0x05, 0x60, 0x00, 0x72, 0x96, - 0x04, 0x38, 0x7b, 0x07, 0x24, 0x25, 0x79, 0xa5, 0x52, 0x63, 0xe6, 0x02, 0x68, 0x61, 0xa6, 0x7b, - 0xb6, 0xbb, 0x07, 0x24, 0xf6, 0x93, 0xeb, 0xf3, 0x27, 0x3f, 0xe5, 0xc7, 0x57, 0xaa, 0x94, 0xf3, - 0xb2, 0x5d, 0xae, 0x94, 0xe3, 0x54, 0xac, 0x38, 0x49, 0xc5, 0xb1, 0x63, 0x3b, 0x96, 0x13, 0x3b, - 0x71, 0x1e, 0x4e, 0x7e, 0x38, 0x8e, 0x2b, 0xb1, 0x5c, 0xe5, 0x0a, 0x62, 0xd3, 0x49, 0xb9, 0xf4, - 0x23, 0xb6, 0x13, 0x3b, 0x3f, 0x82, 0xb8, 0xe2, 0xd4, 0x7d, 0xf6, 0xbd, 0xfd, 0x98, 0x19, 0x70, - 0x41, 0x68, 0xa5, 0xda, 0x7f, 0x33, 0xf7, 0x9c, 0x7b, 0xee, 0xed, 0xfb, 0x3c, 0xf7, 0x3c, 0xe1, - 0xd5, 0xdd, 0x97, 0xc3, 0x05, 0xd7, 0xbf, 0xb2, 0xdb, 0xd9, 0x24, 0x81, 0x47, 0x22, 0x12, 0x5e, - 0xd9, 0x23, 0x5e, 0xc3, 0x0f, 0xae, 0x08, 0x80, 0xd3, 0x76, 0xaf, 0xd4, 0xfd, 0x80, 0x5c, 0xd9, - 0xbb, 0x7a, 0x65, 0x9b, 0x78, 0x24, 0x70, 0x22, 0xd2, 0x58, 0x68, 0x07, 0x7e, 0xe4, 0x23, 0xc4, - 0x71, 0x16, 0x9c, 0xb6, 0xbb, 0x40, 0x71, 0x16, 0xf6, 0xae, 0xce, 0x3d, 0xb7, 0xed, 0x46, 0x3b, - 0x9d, 0xcd, 0x85, 0xba, 0xdf, 0xba, 0xb2, 0xed, 0x6f, 0xfb, 0x57, 0x18, 0xea, 0x66, 0x67, 0x8b, - 0xfd, 0x63, 0x7f, 0xd8, 0x2f, 0x4e, 0x62, 0xee, 0xc5, 0xb8, 0x99, 0x96, 0x53, 0xdf, 0x71, 0x3d, - 0x12, 0xec, 0x5f, 0x69, 0xef, 0x6e, 0xb3, 0x76, 0x03, 0x12, 0xfa, 0x9d, 0xa0, 0x4e, 0x92, 0x0d, - 0x77, 0xad, 0x15, 0x5e, 0x69, 0x91, 0xc8, 0xc9, 0xe8, 0xee, 0xdc, 0x95, 0xbc, 0x5a, 0x41, 0xc7, - 0x8b, 0xdc, 0x56, 0xba, 0x99, 0x0f, 0xf7, 0xaa, 0x10, 0xd6, 0x77, 0x48, 0xcb, 0x49, 0xd5, 0x7b, - 0x21, 0xaf, 0x5e, 0x27, 0x72, 0x9b, 0x57, 0x5c, 0x2f, 0x0a, 0xa3, 0x20, 0x59, 0xc9, 0xfe, 0xaa, - 0x05, 0x17, 0x16, 0xef, 0xd6, 0x56, 0x9a, 0x4e, 0x18, 0xb9, 0xf5, 0xa5, 0xa6, 0x5f, 0xdf, 0xad, - 0x45, 0x7e, 0x40, 0xee, 0xf8, 0xcd, 0x4e, 0x8b, 0xd4, 0xd8, 0x40, 0xa0, 0x67, 0x61, 0x64, 0x8f, - 0xfd, 0xaf, 0x94, 0x67, 0xad, 0x0b, 0xd6, 0xe5, 0xd2, 0xd2, 0xd4, 0xaf, 0x1f, 0xcc, 0xbf, 0xef, - 0xc1, 0xc1, 0xfc, 0xc8, 0x1d, 0x51, 0x8e, 0x15, 0x06, 0xba, 0x04, 0x43, 0x5b, 0xe1, 0xc6, 0x7e, - 0x9b, 0xcc, 0x16, 0x18, 0xee, 0x84, 0xc0, 0x1d, 0x5a, 0xad, 0xd1, 0x52, 0x2c, 0xa0, 0xe8, 0x0a, - 0x94, 0xda, 0x4e, 0x10, 0xb9, 0x91, 0xeb, 0x7b, 0xb3, 0xc5, 0x0b, 0xd6, 0xe5, 0xc1, 0xa5, 0x69, - 0x81, 0x5a, 0xaa, 0x4a, 0x00, 0x8e, 0x71, 0x68, 0x37, 0x02, 0xe2, 0x34, 0x6e, 0x79, 0xcd, 0xfd, - 0xd9, 0x81, 0x0b, 0xd6, 0xe5, 0x91, 0xb8, 0x1b, 0x58, 0x94, 0x63, 0x85, 0x61, 0xff, 0x48, 0x01, - 0x46, 0x16, 0xb7, 0xb6, 0x5c, 0xcf, 0x8d, 0xf6, 0xd1, 0x1d, 0x18, 0xf3, 0xfc, 0x06, 0x91, 0xff, - 0xd9, 0x57, 0x8c, 0x3e, 0x7f, 0x61, 0x21, 0xbd, 0x94, 0x16, 0xd6, 0x35, 0xbc, 0xa5, 0xa9, 0x07, - 0x07, 0xf3, 0x63, 0x7a, 0x09, 0x36, 0xe8, 0x20, 0x0c, 0xa3, 0x6d, 0xbf, 0xa1, 0xc8, 0x16, 0x18, - 0xd9, 0xf9, 0x2c, 0xb2, 0xd5, 0x18, 0x6d, 0x69, 0xf2, 0xc1, 0xc1, 0xfc, 0xa8, 
[... remaining bytes of the old and new gzipped FileDescriptorProto (fileDescriptor_83c10c24ec417dc9, ~14066 bytes per the comment above) elided: opaque compressed data with no reviewable content ...]
0xa1, 0x08, 0xb3, - 0x79, 0xdd, 0xa5, 0x8b, 0x8e, 0xdc, 0x77, 0xa3, 0x65, 0xca, 0x5e, 0x59, 0xe6, 0xa2, 0x5b, 0x11, - 0xe5, 0x58, 0x61, 0xd0, 0xd9, 0x0f, 0xdd, 0x6d, 0xf9, 0xc6, 0x1c, 0x8c, 0x67, 0xbf, 0xc6, 0x4a, - 0xb1, 0x80, 0x52, 0xbc, 0x80, 0x38, 0xa1, 0x30, 0xee, 0xd1, 0x56, 0x09, 0x66, 0xa5, 0x58, 0x40, - 0x75, 0x69, 0xd7, 0x40, 0x0f, 0x69, 0x97, 0x31, 0x44, 0x83, 0xc7, 0x3b, 0x44, 0xe8, 0x33, 0x00, - 0x5b, 0xae, 0xe7, 0x86, 0x3b, 0x8c, 0xfa, 0xd0, 0x91, 0xa9, 0x2b, 0xe6, 0x6c, 0x55, 0x51, 0xc1, - 0x1a, 0x45, 0xf4, 0x12, 0x8c, 0xaa, 0x0d, 0x58, 0x29, 0x33, 0x4d, 0xa7, 0x66, 0x39, 0x12, 0x9f, - 0x46, 0x65, 0xac, 0xe3, 0xd9, 0x9f, 0x4b, 0xae, 0x17, 0xb1, 0x03, 0xb4, 0xf1, 0xb5, 0xfa, 0x1d, - 0xdf, 0x42, 0xf7, 0xf1, 0xb5, 0xbf, 0x56, 0x84, 0x49, 0xa3, 0xb1, 0x4e, 0xd8, 0xc7, 0x99, 0x75, - 0x8d, 0x1e, 0xe0, 0x4e, 0x44, 0xc4, 0xfe, 0xb3, 0x7b, 0x6f, 0x15, 0xfd, 0x90, 0xa7, 0x3b, 0x80, - 0xd7, 0x47, 0x9f, 0x81, 0x52, 0xd3, 0x09, 0x99, 0xe4, 0x8c, 0x88, 0x7d, 0xd7, 0x0f, 0xb1, 0xf8, - 0x61, 0xe2, 0x84, 0x91, 0x76, 0x6b, 0x72, 0xda, 0x31, 0x49, 0x7a, 0xd3, 0x50, 0xfe, 0x44, 0x5a, - 0x8f, 0xa9, 0x4e, 0x50, 0x26, 0x66, 0x1f, 0x73, 0x18, 0x7a, 0x19, 0xc6, 0x02, 0xc2, 0x56, 0xc5, - 0x32, 0xe5, 0xe6, 0xd8, 0x32, 0x1b, 0x8c, 0xd9, 0x3e, 0xac, 0xc1, 0xb0, 0x81, 0x19, 0xbf, 0x0d, - 0x86, 0xba, 0xbc, 0x0d, 0x9e, 0x86, 0x61, 0xf6, 0x43, 0xad, 0x00, 0x35, 0x1b, 0x15, 0x5e, 0x8c, - 0x25, 0x3c, 0xb9, 0x60, 0x46, 0xfa, 0x5b, 0x30, 0xf4, 0xf5, 0x21, 0x16, 0x35, 0xd3, 0x32, 0x8f, - 0xf0, 0x53, 0x4e, 0x2c, 0x79, 0x2c, 0x61, 0xf6, 0x07, 0x61, 0xa2, 0xec, 0x90, 0x96, 0xef, 0xad, - 0x78, 0x8d, 0xb6, 0xef, 0x7a, 0x11, 0x9a, 0x85, 0x01, 0x76, 0x89, 0xf0, 0x23, 0x60, 0x80, 0x36, - 0x84, 0x59, 0x89, 0xbd, 0x0d, 0xa7, 0xcb, 0xfe, 0x3d, 0xef, 0x9e, 0x13, 0x34, 0x16, 0xab, 0x15, - 0xed, 0x7d, 0xbd, 0x2e, 0xdf, 0x77, 0xdc, 0x68, 0x2b, 0xf3, 0xe8, 0xd5, 0x6a, 0x72, 0xb6, 0x76, - 0xd5, 0x6d, 0x92, 0x1c, 0x29, 0xc8, 0x5f, 0x2d, 0x18, 0x2d, 0xc5, 0xf8, 0x4a, 0xab, 0x65, 0xe5, - 0x6a, 0xb5, 0xde, 0x80, 0x91, 0x2d, 0x97, 0x34, 0x1b, 0x98, 0x6c, 0x89, 0x95, 0xf8, 0x54, 0xbe, - 0x1d, 0xca, 0x2a, 0xc5, 0x94, 0x52, 0x2f, 0xfe, 0x3a, 0x5c, 0x15, 0x95, 0xb1, 0x22, 0x83, 0x76, - 0x61, 0x4a, 0x3e, 0x18, 0x24, 0x54, 0xac, 0xcb, 0xa7, 0xbb, 0xbd, 0x42, 0x4c, 0xe2, 0xa7, 0x1e, - 0x1c, 0xcc, 0x4f, 0xe1, 0x04, 0x19, 0x9c, 0x22, 0x4c, 0x9f, 0x83, 0x2d, 0x7a, 0x02, 0x0f, 0xb0, - 0xe1, 0x67, 0xcf, 0x41, 0xf6, 0xb2, 0x65, 0xa5, 0xf6, 0x8f, 0x59, 0xf0, 0x58, 0x6a, 0x64, 0xc4, - 0x0b, 0xff, 0x98, 0x67, 0x21, 0xf9, 0xe2, 0x2e, 0xf4, 0x7e, 0x71, 0xdb, 0x7f, 0xd7, 0x82, 0x53, - 0x2b, 0xad, 0x76, 0xb4, 0x5f, 0x76, 0x4d, 0x15, 0xd4, 0x47, 0x60, 0xa8, 0x45, 0x1a, 0x6e, 0xa7, - 0x25, 0x66, 0x6e, 0x5e, 0x9e, 0x52, 0x6b, 0xac, 0xf4, 0xf0, 0x60, 0x7e, 0xbc, 0x16, 0xf9, 0x81, - 0xb3, 0x4d, 0x78, 0x01, 0x16, 0xe8, 0xec, 0xac, 0x77, 0xdf, 0x26, 0x37, 0xdd, 0x96, 0x2b, 0xed, - 0x8a, 0xba, 0xca, 0xec, 0x16, 0xe4, 0x80, 0x2e, 0xbc, 0xd1, 0x71, 0xbc, 0xc8, 0x8d, 0xf6, 0x85, - 0xf6, 0x48, 0x12, 0xc1, 0x31, 0x3d, 0xfb, 0xab, 0x16, 0x4c, 0xca, 0x75, 0xbf, 0xd8, 0x68, 0x04, - 0x24, 0x0c, 0xd1, 0x1c, 0x14, 0xdc, 0xb6, 0xe8, 0x25, 0x88, 0x5e, 0x16, 0x2a, 0x55, 0x5c, 0x70, - 0xdb, 0x92, 0x2d, 0x63, 0x07, 0x61, 0xd1, 0x54, 0xa4, 0x5d, 0x17, 0xe5, 0x58, 0x61, 0xa0, 0xcb, - 0x30, 0xe2, 0xf9, 0x0d, 0x6e, 0xdb, 0xc5, 0xaf, 0x34, 0xb6, 0xc0, 0xd6, 0x45, 0x19, 0x56, 0x50, - 0x54, 0x85, 0x12, 0x37, 0x7b, 0x8a, 0x17, 0x6d, 0x5f, 0xc6, 0x53, 0xec, 0xcb, 0x36, 0x64, 0x4d, - 0x1c, 0x13, 0xb1, 0x7f, 0xc5, 0x82, 0x31, 0xf9, 0x65, 0x7d, 0xf2, 0x9c, 0x74, 0x6b, 0xc5, 0xfc, - 0x66, 
0xbc, 0xb5, 0x28, 0xcf, 0xc8, 0x20, 0x06, 0xab, 0x58, 0x3c, 0x12, 0xab, 0x78, 0x15, 0x46, - 0x9d, 0x76, 0xbb, 0x6a, 0xf2, 0x99, 0x6c, 0x29, 0x2d, 0xc6, 0xc5, 0x58, 0xc7, 0xb1, 0x7f, 0xb4, - 0x00, 0x13, 0xf2, 0x0b, 0x6a, 0x9d, 0xcd, 0x90, 0x44, 0x68, 0x03, 0x4a, 0x0e, 0x9f, 0x25, 0x22, - 0x17, 0xf9, 0xc5, 0x6c, 0x39, 0x82, 0x31, 0xa5, 0xf1, 0x85, 0xbf, 0x28, 0x6b, 0xe3, 0x98, 0x10, - 0x6a, 0xc2, 0xb4, 0xe7, 0x47, 0xec, 0xf0, 0x57, 0xf0, 0x6e, 0xaa, 0x9d, 0x24, 0xf5, 0xb3, 0x82, - 0xfa, 0xf4, 0x7a, 0x92, 0x0a, 0x4e, 0x13, 0x46, 0x2b, 0x52, 0x36, 0x53, 0xcc, 0x17, 0x06, 0xe8, - 0x13, 0x97, 0x2d, 0x9a, 0xb1, 0x7f, 0xd9, 0x82, 0x92, 0x44, 0x3b, 0x09, 0x2d, 0xde, 0x1a, 0x0c, - 0x87, 0x6c, 0x12, 0xe4, 0xd0, 0xd8, 0xdd, 0x3a, 0xce, 0xe7, 0x2b, 0xbe, 0xd3, 0xf8, 0xff, 0x10, - 0x4b, 0x1a, 0x4c, 0x34, 0xaf, 0xba, 0xff, 0x2e, 0x11, 0xcd, 0xab, 0xfe, 0xe4, 0x5c, 0x4a, 0x7f, - 0xc8, 0xfa, 0xac, 0xc9, 0xba, 0x28, 0xeb, 0xd5, 0x0e, 0xc8, 0x96, 0x7b, 0x3f, 0xc9, 0x7a, 0x55, - 0x59, 0x29, 0x16, 0x50, 0xf4, 0x26, 0x8c, 0xd5, 0xa5, 0x4c, 0x36, 0xde, 0xe1, 0x97, 0xba, 0xea, - 0x07, 0x94, 0x2a, 0x89, 0xcb, 0x42, 0x96, 0xb5, 0xfa, 0xd8, 0xa0, 0x66, 0x9a, 0x11, 0x14, 0x7b, - 0x99, 0x11, 0xc4, 0x74, 0xf3, 0x95, 0xea, 0x3f, 0x6e, 0xc1, 0x10, 0x97, 0xc5, 0xf5, 0x27, 0x0a, - 0xd5, 0x34, 0x6b, 0xf1, 0xd8, 0xdd, 0xa1, 0x85, 0x42, 0x53, 0x86, 0xd6, 0xa0, 0xc4, 0x7e, 0x30, - 0x59, 0x62, 0x31, 0xdf, 0xea, 0x9e, 0xb7, 0xaa, 0x77, 0xf0, 0x8e, 0xac, 0x86, 0x63, 0x0a, 0xf6, - 0x0f, 0x17, 0xe9, 0xe9, 0x16, 0xa3, 0x1a, 0x97, 0xbe, 0xf5, 0xe8, 0x2e, 0xfd, 0xc2, 0xa3, 0xba, - 0xf4, 0xb7, 0x61, 0xb2, 0xae, 0xe9, 0xe1, 0xe2, 0x99, 0xbc, 0xdc, 0x75, 0x91, 0x68, 0x2a, 0x3b, - 0x2e, 0x65, 0x59, 0x36, 0x89, 0xe0, 0x24, 0x55, 0xf4, 0x6d, 0x30, 0xc6, 0xe7, 0x59, 0xb4, 0xc2, - 0x2d, 0x31, 0x3e, 0x90, 0xbf, 0x5e, 0xf4, 0x26, 0xb8, 0x54, 0x4e, 0xab, 0x8e, 0x0d, 0x62, 0xf6, - 0x9f, 0x58, 0x80, 0x56, 0xda, 0x3b, 0xa4, 0x45, 0x02, 0xa7, 0x19, 0x8b, 0xd3, 0xbf, 0xdf, 0x82, - 0x59, 0x92, 0x2a, 0x5e, 0xf6, 0x5b, 0x2d, 0xf1, 0x68, 0xc9, 0x79, 0x57, 0xaf, 0xe4, 0xd4, 0x51, - 0x6e, 0x09, 0xb3, 0x79, 0x18, 0x38, 0xb7, 0x3d, 0xb4, 0x06, 0x33, 0xfc, 0x96, 0x54, 0x00, 0xcd, - 0xf6, 0xfa, 0x71, 0x41, 0x78, 0x66, 0x23, 0x8d, 0x82, 0xb3, 0xea, 0xd9, 0xdf, 0x35, 0x06, 0xb9, - 0xbd, 0x78, 0x4f, 0x8f, 0xf0, 0x9e, 0x1e, 0xe1, 0x3d, 0x3d, 0xc2, 0x7b, 0x7a, 0x84, 0xf7, 0xf4, - 0x08, 0xdf, 0xf4, 0x7a, 0x84, 0x3f, 0xb2, 0x60, 0x26, 0x7d, 0x0d, 0x9c, 0x04, 0x63, 0xde, 0x81, - 0x99, 0xf4, 0x5d, 0xd7, 0xd5, 0xce, 0x2e, 0xdd, 0xcf, 0xf8, 0xde, 0xcb, 0xf8, 0x06, 0x9c, 0x45, - 0xdf, 0xfe, 0xcb, 0x16, 0x9c, 0x56, 0xc8, 0xc6, 0x4b, 0xff, 0xf3, 0x30, 0xc3, 0xcf, 0x97, 0xe5, - 0xa6, 0xe3, 0xb6, 0x36, 0x48, 0xab, 0xdd, 0x74, 0x22, 0x69, 0x66, 0x70, 0x35, 0x73, 0xab, 0x26, - 0x4c, 0x74, 0x8d, 0x8a, 0x4b, 0x8f, 0xd1, 0x7e, 0x65, 0x00, 0x70, 0x56, 0x33, 0xf6, 0x2f, 0x8c, - 0xc0, 0xe0, 0xca, 0x1e, 0xf1, 0xa2, 0x13, 0x18, 0xfa, 0x3a, 0x4c, 0xb8, 0xde, 0x9e, 0xdf, 0xdc, - 0x23, 0x0d, 0x0e, 0x3f, 0xca, 0xd3, 0xfd, 0x8c, 0x20, 0x3d, 0x51, 0x31, 0x48, 0xe0, 0x04, 0xc9, - 0x47, 0x21, 0x3e, 0xbf, 0x06, 0x43, 0xfc, 0xd6, 0x12, 0xb2, 0xf3, 0xcc, 0x4b, 0x8a, 0x0d, 0xa2, - 0xb8, 0x8b, 0x63, 0xd1, 0x3e, 0xbf, 0x15, 0x45, 0x75, 0xf4, 0x39, 0x98, 0xd8, 0x72, 0x83, 0x30, - 0xda, 0x70, 0x5b, 0x24, 0x8c, 0x9c, 0x56, 0xfb, 0x21, 0xc4, 0xe5, 0x6a, 0x1c, 0x56, 0x0d, 0x4a, - 0x38, 0x41, 0x19, 0x6d, 0xc3, 0x78, 0xd3, 0xd1, 0x9b, 0x1a, 0x3e, 0x72, 0x53, 0xea, 0x3a, 0xbc, - 0xa9, 0x13, 0xc2, 0x26, 0x5d, 0x7a, 0x7e, 0xd4, 0x99, 0xc4, 0x77, 0x84, 0xc9, 0x41, 0xd4, 0xf9, - 0xc1, 0x45, 0xbd, 0x1c, 0x46, 
0x39, 0x3b, 0x66, 0x11, 0x5c, 0x32, 0x39, 0x3b, 0xcd, 0xee, 0xf7, - 0xb3, 0x50, 0x22, 0x74, 0x08, 0x29, 0x61, 0x71, 0xa3, 0x5e, 0xe9, 0xaf, 0xaf, 0x6b, 0x6e, 0x3d, - 0xf0, 0x4d, 0x45, 0xc5, 0x8a, 0xa4, 0x84, 0x63, 0xa2, 0x68, 0x19, 0x86, 0x42, 0x12, 0xb8, 0x24, - 0x14, 0x77, 0x6b, 0x97, 0x69, 0x64, 0x68, 0xdc, 0x99, 0x86, 0xff, 0xc6, 0xa2, 0x2a, 0x5d, 0x5e, - 0x0e, 0x93, 0xe1, 0xb2, 0xdb, 0x4f, 0x5b, 0x5e, 0x8b, 0xac, 0x14, 0x0b, 0x28, 0x7a, 0x1d, 0x86, - 0x03, 0xd2, 0x64, 0x9a, 0xb0, 0xf1, 0xfe, 0x17, 0x39, 0x57, 0xac, 0xf1, 0x7a, 0x58, 0x12, 0x40, - 0x37, 0x00, 0x05, 0x84, 0x72, 0x86, 0xae, 0xb7, 0xad, 0xec, 0x64, 0xc5, 0xcd, 0xa2, 0x4e, 0x22, - 0x1c, 0x63, 0x48, 0xbf, 0x26, 0x9c, 0x51, 0x0d, 0x5d, 0x83, 0x69, 0x55, 0x5a, 0xf1, 0xc2, 0xc8, - 0xa1, 0x27, 0xfa, 0x24, 0xa3, 0xa5, 0x04, 0x33, 0x38, 0x89, 0x80, 0xd3, 0x75, 0xec, 0x2f, 0x5b, - 0xc0, 0xc7, 0xf9, 0x04, 0xc4, 0x11, 0xaf, 0x99, 0xe2, 0x88, 0xb3, 0xb9, 0x33, 0x97, 0x23, 0x8a, - 0xf8, 0xb2, 0x05, 0xa3, 0xda, 0xcc, 0xc6, 0x6b, 0xd6, 0xea, 0xb2, 0x66, 0x3b, 0x30, 0x45, 0x57, - 0xfa, 0xad, 0xcd, 0x90, 0x04, 0x7b, 0xa4, 0xc1, 0x16, 0x66, 0xe1, 0xe1, 0x16, 0xa6, 0xb2, 0xc9, - 0xbb, 0x99, 0x20, 0x88, 0x53, 0x4d, 0xd8, 0x9f, 0x95, 0x5d, 0x55, 0x26, 0x8c, 0x75, 0x35, 0xe7, - 0x09, 0x13, 0x46, 0x35, 0xab, 0x38, 0xc6, 0xa1, 0x5b, 0x6d, 0xc7, 0x0f, 0xa3, 0xa4, 0x09, 0xe3, - 0x75, 0x3f, 0x8c, 0x30, 0x83, 0xd8, 0x2f, 0x00, 0xac, 0xdc, 0x27, 0x75, 0xbe, 0x62, 0xf5, 0xd7, - 0x92, 0x95, 0xff, 0x5a, 0xb2, 0x7f, 0xcb, 0x82, 0x89, 0xd5, 0x65, 0xe3, 0xe6, 0x5a, 0x00, 0xe0, - 0x4f, 0xbc, 0xbb, 0x77, 0xd7, 0xa5, 0xfe, 0x9f, 0xab, 0x70, 0x55, 0x29, 0xd6, 0x30, 0xd0, 0x59, - 0x28, 0x36, 0x3b, 0x9e, 0x90, 0x97, 0x0e, 0x53, 0x7e, 0xe0, 0x66, 0xc7, 0xc3, 0xb4, 0x4c, 0xf3, - 0xa1, 0x28, 0xf6, 0xed, 0x43, 0xd1, 0x33, 0x96, 0x01, 0x9a, 0x87, 0xc1, 0x7b, 0xf7, 0xdc, 0x06, - 0xf7, 0x18, 0x15, 0xb6, 0x09, 0x77, 0xef, 0x56, 0xca, 0x21, 0xe6, 0xe5, 0xf6, 0x97, 0x8a, 0x30, - 0xb7, 0xda, 0x24, 0xf7, 0xdf, 0xa1, 0xd7, 0x6c, 0xbf, 0x1e, 0x20, 0x47, 0x93, 0x3c, 0x1d, 0xd5, - 0xcb, 0xa7, 0xf7, 0x78, 0x6c, 0xc1, 0x30, 0xb7, 0xe0, 0x93, 0x3e, 0xb4, 0xaf, 0x66, 0xb5, 0x9e, - 0x3f, 0x20, 0x0b, 0xdc, 0x12, 0x50, 0xb8, 0x00, 0xaa, 0x0b, 0x53, 0x94, 0x62, 0x49, 0x7c, 0xee, - 0x15, 0x18, 0xd3, 0x31, 0x8f, 0xe4, 0x6f, 0xf7, 0xff, 0x15, 0x61, 0x8a, 0xf6, 0xe0, 0x91, 0x4e, - 0xc4, 0xed, 0xf4, 0x44, 0x1c, 0xb7, 0xcf, 0x55, 0xef, 0xd9, 0x78, 0x33, 0x39, 0x1b, 0x57, 0xf3, - 0x66, 0xe3, 0xa4, 0xe7, 0xe0, 0x3b, 0x2d, 0x98, 0x59, 0x6d, 0xfa, 0xf5, 0xdd, 0x84, 0x5f, 0xd4, - 0x4b, 0x30, 0x4a, 0x8f, 0xe3, 0xd0, 0x70, 0xd9, 0x37, 0x82, 0x38, 0x08, 0x10, 0xd6, 0xf1, 0xb4, - 0x6a, 0xb7, 0x6f, 0x57, 0xca, 0x59, 0xb1, 0x1f, 0x04, 0x08, 0xeb, 0x78, 0xf6, 0x6f, 0x58, 0x70, - 0xee, 0xda, 0xf2, 0x4a, 0xbc, 0x14, 0x53, 0xe1, 0x27, 0x2e, 0xc1, 0x50, 0xbb, 0xa1, 0x75, 0x25, - 0x96, 0x27, 0x97, 0x59, 0x2f, 0x04, 0xf4, 0xdd, 0x12, 0x5a, 0xe5, 0xa7, 0x2c, 0x98, 0xb9, 0xe6, - 0x46, 0xf4, 0x76, 0x4d, 0x06, 0x42, 0xa0, 0xd7, 0x6b, 0xe8, 0x46, 0x7e, 0xb0, 0x9f, 0x0c, 0x84, - 0x80, 0x15, 0x04, 0x6b, 0x58, 0xbc, 0xe5, 0x3d, 0x97, 0xd9, 0x8e, 0x17, 0x4c, 0xcd, 0x1a, 0x16, - 0xe5, 0x58, 0x61, 0xd0, 0x0f, 0x6b, 0xb8, 0x01, 0x13, 0x4a, 0xee, 0x8b, 0x13, 0x56, 0x7d, 0x58, - 0x59, 0x02, 0x70, 0x8c, 0x43, 0xdf, 0x67, 0xf3, 0xd7, 0x9a, 0x9d, 0x30, 0x22, 0xc1, 0x56, 0x98, - 0x73, 0x3a, 0xbe, 0x00, 0x25, 0x22, 0x55, 0x00, 0xa2, 0xd7, 0x8a, 0x63, 0x54, 0xba, 0x01, 0x1e, - 0x8f, 0x41, 0xe1, 0xf5, 0xe1, 0x65, 0x79, 0x34, 0x37, 0xb9, 0x55, 0x40, 0x44, 0x6f, 0x4b, 0x0f, - 0x50, 0xc1, 0x3c, 0xdd, 0x57, 0x52, 0x50, 0x9c, 0x51, 
0xc3, 0xfe, 0x31, 0x0b, 0x4e, 0xab, 0x0f, - 0x7e, 0xd7, 0x7d, 0xa6, 0xfd, 0xb3, 0x05, 0x18, 0xbf, 0xbe, 0xb1, 0x51, 0xbd, 0x46, 0x22, 0x71, - 0x6d, 0xf7, 0x56, 0xec, 0x63, 0x4d, 0x3f, 0xd9, 0xed, 0x31, 0xd7, 0x89, 0xdc, 0xe6, 0x02, 0x8f, - 0x73, 0xb4, 0x50, 0xf1, 0xa2, 0x5b, 0x41, 0x2d, 0x0a, 0x5c, 0x6f, 0x3b, 0x53, 0xa3, 0x29, 0x99, - 0x8b, 0x62, 0x1e, 0x73, 0x81, 0x5e, 0x80, 0x21, 0x16, 0x68, 0x49, 0x4e, 0xc2, 0xe3, 0xea, 0x2d, - 0xc4, 0x4a, 0x0f, 0x0f, 0xe6, 0x4b, 0xb7, 0x71, 0x85, 0xff, 0xc1, 0x02, 0x15, 0xdd, 0x86, 0xd1, - 0x9d, 0x28, 0x6a, 0x5f, 0x27, 0x4e, 0x83, 0x3e, 0xc6, 0xf9, 0x71, 0x78, 0x3e, 0xeb, 0x38, 0xa4, - 0x83, 0xc0, 0xd1, 0xe2, 0x13, 0x24, 0x2e, 0x0b, 0xb1, 0x4e, 0xc7, 0xae, 0x01, 0xc4, 0xb0, 0x63, - 0x52, 0xcd, 0xd8, 0x7f, 0x60, 0xc1, 0x30, 0x8f, 0x79, 0x11, 0xa0, 0x8f, 0xc2, 0x00, 0xb9, 0x4f, - 0xea, 0x82, 0xe3, 0xcd, 0xec, 0x70, 0xcc, 0x69, 0x71, 0x11, 0x33, 0xfd, 0x8f, 0x59, 0x2d, 0x74, - 0x1d, 0x86, 0x69, 0x6f, 0xaf, 0xa9, 0x00, 0x20, 0x4f, 0xe6, 0x7d, 0xb1, 0x9a, 0x76, 0xce, 0x9c, - 0x89, 0x22, 0x2c, 0xab, 0x33, 0x7d, 0x78, 0xbd, 0x5d, 0xa3, 0x27, 0x76, 0xd4, 0x8d, 0xb1, 0xd8, - 0x58, 0xae, 0x72, 0x24, 0x41, 0x8d, 0xeb, 0xc3, 0x65, 0x21, 0x8e, 0x89, 0xd8, 0x1b, 0x50, 0xa2, - 0x93, 0xba, 0xd8, 0x74, 0x9d, 0xee, 0x2a, 0xfe, 0x67, 0xa0, 0x24, 0x15, 0xf8, 0xa1, 0xf0, 0x75, - 0x67, 0x54, 0xa5, 0x7e, 0x3f, 0xc4, 0x31, 0xdc, 0xde, 0x82, 0x53, 0xcc, 0x1c, 0xd3, 0x89, 0x76, - 0x8c, 0x3d, 0xd6, 0x7b, 0x31, 0x3f, 0x2b, 0x1e, 0x90, 0x7c, 0x66, 0x66, 0x35, 0x77, 0xd2, 0x31, - 0x49, 0x31, 0x7e, 0x4c, 0xda, 0x5f, 0x1b, 0x80, 0xc7, 0x2b, 0xb5, 0xfc, 0x70, 0x28, 0x2f, 0xc3, - 0x18, 0xe7, 0x4b, 0xe9, 0xd2, 0x76, 0x9a, 0xa2, 0x5d, 0x25, 0x5b, 0xde, 0xd0, 0x60, 0xd8, 0xc0, - 0x44, 0xe7, 0xa0, 0xe8, 0xbe, 0xe5, 0x25, 0x9d, 0xad, 0x2a, 0x6f, 0xac, 0x63, 0x5a, 0x4e, 0xc1, - 0x94, 0xc5, 0xe5, 0x77, 0x87, 0x02, 0x2b, 0x36, 0xf7, 0x35, 0x98, 0x70, 0xc3, 0x7a, 0xe8, 0x56, - 0x3c, 0x7a, 0xce, 0x68, 0x27, 0x95, 0x12, 0x6e, 0xd0, 0x4e, 0x2b, 0x28, 0x4e, 0x60, 0x6b, 0x17, - 0xd9, 0x60, 0xdf, 0x6c, 0x72, 0x4f, 0xe7, 0x6f, 0xfa, 0x02, 0x68, 0xb3, 0xaf, 0x0b, 0x99, 0x92, - 0x40, 0xbc, 0x00, 0xf8, 0x07, 0x87, 0x58, 0xc2, 0xe8, 0xcb, 0xb1, 0xbe, 0xe3, 0xb4, 0x17, 0x3b, - 0xd1, 0x4e, 0xd9, 0x0d, 0xeb, 0xfe, 0x1e, 0x09, 0xf6, 0xd9, 0xa3, 0x7f, 0x24, 0x7e, 0x39, 0x2a, - 0xc0, 0xf2, 0xf5, 0xc5, 0x2a, 0xc5, 0xc4, 0xe9, 0x3a, 0x68, 0x11, 0x26, 0x65, 0x61, 0x8d, 0x84, - 0xec, 0x0a, 0x1b, 0x65, 0x64, 0x94, 0xfb, 0x93, 0x28, 0x56, 0x44, 0x92, 0xf8, 0x26, 0x27, 0x0d, - 0xc7, 0xc1, 0x49, 0x7f, 0x04, 0xc6, 0x5d, 0xcf, 0x8d, 0x5c, 0x27, 0xf2, 0xb9, 0x86, 0x8b, 0xbf, - 0xef, 0x99, 0xe8, 0xbe, 0xa2, 0x03, 0xb0, 0x89, 0x67, 0xff, 0x97, 0x01, 0x98, 0x66, 0xd3, 0xf6, - 0xde, 0x0a, 0xfb, 0x66, 0x5a, 0x61, 0xb7, 0xd3, 0x2b, 0xec, 0x38, 0x9e, 0x08, 0x0f, 0xbd, 0xcc, - 0x3e, 0x07, 0x25, 0xe5, 0xf1, 0x25, 0x5d, 0x3e, 0xad, 0x1c, 0x97, 0xcf, 0xde, 0xdc, 0x87, 0x34, - 0x9a, 0x2b, 0x66, 0x1a, 0xcd, 0xfd, 0x75, 0x0b, 0x62, 0x95, 0x0d, 0xba, 0x0e, 0xa5, 0xb6, 0xcf, - 0x6c, 0x41, 0x03, 0x69, 0x60, 0xfd, 0x78, 0xe6, 0x45, 0xc5, 0x2f, 0x45, 0xfe, 0xf1, 0x55, 0x59, - 0x03, 0xc7, 0x95, 0xd1, 0x12, 0x0c, 0xb7, 0x03, 0x52, 0x8b, 0x58, 0x54, 0x94, 0x9e, 0x74, 0xf8, - 0x1a, 0xe1, 0xf8, 0x58, 0x56, 0xb4, 0x7f, 0xce, 0x02, 0xe0, 0x76, 0x69, 0x8e, 0xb7, 0x4d, 0x4e, - 0x40, 0x6a, 0x5d, 0x86, 0x81, 0xb0, 0x4d, 0xea, 0xdd, 0xac, 0x74, 0xe3, 0xfe, 0xd4, 0xda, 0xa4, - 0x1e, 0x0f, 0x38, 0xfd, 0x87, 0x59, 0x6d, 0xfb, 0xbb, 0x01, 0x26, 0x62, 0xb4, 0x4a, 0x44, 0x5a, - 0xe8, 0x39, 0x23, 0x4a, 0xc2, 0xd9, 0x44, 0x94, 0x84, 0x12, 0xc3, 0xd6, 0x04, 
0xa4, 0x9f, 0x83, - 0x62, 0xcb, 0xb9, 0x2f, 0x24, 0x60, 0xcf, 0x74, 0xef, 0x06, 0xa5, 0xbf, 0xb0, 0xe6, 0xdc, 0xe7, - 0x8f, 0xc4, 0x67, 0xe4, 0x02, 0x59, 0x73, 0xee, 0x1f, 0x72, 0x5b, 0x5c, 0x76, 0x48, 0xdd, 0x74, - 0xc3, 0xe8, 0x0b, 0xff, 0x39, 0xfe, 0xcf, 0x96, 0x1d, 0x6d, 0x84, 0xb5, 0xe5, 0x7a, 0xc2, 0xe4, - 0xaa, 0xaf, 0xb6, 0x5c, 0x2f, 0xd9, 0x96, 0xeb, 0xf5, 0xd1, 0x96, 0xeb, 0xa1, 0xb7, 0x61, 0x58, - 0x58, 0x44, 0x8a, 0xa8, 0x44, 0x57, 0xfa, 0x68, 0x4f, 0x18, 0x54, 0xf2, 0x36, 0xaf, 0xc8, 0x47, - 0xb0, 0x28, 0xed, 0xd9, 0xae, 0x6c, 0x10, 0xfd, 0x15, 0x0b, 0x26, 0xc4, 0x6f, 0x4c, 0xde, 0xea, - 0x90, 0x30, 0x12, 0xbc, 0xe7, 0x87, 0xfb, 0xef, 0x83, 0xa8, 0xc8, 0xbb, 0xf2, 0x61, 0x79, 0xcc, - 0x9a, 0xc0, 0x9e, 0x3d, 0x4a, 0xf4, 0x02, 0xfd, 0x7d, 0x0b, 0x4e, 0xb5, 0x9c, 0xfb, 0xbc, 0x45, - 0x5e, 0x86, 0x9d, 0xc8, 0xf5, 0x85, 0x65, 0xc1, 0x47, 0xfb, 0x9b, 0xfe, 0x54, 0x75, 0xde, 0x49, - 0xa9, 0xfe, 0x3c, 0x95, 0x85, 0xd2, 0xb3, 0xab, 0x99, 0xfd, 0x9a, 0xdb, 0x82, 0x11, 0xb9, 0xde, - 0x32, 0x44, 0x0d, 0x65, 0x9d, 0xb1, 0x3e, 0xb2, 0x41, 0xaa, 0x1e, 0x7d, 0x80, 0xb6, 0x23, 0xd6, - 0xda, 0x23, 0x6d, 0xe7, 0x73, 0x30, 0xa6, 0xaf, 0xb1, 0x47, 0xda, 0xd6, 0x5b, 0x30, 0x93, 0xb1, - 0x96, 0x1e, 0x69, 0x93, 0xf7, 0xe0, 0x6c, 0xee, 0xfa, 0x78, 0x94, 0x0d, 0xdb, 0x3f, 0x6b, 0xe9, - 0xe7, 0xe0, 0x09, 0xa8, 0x0e, 0x96, 0x4d, 0xd5, 0xc1, 0xf9, 0xee, 0x3b, 0x27, 0x47, 0x7f, 0xf0, - 0xa6, 0xde, 0x69, 0x7a, 0xaa, 0xa3, 0xd7, 0x61, 0xa8, 0x49, 0x4b, 0xa4, 0x5d, 0xad, 0xdd, 0x7b, - 0x47, 0xc6, 0xbc, 0x14, 0x2b, 0x0f, 0xb1, 0xa0, 0x60, 0xff, 0xa2, 0x05, 0x03, 0x27, 0x30, 0x12, - 0xd8, 0x1c, 0x89, 0xe7, 0x72, 0x49, 0x8b, 0x80, 0xc9, 0x0b, 0xd8, 0xb9, 0xb7, 0x72, 0x3f, 0x22, - 0x5e, 0xc8, 0x9e, 0x8a, 0x99, 0x03, 0xf3, 0x93, 0x16, 0xcc, 0xdc, 0xf4, 0x9d, 0xc6, 0x92, 0xd3, - 0x74, 0xbc, 0x3a, 0x09, 0x2a, 0xde, 0xf6, 0x91, 0x8c, 0xc2, 0x0b, 0x3d, 0x8d, 0xc2, 0x97, 0xa5, - 0x4d, 0xd5, 0x40, 0xfe, 0xfc, 0x51, 0x46, 0x32, 0x19, 0x37, 0xc6, 0xb0, 0xfe, 0xdd, 0x01, 0xa4, - 0xf7, 0x52, 0xb8, 0xe8, 0x60, 0x18, 0x76, 0x79, 0x7f, 0xc5, 0x24, 0x3e, 0x95, 0xcd, 0xe0, 0xa5, - 0x3e, 0x4f, 0x73, 0x3e, 0xe1, 0x05, 0x58, 0x12, 0xb2, 0x5f, 0x86, 0x4c, 0x3f, 0xff, 0xde, 0xc2, - 0x07, 0xfb, 0x93, 0x30, 0xcd, 0x6a, 0x1e, 0xf1, 0x61, 0x6c, 0x27, 0x64, 0x9b, 0x19, 0x11, 0x00, - 0xed, 0x2f, 0x5a, 0x30, 0xb9, 0x9e, 0x08, 0x8c, 0x76, 0x89, 0x69, 0x43, 0x33, 0x44, 0xea, 0x35, - 0x56, 0x8a, 0x05, 0xf4, 0xd8, 0x25, 0x59, 0x7f, 0x61, 0x41, 0x1c, 0x7a, 0xe3, 0x04, 0xd8, 0xb7, - 0x65, 0x83, 0x7d, 0xcb, 0x94, 0xb0, 0xa8, 0xee, 0xe4, 0x71, 0x6f, 0xe8, 0x86, 0x0a, 0x4a, 0xd5, - 0x45, 0xb8, 0x12, 0x93, 0xe1, 0x4b, 0x71, 0xc2, 0x8c, 0x5c, 0x25, 0xc3, 0x54, 0xd9, 0xbf, 0x5d, - 0x00, 0xa4, 0x70, 0xfb, 0x0e, 0x9a, 0x95, 0xae, 0x71, 0x3c, 0x41, 0xb3, 0xf6, 0x00, 0x31, 0x7d, - 0x7e, 0xe0, 0x78, 0x21, 0x27, 0xeb, 0x0a, 0xd9, 0xdd, 0xd1, 0x8c, 0x05, 0xe6, 0x44, 0x93, 0xe8, - 0x66, 0x8a, 0x1a, 0xce, 0x68, 0x41, 0xb3, 0xd3, 0x18, 0xec, 0xd7, 0x4e, 0x63, 0xa8, 0x87, 0x1b, - 0xde, 0xcf, 0x58, 0x30, 0xae, 0x86, 0xe9, 0x5d, 0x62, 0x24, 0xaf, 0xfa, 0x93, 0x73, 0x80, 0x56, - 0xb5, 0x2e, 0xb3, 0x8b, 0xe5, 0x5b, 0x99, 0x3b, 0xa5, 0xd3, 0x74, 0xdf, 0x26, 0x2a, 0x64, 0xe1, - 0xbc, 0x70, 0x8f, 0x14, 0xa5, 0x87, 0x07, 0xf3, 0xe3, 0xea, 0x1f, 0x0f, 0x91, 0x1c, 0x57, 0xa1, - 0x47, 0xf2, 0x64, 0x62, 0x29, 0xa2, 0x97, 0x60, 0xb0, 0xbd, 0xe3, 0x84, 0x24, 0xe1, 0x4c, 0x34, - 0x58, 0xa5, 0x85, 0x87, 0x07, 0xf3, 0x13, 0xaa, 0x02, 0x2b, 0xc1, 0x1c, 0xbb, 0xff, 0x50, 0x64, - 0xe9, 0xc5, 0xd9, 0x33, 0x14, 0xd9, 0x9f, 0x58, 0x30, 0xb0, 0xee, 0x37, 0x4e, 0xe2, 0x08, 0x78, - 0xcd, 
0x38, 0x02, 0x9e, 0xc8, 0x8b, 0x5e, 0x9f, 0xbb, 0xfb, 0x57, 0x13, 0xbb, 0xff, 0x7c, 0x2e, - 0x85, 0xee, 0x1b, 0xbf, 0x05, 0xa3, 0x2c, 0x26, 0xbe, 0x70, 0x9c, 0x7a, 0xc1, 0xd8, 0xf0, 0xf3, - 0x89, 0x0d, 0x3f, 0xa9, 0xa1, 0x6a, 0x3b, 0xfd, 0x69, 0x18, 0x16, 0x9e, 0x38, 0x49, 0xaf, 0x54, - 0x81, 0x8b, 0x25, 0xdc, 0xfe, 0xf1, 0x22, 0x18, 0x31, 0xf8, 0xd1, 0x2f, 0x5b, 0xb0, 0x10, 0x70, - 0x0b, 0xdd, 0x46, 0xb9, 0x13, 0xb8, 0xde, 0x76, 0xad, 0xbe, 0x43, 0x1a, 0x9d, 0xa6, 0xeb, 0x6d, - 0x57, 0xb6, 0x3d, 0x5f, 0x15, 0xaf, 0xdc, 0x27, 0xf5, 0x0e, 0x53, 0x82, 0xf5, 0x08, 0xf8, 0xaf, - 0x2c, 0xdd, 0x9f, 0x7f, 0x70, 0x30, 0xbf, 0x80, 0x8f, 0x44, 0x1b, 0x1f, 0xb1, 0x2f, 0xe8, 0x37, - 0x2c, 0xb8, 0xc2, 0x43, 0xd3, 0xf7, 0xdf, 0xff, 0x2e, 0xaf, 0xe5, 0xaa, 0x24, 0x15, 0x13, 0xd9, - 0x20, 0x41, 0x6b, 0xe9, 0x23, 0x62, 0x40, 0xaf, 0x54, 0x8f, 0xd6, 0x16, 0x3e, 0x6a, 0xe7, 0xec, - 0x7f, 0x56, 0x84, 0x71, 0x11, 0xb2, 0x4a, 0xdc, 0x01, 0x2f, 0x19, 0x4b, 0xe2, 0xc9, 0xc4, 0x92, - 0x98, 0x36, 0x90, 0x8f, 0xe7, 0xf8, 0x0f, 0x61, 0x9a, 0x1e, 0xce, 0xd7, 0x89, 0x13, 0x44, 0x9b, - 0xc4, 0xe1, 0xe6, 0x57, 0xc5, 0x23, 0x9f, 0xfe, 0x4a, 0x3c, 0x77, 0x33, 0x49, 0x0c, 0xa7, 0xe9, - 0x7f, 0x33, 0xdd, 0x39, 0x1e, 0x4c, 0xa5, 0xa2, 0x8e, 0x7d, 0x0a, 0x4a, 0xca, 0x8d, 0x44, 0x1c, - 0x3a, 0xdd, 0x83, 0xf7, 0x25, 0x29, 0x70, 0x11, 0x5a, 0xec, 0xc2, 0x14, 0x93, 0xb3, 0xff, 0x41, - 0xc1, 0x68, 0x90, 0x4f, 0xe2, 0x3a, 0x8c, 0x38, 0x61, 0xe8, 0x6e, 0x7b, 0xa4, 0x21, 0x76, 0xec, - 0xfb, 0xf3, 0x76, 0xac, 0xd1, 0x0c, 0x73, 0xe5, 0x59, 0x14, 0x35, 0xb1, 0xa2, 0x81, 0xae, 0x73, - 0x23, 0xb7, 0x3d, 0xf9, 0xde, 0xeb, 0x8f, 0x1a, 0x48, 0x33, 0xb8, 0x3d, 0x82, 0x45, 0x7d, 0xf4, - 0x69, 0x6e, 0x85, 0x78, 0xc3, 0xf3, 0xef, 0x79, 0xd7, 0x7c, 0x5f, 0x86, 0x85, 0xe8, 0x8f, 0xe0, - 0xb4, 0xb4, 0x3d, 0x54, 0xd5, 0xb1, 0x49, 0xad, 0xbf, 0x30, 0x9e, 0x9f, 0x87, 0x19, 0x4a, 0xda, - 0xf4, 0xda, 0x0e, 0x11, 0x81, 0x49, 0x11, 0x0f, 0x4d, 0x96, 0x89, 0xb1, 0xcb, 0x7c, 0xca, 0x99, - 0xb5, 0x63, 0x39, 0xf2, 0x0d, 0x93, 0x04, 0x4e, 0xd2, 0xb4, 0xff, 0xb6, 0x05, 0xcc, 0x83, 0xf5, - 0x04, 0xf8, 0x91, 0x8f, 0x99, 0xfc, 0xc8, 0x6c, 0xde, 0x20, 0xe7, 0xb0, 0x22, 0x2f, 0xf2, 0x95, - 0x55, 0x0d, 0xfc, 0xfb, 0xfb, 0xc2, 0x74, 0xa4, 0xf7, 0xfb, 0xc3, 0xfe, 0xdf, 0x16, 0x3f, 0xc4, - 0x94, 0x93, 0x07, 0xfa, 0x76, 0x18, 0xa9, 0x3b, 0x6d, 0xa7, 0xce, 0x13, 0xc6, 0xe4, 0x4a, 0xf4, - 0x8c, 0x4a, 0x0b, 0xcb, 0xa2, 0x06, 0x97, 0x50, 0xc9, 0xb8, 0x7a, 0x23, 0xb2, 0xb8, 0xa7, 0x54, - 0x4a, 0x35, 0x39, 0xb7, 0x0b, 0xe3, 0x06, 0xb1, 0x47, 0x2a, 0xce, 0xf8, 0x76, 0x7e, 0xc5, 0xaa, - 0x38, 0x90, 0x2d, 0x98, 0xf6, 0xb4, 0xff, 0xf4, 0x42, 0x91, 0x8f, 0xcb, 0xf7, 0xf7, 0xba, 0x44, - 0xd9, 0xed, 0xa3, 0x39, 0xc7, 0x26, 0xc8, 0xe0, 0x34, 0x65, 0xfb, 0x27, 0x2c, 0x78, 0x4c, 0x47, - 0xd4, 0xfc, 0x6f, 0x7a, 0xe9, 0x08, 0xca, 0x30, 0xe2, 0xb7, 0x49, 0xe0, 0x44, 0x7e, 0x20, 0x6e, - 0x8d, 0xcb, 0x72, 0xd0, 0x6f, 0x89, 0xf2, 0x43, 0x11, 0x6e, 0x5d, 0x52, 0x97, 0xe5, 0x58, 0xd5, - 0xa4, 0xaf, 0x4f, 0x36, 0x18, 0xa1, 0xf0, 0xb4, 0x62, 0x67, 0x00, 0x53, 0x97, 0x87, 0x58, 0x40, - 0xec, 0xaf, 0x59, 0x7c, 0x61, 0xe9, 0x5d, 0x47, 0x6f, 0xc1, 0x54, 0xcb, 0x89, 0xea, 0x3b, 0x2b, - 0xf7, 0xdb, 0x01, 0xd7, 0xb8, 0xc8, 0x71, 0x7a, 0xa6, 0xd7, 0x38, 0x69, 0x1f, 0x19, 0x1b, 0x56, - 0xae, 0x25, 0x88, 0xe1, 0x14, 0x79, 0xb4, 0x09, 0xa3, 0xac, 0x8c, 0x39, 0x11, 0x86, 0xdd, 0x58, - 0x83, 0xbc, 0xd6, 0x94, 0xc5, 0xc1, 0x5a, 0x4c, 0x07, 0xeb, 0x44, 0xed, 0x9f, 0x2e, 0xf2, 0xdd, - 0xce, 0x58, 0xf9, 0xa7, 0x61, 0xb8, 0xed, 0x37, 0x96, 0x2b, 0x65, 0x2c, 0x66, 0x41, 0x5d, 0x23, - 0x55, 0x5e, 0x8c, 0x25, 0x1c, 
0x5d, 0x86, 0x11, 0xf1, 0x53, 0x6a, 0xc8, 0xd8, 0xd9, 0x2c, 0xf0, - 0x42, 0xac, 0xa0, 0xe8, 0x79, 0x80, 0x76, 0xe0, 0xef, 0xb9, 0x0d, 0x16, 0xdc, 0xa2, 0x68, 0x1a, - 0x0b, 0x55, 0x15, 0x04, 0x6b, 0x58, 0xe8, 0x55, 0x18, 0xef, 0x78, 0x21, 0x67, 0x47, 0xb4, 0x50, - 0xb6, 0xca, 0x8c, 0xe5, 0xb6, 0x0e, 0xc4, 0x26, 0x2e, 0x5a, 0x84, 0xa1, 0xc8, 0x61, 0xc6, 0x2f, - 0x83, 0xf9, 0xc6, 0xb7, 0x1b, 0x14, 0x43, 0xcf, 0x4d, 0x42, 0x2b, 0x60, 0x51, 0x11, 0x7d, 0x4a, - 0xfa, 0xf3, 0xf2, 0x83, 0x5d, 0x58, 0xbd, 0xf7, 0x77, 0x09, 0x68, 0xde, 0xbc, 0xc2, 0x9a, 0xde, - 0xa0, 0x85, 0x5e, 0x01, 0x20, 0xf7, 0x23, 0x12, 0x78, 0x4e, 0x53, 0xd9, 0x96, 0x29, 0xbe, 0xa0, - 0xec, 0xaf, 0xfb, 0xd1, 0xed, 0x90, 0xac, 0x28, 0x0c, 0xac, 0x61, 0xdb, 0xbf, 0x51, 0x02, 0x88, - 0xf9, 0x76, 0xf4, 0x76, 0xea, 0xe0, 0x7a, 0xb6, 0x3b, 0xa7, 0x7f, 0x7c, 0xa7, 0x16, 0xfa, 0x1e, - 0x0b, 0x46, 0x9d, 0x66, 0xd3, 0xaf, 0x3b, 0x3c, 0xd8, 0x70, 0xa1, 0xfb, 0xc1, 0x29, 0xda, 0x5f, - 0x8c, 0x6b, 0xf0, 0x2e, 0xbc, 0x20, 0x57, 0xa8, 0x06, 0xe9, 0xd9, 0x0b, 0xbd, 0x61, 0xf4, 0x21, - 0xf9, 0x54, 0x2c, 0x1a, 0x43, 0xa9, 0x9e, 0x8a, 0x25, 0x76, 0x47, 0xe8, 0xaf, 0xc4, 0xdb, 0xc6, - 0x2b, 0x71, 0x20, 0xdf, 0x61, 0xd1, 0x60, 0x5f, 0x7b, 0x3d, 0x10, 0x51, 0x55, 0x0f, 0x5e, 0x30, - 0x98, 0xef, 0x1d, 0xa8, 0xbd, 0x93, 0x7a, 0x04, 0x2e, 0xf8, 0x1c, 0x4c, 0x36, 0x4c, 0x26, 0x40, - 0xac, 0xc4, 0xa7, 0xf2, 0xe8, 0x26, 0x78, 0x86, 0xf8, 0xda, 0x4f, 0x00, 0x70, 0x92, 0x30, 0xaa, - 0xf2, 0x58, 0x16, 0x15, 0x6f, 0xcb, 0x17, 0x9e, 0x17, 0x76, 0xee, 0x5c, 0xee, 0x87, 0x11, 0x69, - 0x51, 0xcc, 0xf8, 0x76, 0x5f, 0x17, 0x75, 0xb1, 0xa2, 0x82, 0x5e, 0x87, 0x21, 0xe6, 0x1e, 0x16, - 0xce, 0x8e, 0xe4, 0x4b, 0x9c, 0xcd, 0xe0, 0x6c, 0xf1, 0x86, 0x64, 0x7f, 0x43, 0x2c, 0x28, 0xa0, - 0xeb, 0xd2, 0xf9, 0x32, 0xac, 0x78, 0xb7, 0x43, 0xc2, 0x9c, 0x2f, 0x4b, 0x4b, 0xef, 0x8f, 0xfd, - 0x2a, 0x79, 0x79, 0x66, 0x06, 0x33, 0xa3, 0x26, 0xe5, 0xa2, 0xc4, 0x7f, 0x99, 0x18, 0x6d, 0x16, - 0xf2, 0xbb, 0x67, 0x26, 0x4f, 0x8b, 0x87, 0xf3, 0x8e, 0x49, 0x02, 0x27, 0x69, 0x52, 0x8e, 0x94, - 0xef, 0x7a, 0xe1, 0xbb, 0xd1, 0xeb, 0xec, 0xe0, 0x0f, 0x71, 0x76, 0x1b, 0xf1, 0x12, 0x2c, 0xea, - 0x9f, 0x28, 0x7b, 0x30, 0xe7, 0xc1, 0x54, 0x72, 0x8b, 0x3e, 0x52, 0x76, 0xe4, 0x0f, 0x06, 0x60, - 0xc2, 0x5c, 0x52, 0xe8, 0x0a, 0x94, 0x04, 0x11, 0x95, 0xcc, 0x40, 0xed, 0x92, 0x35, 0x09, 0xc0, - 0x31, 0x0e, 0xcb, 0x61, 0xc1, 0xaa, 0x6b, 0xc6, 0xba, 0x71, 0x0e, 0x0b, 0x05, 0xc1, 0x1a, 0x16, - 0x7d, 0x58, 0x6d, 0xfa, 0x7e, 0xa4, 0x2e, 0x24, 0xb5, 0xee, 0x96, 0x58, 0x29, 0x16, 0x50, 0x7a, - 0x11, 0xed, 0x92, 0xc0, 0x23, 0x4d, 0x33, 0xec, 0xb1, 0xba, 0x88, 0x6e, 0xe8, 0x40, 0x6c, 0xe2, - 0xd2, 0xeb, 0xd4, 0x0f, 0xd9, 0x42, 0x16, 0xcf, 0xb7, 0xd8, 0xf8, 0xb9, 0xc6, 0xfd, 0xbf, 0x25, - 0x1c, 0x7d, 0x12, 0x1e, 0x53, 0xa1, 0x9d, 0x30, 0xd7, 0x66, 0xc8, 0x16, 0x87, 0x0c, 0x69, 0xcb, - 0x63, 0xcb, 0xd9, 0x68, 0x38, 0xaf, 0x3e, 0x7a, 0x0d, 0x26, 0x04, 0x8b, 0x2f, 0x29, 0x0e, 0x9b, - 0x06, 0x36, 0x37, 0x0c, 0x28, 0x4e, 0x60, 0xcb, 0xc0, 0xcd, 0x8c, 0xcb, 0x96, 0x14, 0x46, 0xd2, - 0x81, 0x9b, 0x75, 0x38, 0x4e, 0xd5, 0x40, 0x8b, 0x30, 0xc9, 0x79, 0x30, 0xd7, 0xdb, 0xe6, 0x73, - 0x22, 0x5c, 0xab, 0xd4, 0x96, 0xba, 0x65, 0x82, 0x71, 0x12, 0x1f, 0xbd, 0x0c, 0x63, 0x4e, 0x50, - 0xdf, 0x71, 0x23, 0x52, 0x8f, 0x3a, 0x01, 0xf7, 0xb9, 0xd2, 0x2c, 0x94, 0x16, 0x35, 0x18, 0x36, - 0x30, 0xed, 0xb7, 0x61, 0x26, 0x23, 0x30, 0x04, 0x5d, 0x38, 0x4e, 0xdb, 0x95, 0xdf, 0x94, 0x30, - 0x63, 0x5e, 0xac, 0x56, 0xe4, 0xd7, 0x68, 0x58, 0x74, 0x75, 0xb2, 0x00, 0x12, 0x5a, 0x1e, 0x44, - 0xb5, 0x3a, 0x57, 0x25, 0x00, 0xc7, 0x38, 0xf6, 0xff, 
0x28, 0xc0, 0x64, 0x86, 0x6e, 0x85, 0xe5, - 0xe2, 0x4b, 0x3c, 0x52, 0xe2, 0xd4, 0x7b, 0x66, 0x1c, 0xf0, 0xc2, 0x11, 0xe2, 0x80, 0x17, 0x7b, - 0xc5, 0x01, 0x1f, 0x78, 0x27, 0x71, 0xc0, 0xcd, 0x11, 0x1b, 0xec, 0x6b, 0xc4, 0x32, 0x62, 0x87, - 0x0f, 0x1d, 0x31, 0x76, 0xb8, 0x31, 0xe8, 0xc3, 0x7d, 0x0c, 0xfa, 0x0f, 0x17, 0x60, 0x2a, 0x69, - 0x49, 0x79, 0x02, 0x72, 0xdb, 0xd7, 0x0d, 0xb9, 0xed, 0xe5, 0x7e, 0x5c, 0x61, 0x73, 0x65, 0xb8, - 0x38, 0x21, 0xc3, 0xfd, 0x60, 0x5f, 0xd4, 0xba, 0xcb, 0x73, 0xff, 0x66, 0x01, 0x4e, 0x67, 0xfa, - 0xe2, 0x9e, 0xc0, 0xd8, 0xdc, 0x32, 0xc6, 0xe6, 0xb9, 0xbe, 0xdd, 0x84, 0x73, 0x07, 0xe8, 0x6e, - 0x62, 0x80, 0xae, 0xf4, 0x4f, 0xb2, 0xfb, 0x28, 0x7d, 0xb5, 0x08, 0xe7, 0x33, 0xeb, 0xc5, 0x62, - 0xcf, 0x55, 0x43, 0xec, 0xf9, 0x7c, 0x42, 0xec, 0x69, 0x77, 0xaf, 0x7d, 0x3c, 0x72, 0x50, 0xe1, - 0x2e, 0xcb, 0xa2, 0x1c, 0x3c, 0xa4, 0x0c, 0xd4, 0x70, 0x97, 0x55, 0x84, 0xb0, 0x49, 0xf7, 0x9b, - 0x49, 0xf6, 0xf9, 0x6f, 0x2c, 0x38, 0x9b, 0x39, 0x37, 0x27, 0x20, 0xeb, 0x5a, 0x37, 0x65, 0x5d, - 0x4f, 0xf7, 0xbd, 0x5a, 0x73, 0x84, 0x5f, 0xbf, 0x36, 0x90, 0xf3, 0x2d, 0xec, 0x25, 0x7f, 0x0b, - 0x46, 0x9d, 0x7a, 0x9d, 0x84, 0xe1, 0x9a, 0xdf, 0x50, 0xa1, 0x8e, 0x9f, 0x63, 0xef, 0xac, 0xb8, - 0xf8, 0xf0, 0x60, 0x7e, 0x2e, 0x49, 0x22, 0x06, 0x63, 0x9d, 0x02, 0xfa, 0x34, 0x8c, 0x84, 0xe2, - 0xde, 0x14, 0x73, 0xff, 0x42, 0x9f, 0x83, 0xe3, 0x6c, 0x92, 0xa6, 0x19, 0x8b, 0x49, 0x49, 0x2a, - 0x14, 0x49, 0x33, 0x6e, 0x4b, 0xe1, 0x58, 0xe3, 0xb6, 0x3c, 0x0f, 0xb0, 0xa7, 0x1e, 0x03, 0x49, - 0xf9, 0x83, 0xf6, 0x4c, 0xd0, 0xb0, 0xd0, 0xc7, 0x61, 0x2a, 0xe4, 0xc1, 0x0a, 0x97, 0x9b, 0x4e, - 0xc8, 0x9c, 0x65, 0xc4, 0x2a, 0x64, 0xf1, 0x9e, 0x6a, 0x09, 0x18, 0x4e, 0x61, 0xa3, 0x55, 0xd9, - 0x2a, 0x8b, 0xac, 0xc8, 0x17, 0xe6, 0xa5, 0xb8, 0x45, 0x91, 0x09, 0xf8, 0x54, 0x72, 0xf8, 0xd9, - 0xc0, 0x6b, 0x35, 0xd1, 0xa7, 0x01, 0xe8, 0xf2, 0x11, 0x72, 0x88, 0xe1, 0xfc, 0xc3, 0x93, 0x9e, - 0x2a, 0x8d, 0x4c, 0xdb, 0x5e, 0xe6, 0xe1, 0x5a, 0x56, 0x44, 0xb0, 0x46, 0xd0, 0xfe, 0xe1, 0x01, - 0x78, 0xbc, 0xcb, 0x19, 0x89, 0x16, 0x4d, 0x3d, 0xec, 0x33, 0xc9, 0xc7, 0xf5, 0x5c, 0x66, 0x65, - 0xe3, 0xb5, 0x9d, 0x58, 0x8a, 0x85, 0x77, 0xbc, 0x14, 0x7f, 0xc0, 0xd2, 0xc4, 0x1e, 0xdc, 0xe2, - 0xf3, 0x63, 0x47, 0x3c, 0xfb, 0x8f, 0x51, 0x0e, 0xb2, 0x95, 0x21, 0x4c, 0x78, 0xbe, 0xef, 0xee, - 0xf4, 0x2d, 0x5d, 0x38, 0x59, 0x29, 0xf1, 0x6f, 0x59, 0x70, 0xae, 0x6b, 0xd0, 0x8e, 0x6f, 0x40, - 0x86, 0xc1, 0xfe, 0x82, 0x05, 0x4f, 0x66, 0xd6, 0x30, 0xcc, 0x8c, 0xae, 0x40, 0xa9, 0x4e, 0x0b, - 0x35, 0x2f, 0xcd, 0xd8, 0x7d, 0x5d, 0x02, 0x70, 0x8c, 0x63, 0x58, 0x13, 0x15, 0x7a, 0x5a, 0x13, - 0xfd, 0x8a, 0x05, 0xa9, 0x4d, 0x7f, 0x02, 0xb7, 0x4f, 0xc5, 0xbc, 0x7d, 0xde, 0xdf, 0xcf, 0x68, - 0xe6, 0x5c, 0x3c, 0x7f, 0x3c, 0x09, 0x67, 0x72, 0xbc, 0x94, 0xf6, 0x60, 0x7a, 0xbb, 0x4e, 0x4c, - 0xff, 0xd7, 0x6e, 0x71, 0x61, 0xba, 0x3a, 0xcb, 0xb2, 0x5c, 0xa5, 0xd3, 0x29, 0x14, 0x9c, 0x6e, - 0x02, 0x7d, 0xc1, 0x82, 0x53, 0xce, 0xbd, 0x70, 0x85, 0x72, 0x11, 0x6e, 0x7d, 0xa9, 0xe9, 0xd7, - 0x77, 0xe9, 0x11, 0x2d, 0x37, 0xc2, 0x8b, 0x99, 0x92, 0x9d, 0xbb, 0xb5, 0x14, 0xbe, 0xd1, 0x3c, - 0x4b, 0xde, 0x9a, 0x85, 0x85, 0x33, 0xdb, 0x42, 0x58, 0x44, 0xf4, 0xa7, 0x6f, 0x94, 0x2e, 0x1e, - 0xda, 0x59, 0xee, 0x64, 0xfc, 0x5a, 0x94, 0x10, 0xac, 0xe8, 0xa0, 0xcf, 0x42, 0x69, 0x5b, 0xfa, - 0x78, 0x66, 0x5c, 0xbb, 0xf1, 0x40, 0x76, 0xf7, 0x7c, 0xe5, 0xea, 0x59, 0x85, 0x84, 0x63, 0xa2, - 0xe8, 0x35, 0x28, 0x7a, 0x5b, 0x61, 0xb7, 0xfc, 0xa7, 0x09, 0x3b, 0x3c, 0x1e, 0x07, 0x61, 0x7d, - 0xb5, 0x86, 0x69, 0x45, 0x74, 0x1d, 0x8a, 0xc1, 0x66, 0x43, 0x88, 0x25, 0x33, 
0x37, 0x29, 0x5e, - 0x2a, 0xe7, 0xf4, 0x8a, 0x51, 0xc2, 0x4b, 0x65, 0x4c, 0x49, 0xa0, 0x2a, 0x0c, 0x32, 0xd7, 0x1e, - 0x71, 0xc9, 0x65, 0xb2, 0xf3, 0x5d, 0x5c, 0xe4, 0x78, 0xb0, 0x04, 0x86, 0x80, 0x39, 0x21, 0xb4, - 0x01, 0x43, 0x75, 0x96, 0x2b, 0x53, 0x44, 0x82, 0xfb, 0x50, 0xa6, 0x00, 0xb2, 0x4b, 0x12, 0x51, - 0x21, 0x8f, 0x63, 0x18, 0x58, 0xd0, 0x62, 0x54, 0x49, 0x7b, 0x67, 0x2b, 0x14, 0xb9, 0x9d, 0xb3, - 0xa9, 0x76, 0xc9, 0x8d, 0x2b, 0xa8, 0x32, 0x0c, 0x2c, 0x68, 0xa1, 0x57, 0xa0, 0xb0, 0x55, 0x17, - 0x6e, 0x3b, 0x99, 0x92, 0x48, 0x33, 0x94, 0xc5, 0xd2, 0xd0, 0x83, 0x83, 0xf9, 0xc2, 0xea, 0x32, - 0x2e, 0x6c, 0xd5, 0xd1, 0x3a, 0x0c, 0x6f, 0x71, 0xe7, 0x77, 0x21, 0x6c, 0x7c, 0x2a, 0xdb, 0x2f, - 0x3f, 0xe5, 0x1f, 0xcf, 0x3d, 0x56, 0x04, 0x00, 0x4b, 0x22, 0x2c, 0x40, 0xbe, 0x72, 0xe2, 0x17, - 0x41, 0xd3, 0x16, 0x8e, 0x16, 0x78, 0x81, 0x33, 0x1d, 0x71, 0x28, 0x00, 0xac, 0x51, 0xa4, 0xab, - 0xda, 0x91, 0x09, 0xf6, 0x45, 0xb0, 0x99, 0xcc, 0x55, 0xad, 0xb2, 0xf0, 0x77, 0x5b, 0xd5, 0x0a, - 0x09, 0xc7, 0x44, 0xd1, 0x2e, 0x8c, 0xef, 0x85, 0xed, 0x1d, 0x22, 0xb7, 0x34, 0x8b, 0x3d, 0x93, - 0x73, 0x2f, 0xdf, 0x11, 0x88, 0x6e, 0x10, 0x75, 0x9c, 0x66, 0xea, 0x14, 0x62, 0x3a, 0xfd, 0x3b, - 0x3a, 0x31, 0x6c, 0xd2, 0xa6, 0xc3, 0xff, 0x56, 0xc7, 0xdf, 0xdc, 0x8f, 0x88, 0x88, 0x75, 0x96, - 0x39, 0xfc, 0x6f, 0x70, 0x94, 0xf4, 0xf0, 0x0b, 0x00, 0x96, 0x44, 0xd0, 0x1d, 0x31, 0x3c, 0xec, - 0xf4, 0x9c, 0xca, 0x0f, 0x48, 0xba, 0x28, 0x91, 0x72, 0x06, 0x85, 0x9d, 0x96, 0x31, 0x29, 0x76, - 0x4a, 0xb6, 0x77, 0xfc, 0xc8, 0xf7, 0x12, 0x27, 0xf4, 0x74, 0xfe, 0x29, 0x59, 0xcd, 0xc0, 0x4f, - 0x9f, 0x92, 0x59, 0x58, 0x38, 0xb3, 0x2d, 0xd4, 0x80, 0x89, 0xb6, 0x1f, 0x44, 0xf7, 0xfc, 0x40, - 0xae, 0x2f, 0xd4, 0x45, 0x58, 0x62, 0x60, 0x8a, 0x16, 0x59, 0x18, 0x41, 0x13, 0x82, 0x13, 0x34, - 0xd1, 0x27, 0x60, 0x38, 0xac, 0x3b, 0x4d, 0x52, 0xb9, 0x35, 0x3b, 0x93, 0x7f, 0xfd, 0xd4, 0x38, - 0x4a, 0xce, 0xea, 0xe2, 0xd1, 0xf4, 0x39, 0x0a, 0x96, 0xe4, 0xd0, 0x2a, 0x0c, 0xb2, 0x04, 0x68, - 0x2c, 0x30, 0x5f, 0x4e, 0x5c, 0xd5, 0x94, 0x55, 0x34, 0x3f, 0x9b, 0x58, 0x31, 0xe6, 0xd5, 0xe9, - 0x1e, 0x10, 0x6f, 0x06, 0x3f, 0x9c, 0x3d, 0x9d, 0xbf, 0x07, 0xc4, 0x53, 0xe3, 0x56, 0xad, 0xdb, - 0x1e, 0x50, 0x48, 0x38, 0x26, 0x4a, 0x4f, 0x66, 0x7a, 0x9a, 0x9e, 0xe9, 0x62, 0xce, 0x93, 0x7b, - 0x96, 0xb2, 0x93, 0x99, 0x9e, 0xa4, 0x94, 0x84, 0xfd, 0x7b, 0xc3, 0x69, 0x9e, 0x85, 0xbd, 0x32, - 0xbf, 0xcb, 0x4a, 0x29, 0x20, 0x3f, 0xdc, 0xaf, 0xd0, 0xeb, 0x18, 0x59, 0xf0, 0x2f, 0x58, 0x70, - 0xa6, 0x9d, 0xf9, 0x21, 0x82, 0x01, 0xe8, 0x4f, 0x76, 0xc6, 0x3f, 0x5d, 0x05, 0x71, 0xcc, 0x86, - 0xe3, 0x9c, 0x96, 0x92, 0xcf, 0x9c, 0xe2, 0x3b, 0x7e, 0xe6, 0xac, 0xc1, 0x08, 0x63, 0x32, 0x7b, - 0xe4, 0x8e, 0x4e, 0xbe, 0xf6, 0x18, 0x2b, 0xb1, 0x2c, 0x2a, 0x62, 0x45, 0x02, 0xfd, 0xa0, 0x05, - 0xe7, 0x92, 0x5d, 0xc7, 0x84, 0x81, 0x45, 0xe4, 0x47, 0xfe, 0xc0, 0x5d, 0x15, 0xdf, 0x9f, 0xe2, - 0xff, 0x0d, 0xe4, 0xc3, 0x5e, 0x08, 0xb8, 0x7b, 0x63, 0xa8, 0x9c, 0xf1, 0xc2, 0x1e, 0x32, 0xb5, - 0x0a, 0x7d, 0xbc, 0xb2, 0x5f, 0x84, 0xb1, 0x96, 0xdf, 0xf1, 0x22, 0x61, 0xfd, 0x23, 0x2c, 0x11, - 0x98, 0x06, 0x7e, 0x4d, 0x2b, 0xc7, 0x06, 0x56, 0xe2, 0x6d, 0x3e, 0xf2, 0xd0, 0x6f, 0xf3, 0x37, - 0x61, 0xcc, 0xd3, 0xcc, 0x55, 0x05, 0x3f, 0x70, 0x29, 0x3f, 0x6a, 0xab, 0x6e, 0xdc, 0xca, 0x7b, - 0xa9, 0x97, 0x60, 0x83, 0xda, 0xc9, 0x3e, 0xf8, 0xbe, 0x6c, 0x65, 0x30, 0xf5, 0x5c, 0x04, 0xf0, - 0x51, 0x53, 0x04, 0x70, 0x29, 0x29, 0x02, 0x48, 0x49, 0x94, 0x8d, 0xd7, 0x7f, 0xff, 0x49, 0x69, - 0xfa, 0x0d, 0x84, 0x68, 0x37, 0xe1, 0x42, 0xaf, 0x6b, 0x89, 0x99, 0x81, 0x35, 0x94, 0xfe, 0x30, - 0x36, 
0x03, 0x6b, 0x54, 0xca, 0x98, 0x41, 0xfa, 0x0d, 0xb1, 0x63, 0xff, 0x37, 0x0b, 0x8a, 0x55, - 0xbf, 0x71, 0x02, 0x0f, 0xde, 0x8f, 0x19, 0x0f, 0xde, 0xc7, 0xb3, 0x2f, 0xc4, 0x46, 0xae, 0x3c, - 0x7c, 0x25, 0x21, 0x0f, 0x3f, 0x97, 0x47, 0xa0, 0xbb, 0xf4, 0xfb, 0x27, 0x8b, 0x30, 0x5a, 0xf5, - 0x1b, 0xca, 0x06, 0xfb, 0xd7, 0x1e, 0xc6, 0x06, 0x3b, 0x37, 0xb5, 0x82, 0x46, 0x99, 0x59, 0x8f, - 0x49, 0xf7, 0xd3, 0x6f, 0x30, 0x53, 0xec, 0xbb, 0xc4, 0xdd, 0xde, 0x89, 0x48, 0x23, 0xf9, 0x39, - 0x27, 0x67, 0x8a, 0xfd, 0x7b, 0x05, 0x98, 0x4c, 0xb4, 0x8e, 0x9a, 0x30, 0xde, 0xd4, 0xa5, 0xad, - 0x62, 0x9d, 0x3e, 0x94, 0xa0, 0x56, 0x98, 0xb2, 0x6a, 0x45, 0xd8, 0x24, 0x8e, 0x16, 0x00, 0x94, - 0xfa, 0x51, 0x8a, 0xf5, 0x18, 0xd7, 0xaf, 0xf4, 0x93, 0x21, 0xd6, 0x30, 0xd0, 0x4b, 0x30, 0x1a, - 0xf9, 0x6d, 0xbf, 0xe9, 0x6f, 0xef, 0xdf, 0x20, 0x32, 0xa8, 0x93, 0x32, 0x50, 0xdb, 0x88, 0x41, - 0x58, 0xc7, 0x43, 0xf7, 0x61, 0x5a, 0x11, 0xa9, 0x1d, 0x83, 0x04, 0x9a, 0x49, 0x15, 0xd6, 0x93, - 0x14, 0x71, 0xba, 0x11, 0xfb, 0xa7, 0x8a, 0x7c, 0x88, 0xbd, 0xc8, 0x7d, 0x6f, 0x37, 0xbc, 0xbb, - 0x77, 0xc3, 0x57, 0x2d, 0x98, 0xa2, 0xad, 0x33, 0xeb, 0x1b, 0x79, 0xcd, 0xab, 0x38, 0xd1, 0x56, - 0x97, 0x38, 0xd1, 0x97, 0xe8, 0xa9, 0xd9, 0xf0, 0x3b, 0x91, 0x90, 0xdd, 0x69, 0xc7, 0x22, 0x2d, - 0xc5, 0x02, 0x2a, 0xf0, 0x48, 0x10, 0x08, 0x8f, 0x41, 0x1d, 0x8f, 0x04, 0x01, 0x16, 0x50, 0x19, - 0x46, 0x7a, 0x20, 0x3b, 0x8c, 0x34, 0x0f, 0x8e, 0x29, 0xec, 0x34, 0x04, 0xc3, 0xa5, 0x05, 0xc7, - 0x94, 0x06, 0x1c, 0x31, 0x8e, 0xfd, 0xb3, 0x45, 0x18, 0xab, 0xfa, 0x8d, 0x58, 0xf5, 0xf8, 0xa2, - 0xa1, 0x7a, 0xbc, 0x90, 0x50, 0x3d, 0x4e, 0xe9, 0xb8, 0xef, 0x29, 0x1a, 0xbf, 0x5e, 0x8a, 0xc6, - 0x7f, 0x6a, 0xb1, 0x59, 0x2b, 0xaf, 0xd7, 0xb8, 0x31, 0x17, 0xba, 0x0a, 0xa3, 0xec, 0x80, 0x61, - 0x2e, 0xaa, 0x52, 0x1f, 0xc7, 0xd2, 0x23, 0xad, 0xc7, 0xc5, 0x58, 0xc7, 0x41, 0x97, 0x61, 0x24, - 0x24, 0x4e, 0x50, 0xdf, 0x51, 0xa7, 0xab, 0x50, 0x9e, 0xf1, 0x32, 0xac, 0xa0, 0xe8, 0x8d, 0x38, - 0x2e, 0x63, 0x31, 0xdf, 0xe5, 0x4d, 0xef, 0x0f, 0xdf, 0x22, 0xf9, 0xc1, 0x18, 0xed, 0xbb, 0x80, - 0xd2, 0xf8, 0x7d, 0x04, 0x24, 0x9b, 0x37, 0x03, 0x92, 0x95, 0x52, 0xc1, 0xc8, 0xfe, 0xdc, 0x82, - 0x89, 0xaa, 0xdf, 0xa0, 0x5b, 0xf7, 0x9b, 0x69, 0x9f, 0xea, 0x41, 0x69, 0x87, 0xba, 0x04, 0xa5, - 0xbd, 0x08, 0x83, 0x55, 0xbf, 0x51, 0xa9, 0x76, 0xf3, 0x37, 0xb7, 0xff, 0x96, 0x05, 0xc3, 0x55, - 0xbf, 0x71, 0x02, 0x6a, 0x81, 0x8f, 0x9a, 0x6a, 0x81, 0xc7, 0x72, 0xd6, 0x4d, 0x8e, 0x26, 0xe0, - 0x6f, 0x0c, 0xc0, 0x38, 0xed, 0xa7, 0xbf, 0x2d, 0xa7, 0xd2, 0x18, 0x36, 0xab, 0x8f, 0x61, 0xa3, - 0x5c, 0xb8, 0xdf, 0x6c, 0xfa, 0xf7, 0x92, 0xd3, 0xba, 0xca, 0x4a, 0xb1, 0x80, 0xa2, 0x67, 0x61, - 0xa4, 0x1d, 0x90, 0x3d, 0xd7, 0x17, 0xec, 0xad, 0xa6, 0x64, 0xa9, 0x8a, 0x72, 0xac, 0x30, 0xe8, - 0xb3, 0x30, 0x74, 0x3d, 0x7a, 0x95, 0xd7, 0x7d, 0xaf, 0xc1, 0x25, 0xe7, 0x45, 0x91, 0x2a, 0x42, - 0x2b, 0xc7, 0x06, 0x16, 0xba, 0x0b, 0x25, 0xf6, 0x9f, 0x1d, 0x3b, 0x47, 0x4f, 0x3a, 0x2a, 0x92, - 0xd0, 0x09, 0x02, 0x38, 0xa6, 0x85, 0x9e, 0x07, 0x88, 0x64, 0xf4, 0xf1, 0x50, 0x04, 0x9f, 0x52, - 0x4f, 0x01, 0x15, 0x97, 0x3c, 0xc4, 0x1a, 0x16, 0x7a, 0x06, 0x4a, 0x91, 0xe3, 0x36, 0x6f, 0xba, - 0x1e, 0x09, 0x99, 0x44, 0xbc, 0x28, 0x73, 0xc1, 0x89, 0x42, 0x1c, 0xc3, 0x29, 0x2b, 0xc6, 0x22, - 0x33, 0xf0, 0x94, 0xc5, 0x23, 0x0c, 0x9b, 0xb1, 0x62, 0x37, 0x55, 0x29, 0xd6, 0x30, 0xd0, 0x0e, - 0x3c, 0xe1, 0x7a, 0x2c, 0xad, 0x02, 0xa9, 0xed, 0xba, 0xed, 0x8d, 0x9b, 0xb5, 0x3b, 0x24, 0x70, - 0xb7, 0xf6, 0x97, 0x9c, 0xfa, 0x2e, 0xf1, 0x64, 0x3a, 0xc9, 0xf7, 0x8b, 0x2e, 0x3e, 0x51, 0xe9, - 0x82, 0x8b, 0xbb, 0x52, 0xb2, 
0x5f, 0x86, 0xd3, 0x55, 0xbf, 0x51, 0xf5, 0x83, 0x68, 0xd5, 0x0f, - 0xee, 0x39, 0x41, 0x43, 0xae, 0x94, 0x79, 0x19, 0x25, 0x81, 0x1e, 0x85, 0x83, 0xfc, 0xa0, 0x30, - 0x22, 0x20, 0xbc, 0xc0, 0x98, 0xaf, 0x23, 0xfa, 0xf6, 0xd4, 0x19, 0x1b, 0xa0, 0x72, 0x8c, 0x5c, - 0x73, 0x22, 0x82, 0x6e, 0xb1, 0xdc, 0xc9, 0xf1, 0x8d, 0x28, 0xaa, 0x3f, 0xad, 0xe5, 0x4e, 0x8e, - 0x81, 0x99, 0x57, 0xa8, 0x59, 0xdf, 0xfe, 0xef, 0x83, 0xec, 0x70, 0x4c, 0xe4, 0xa9, 0x40, 0x9f, - 0x81, 0x89, 0x90, 0xdc, 0x74, 0xbd, 0xce, 0x7d, 0x29, 0x8d, 0xe8, 0xe2, 0x9d, 0x55, 0x5b, 0xd1, - 0x31, 0xb9, 0x4c, 0xd3, 0x2c, 0xc3, 0x09, 0x6a, 0xa8, 0x05, 0x13, 0xf7, 0x5c, 0xaf, 0xe1, 0xdf, - 0x0b, 0x25, 0xfd, 0x91, 0x7c, 0xd1, 0xe6, 0x5d, 0x8e, 0x99, 0xe8, 0xa3, 0xd1, 0xdc, 0x5d, 0x83, - 0x18, 0x4e, 0x10, 0xa7, 0x0b, 0x30, 0xe8, 0x78, 0x8b, 0xe1, 0xed, 0x90, 0x04, 0x22, 0x0b, 0x36, - 0x5b, 0x80, 0x58, 0x16, 0xe2, 0x18, 0x4e, 0x17, 0x20, 0xfb, 0x73, 0x2d, 0xf0, 0x3b, 0x3c, 0x47, - 0x80, 0x58, 0x80, 0x58, 0x95, 0x62, 0x0d, 0x83, 0x6e, 0x50, 0xf6, 0x6f, 0xdd, 0xf7, 0xb0, 0xef, - 0x47, 0x72, 0x4b, 0xb3, 0xbc, 0xab, 0x5a, 0x39, 0x36, 0xb0, 0xd0, 0x2a, 0xa0, 0xb0, 0xd3, 0x6e, - 0x37, 0x99, 0xd9, 0x87, 0xd3, 0x64, 0xa4, 0xb8, 0xca, 0xbd, 0xc8, 0x43, 0xa7, 0xd6, 0x52, 0x50, - 0x9c, 0x51, 0x83, 0x9e, 0xd5, 0x5b, 0xa2, 0xab, 0x83, 0xac, 0xab, 0x5c, 0x0d, 0x52, 0xe3, 0xfd, - 0x94, 0x30, 0xb4, 0x02, 0xc3, 0xe1, 0x7e, 0x58, 0x8f, 0x44, 0x0c, 0xb8, 0x9c, 0x54, 0x44, 0x35, - 0x86, 0xa2, 0x65, 0xc2, 0xe3, 0x55, 0xb0, 0xac, 0x8b, 0xea, 0x30, 0x23, 0x28, 0x2e, 0xef, 0x38, - 0x9e, 0x4a, 0xec, 0xc2, 0xad, 0x5f, 0xaf, 0x3e, 0x38, 0x98, 0x9f, 0x11, 0x2d, 0xeb, 0xe0, 0xc3, - 0x83, 0xf9, 0x33, 0x55, 0xbf, 0x91, 0x01, 0xc1, 0x59, 0xd4, 0xf8, 0xe2, 0xab, 0xd7, 0xfd, 0x56, - 0xbb, 0x1a, 0xf8, 0x5b, 0x6e, 0x93, 0x74, 0x53, 0x25, 0xd5, 0x0c, 0x4c, 0xb1, 0xf8, 0x8c, 0x32, - 0x9c, 0xa0, 0x66, 0x7f, 0x3b, 0xe3, 0x67, 0x58, 0xe2, 0xe7, 0xa8, 0x13, 0x10, 0xd4, 0x82, 0xf1, - 0x36, 0xdb, 0x26, 0x22, 0x72, 0xbf, 0x58, 0xeb, 0x2f, 0xf6, 0x29, 0x12, 0xb9, 0x47, 0xaf, 0x01, - 0x25, 0xb2, 0x64, 0x6f, 0xcd, 0xaa, 0x4e, 0x0e, 0x9b, 0xd4, 0xed, 0x1f, 0x7b, 0x8c, 0xdd, 0x88, - 0x35, 0x2e, 0xe7, 0x18, 0x16, 0xc6, 0xf6, 0xe2, 0x69, 0x35, 0x97, 0x2f, 0x70, 0x8b, 0xa7, 0x45, - 0x18, 0xec, 0x63, 0x59, 0x17, 0x7d, 0x1a, 0x26, 0xe8, 0x4b, 0x45, 0xcb, 0xa8, 0x72, 0x2a, 0x3f, - 0x28, 0x42, 0x9c, 0x48, 0x45, 0xcb, 0xea, 0xa1, 0x57, 0xc6, 0x09, 0x62, 0xe8, 0x0d, 0x66, 0x16, - 0x62, 0x26, 0x6b, 0xe9, 0x41, 0x5a, 0xb7, 0x00, 0x91, 0x64, 0x35, 0x22, 0x79, 0x89, 0x60, 0xec, - 0x47, 0x9b, 0x08, 0x06, 0xdd, 0x84, 0x71, 0x91, 0xfd, 0x58, 0xac, 0xdc, 0xa2, 0x21, 0x07, 0x1c, - 0xc7, 0x3a, 0xf0, 0x30, 0x59, 0x80, 0xcd, 0xca, 0x68, 0x1b, 0xce, 0x69, 0xd9, 0x88, 0xae, 0x05, - 0x0e, 0x53, 0xe6, 0xbb, 0xec, 0x38, 0xd5, 0xee, 0xea, 0x27, 0x1f, 0x1c, 0xcc, 0x9f, 0xdb, 0xe8, - 0x86, 0x88, 0xbb, 0xd3, 0x41, 0xb7, 0xe0, 0x34, 0x77, 0xe9, 0x2d, 0x13, 0xa7, 0xd1, 0x74, 0x3d, - 0xc5, 0x0c, 0xf0, 0x2d, 0x7f, 0xf6, 0xc1, 0xc1, 0xfc, 0xe9, 0xc5, 0x2c, 0x04, 0x9c, 0x5d, 0x0f, - 0x7d, 0x14, 0x4a, 0x0d, 0x2f, 0x14, 0x63, 0x30, 0x64, 0x24, 0x7c, 0x2a, 0x95, 0xd7, 0x6b, 0xea, - 0xfb, 0xe3, 0x3f, 0x38, 0xae, 0x80, 0xb6, 0xb9, 0xac, 0x58, 0x49, 0x30, 0x86, 0x53, 0x21, 0x8d, - 0x92, 0x42, 0x3e, 0xc3, 0xa9, 0x8f, 0x2b, 0x49, 0x94, 0xad, 0xbb, 0xe1, 0xef, 0x67, 0x10, 0x46, - 0xaf, 0x03, 0xa2, 0x2f, 0x08, 0xb7, 0x4e, 0x16, 0xeb, 0x2c, 0x2d, 0x04, 0x13, 0xad, 0x8f, 0x98, - 0x6e, 0x66, 0xb5, 0x14, 0x06, 0xce, 0xa8, 0x85, 0xae, 0xd3, 0x53, 0x45, 0x2f, 0x15, 0xa7, 0x96, - 0x4a, 0xcf, 0x57, 0x26, 0xed, 0x80, 0xd4, 0x9d, 0x88, 
0x34, 0x4c, 0x8a, 0x38, 0x51, 0x0f, 0x35, - 0xe0, 0x09, 0xa7, 0x13, 0xf9, 0x4c, 0x0c, 0x6f, 0xa2, 0x6e, 0xf8, 0xbb, 0xc4, 0x63, 0x1a, 0xb0, - 0x91, 0xa5, 0x0b, 0x94, 0xdb, 0x58, 0xec, 0x82, 0x87, 0xbb, 0x52, 0xa1, 0x5c, 0xa2, 0xca, 0xc7, - 0x0b, 0x66, 0xa0, 0xa6, 0x8c, 0x9c, 0xbc, 0x2f, 0xc1, 0xe8, 0x8e, 0x1f, 0x46, 0xeb, 0x24, 0xba, - 0xe7, 0x07, 0xbb, 0x22, 0xde, 0x66, 0x1c, 0xa3, 0x39, 0x06, 0x61, 0x1d, 0x8f, 0x3e, 0x03, 0x99, - 0x7d, 0x46, 0xa5, 0xcc, 0x54, 0xe3, 0x23, 0xf1, 0x19, 0x73, 0x9d, 0x17, 0x63, 0x09, 0x97, 0xa8, - 0x95, 0xea, 0x32, 0x53, 0x73, 0x27, 0x50, 0x2b, 0xd5, 0x65, 0x2c, 0xe1, 0x74, 0xb9, 0x86, 0x3b, - 0x4e, 0x40, 0xaa, 0x81, 0x5f, 0x27, 0xa1, 0x16, 0x19, 0xfc, 0x71, 0x1e, 0x4d, 0x94, 0x2e, 0xd7, - 0x5a, 0x16, 0x02, 0xce, 0xae, 0x87, 0x48, 0x3a, 0x13, 0xd7, 0x44, 0xbe, 0x7e, 0x22, 0xcd, 0xcf, - 0xf4, 0x99, 0x8c, 0xcb, 0x83, 0x29, 0x95, 0x03, 0x8c, 0xc7, 0x0f, 0x0d, 0x67, 0x27, 0xd9, 0xda, - 0xee, 0x3f, 0xf8, 0xa8, 0xd2, 0xf8, 0x54, 0x12, 0x94, 0x70, 0x8a, 0xb6, 0x11, 0x8b, 0x6b, 0xaa, - 0x67, 0x2c, 0xae, 0x2b, 0x50, 0x0a, 0x3b, 0x9b, 0x0d, 0xbf, 0xe5, 0xb8, 0x1e, 0x53, 0x73, 0x6b, - 0xef, 0x91, 0x9a, 0x04, 0xe0, 0x18, 0x07, 0xad, 0xc2, 0x88, 0x23, 0xd5, 0x39, 0x28, 0x3f, 0xfa, - 0x8a, 0x52, 0xe2, 0xf0, 0x80, 0x04, 0x52, 0x81, 0xa3, 0xea, 0xa2, 0x57, 0x61, 0x5c, 0xb8, 0xa4, - 0x8a, 0xf4, 0x93, 0x33, 0xa6, 0xdf, 0x50, 0x4d, 0x07, 0x62, 0x13, 0x17, 0xdd, 0x86, 0xd1, 0xc8, - 0x6f, 0x32, 0xe7, 0x17, 0xca, 0xe6, 0x9d, 0xc9, 0x8f, 0x23, 0xb6, 0xa1, 0xd0, 0x74, 0x49, 0xaa, - 0xaa, 0x8a, 0x75, 0x3a, 0x68, 0x83, 0xaf, 0x77, 0x16, 0x21, 0x9b, 0x84, 0xb3, 0x8f, 0xe5, 0xdf, - 0x49, 0x2a, 0x90, 0xb6, 0xb9, 0x1d, 0x44, 0x4d, 0xac, 0x93, 0x41, 0xd7, 0x60, 0xba, 0x1d, 0xb8, - 0x3e, 0x5b, 0x13, 0x4a, 0x93, 0x37, 0x6b, 0xa6, 0xe7, 0xa9, 0x26, 0x11, 0x70, 0xba, 0x0e, 0xf3, - 0x28, 0x16, 0x85, 0xb3, 0x67, 0x79, 0x86, 0x6a, 0xfe, 0xbc, 0xe3, 0x65, 0x58, 0x41, 0xd1, 0x1a, - 0x3b, 0x89, 0xb9, 0x64, 0x62, 0x76, 0x2e, 0x3f, 0xe0, 0x8b, 0x2e, 0xc1, 0xe0, 0xcc, 0xab, 0xfa, - 0x8b, 0x63, 0x0a, 0xa8, 0xa1, 0xa5, 0x32, 0xa4, 0x2f, 0x86, 0x70, 0xf6, 0x89, 0x2e, 0x46, 0x72, - 0x89, 0xe7, 0x45, 0xcc, 0x10, 0x18, 0xc5, 0x21, 0x4e, 0xd0, 0x44, 0x1f, 0x87, 0x29, 0x11, 0xa6, - 0x2e, 0x1e, 0xa6, 0x73, 0xb1, 0x49, 0x31, 0x4e, 0xc0, 0x70, 0x0a, 0x9b, 0x67, 0x0e, 0x70, 0x36, - 0x9b, 0x44, 0x1c, 0x7d, 0x37, 0x5d, 0x6f, 0x37, 0x9c, 0x3d, 0xcf, 0xce, 0x07, 0x91, 0x39, 0x20, - 0x09, 0xc5, 0x19, 0x35, 0xd0, 0x06, 0x4c, 0xb5, 0x03, 0x42, 0x5a, 0x8c, 0xd1, 0x17, 0xf7, 0xd9, - 0x3c, 0x77, 0xa8, 0xa7, 0x3d, 0xa9, 0x26, 0x60, 0x87, 0x19, 0x65, 0x38, 0x45, 0x01, 0xdd, 0x83, - 0x11, 0x7f, 0x8f, 0x04, 0x3b, 0xc4, 0x69, 0xcc, 0x5e, 0xe8, 0x62, 0xe2, 0x2e, 0x2e, 0xb7, 0x5b, - 0x02, 0x37, 0xa1, 0xfd, 0x97, 0xc5, 0xbd, 0xb5, 0xff, 0xb2, 0x31, 0xf4, 0x43, 0x16, 0x9c, 0x95, - 0x0a, 0x83, 0x5a, 0x9b, 0x8e, 0xfa, 0xb2, 0xef, 0x85, 0x51, 0xc0, 0x5d, 0xc0, 0x9f, 0xcc, 0x77, - 0x8b, 0xde, 0xc8, 0xa9, 0xa4, 0x84, 0xa3, 0x67, 0xf3, 0x30, 0x42, 0x9c, 0xdf, 0x22, 0x5a, 0x86, - 0xe9, 0x90, 0x44, 0xf2, 0x30, 0x5a, 0x0c, 0x57, 0xdf, 0x28, 0xaf, 0xcf, 0x5e, 0xe4, 0xfe, 0xeb, - 0x74, 0x33, 0xd4, 0x92, 0x40, 0x9c, 0xc6, 0x9f, 0xfb, 0x56, 0x98, 0x4e, 0x5d, 0xff, 0x47, 0xc9, - 0x88, 0x32, 0xb7, 0x0b, 0xe3, 0xc6, 0x10, 0x3f, 0x52, 0xed, 0xf1, 0xbf, 0x1a, 0x86, 0x92, 0xd2, - 0x2c, 0xa2, 0x2b, 0xa6, 0xc2, 0xf8, 0x6c, 0x52, 0x61, 0x3c, 0x42, 0xdf, 0xf5, 0xba, 0x8e, 0x78, - 0x23, 0x23, 0x6a, 0x57, 0xde, 0x86, 0xee, 0xdf, 0x1d, 0x5b, 0x13, 0xd7, 0x16, 0xfb, 0xd6, 0x3c, - 0x0f, 0x74, 0x95, 0x00, 0x5f, 0x83, 0x69, 0xcf, 0x67, 0x3c, 0x27, 0x69, 0x48, 
0x86, 0x82, 0xf1, - 0x0d, 0x25, 0x3d, 0x0c, 0x46, 0x02, 0x01, 0xa7, 0xeb, 0xd0, 0x06, 0xf9, 0xc5, 0x9f, 0x14, 0x39, - 0x73, 0xbe, 0x00, 0x0b, 0x28, 0xba, 0x08, 0x83, 0x6d, 0xbf, 0x51, 0xa9, 0x0a, 0x7e, 0x53, 0x8b, - 0x15, 0xd9, 0xa8, 0x54, 0x31, 0x87, 0xa1, 0x45, 0x18, 0x62, 0x3f, 0xc2, 0xd9, 0xb1, 0xfc, 0x78, - 0x07, 0xac, 0x86, 0x96, 0x6f, 0x86, 0x55, 0xc0, 0xa2, 0x22, 0x13, 0x7d, 0x51, 0x26, 0x9d, 0x89, - 0xbe, 0x86, 0x1f, 0x52, 0xf4, 0x25, 0x09, 0xe0, 0x98, 0x16, 0xba, 0x0f, 0xa7, 0x8d, 0x87, 0x11, - 0x5f, 0x22, 0x24, 0x14, 0x3e, 0xd7, 0x17, 0xbb, 0xbe, 0x88, 0x84, 0xa6, 0xfa, 0x9c, 0xe8, 0xf4, - 0xe9, 0x4a, 0x16, 0x25, 0x9c, 0xdd, 0x00, 0x6a, 0xc2, 0x74, 0x3d, 0xd5, 0xea, 0x48, 0xff, 0xad, - 0xaa, 0x09, 0x4d, 0xb7, 0x98, 0x26, 0x8c, 0x5e, 0x85, 0x91, 0xb7, 0xfc, 0x90, 0x9d, 0xd5, 0x82, - 0x47, 0x96, 0x0e, 0xbb, 0x23, 0x6f, 0xdc, 0xaa, 0xb1, 0xf2, 0xc3, 0x83, 0xf9, 0xd1, 0xaa, 0xdf, - 0x90, 0x7f, 0xb1, 0xaa, 0x80, 0xbe, 0xd7, 0x82, 0xb9, 0xf4, 0xcb, 0x4b, 0x75, 0x7a, 0xbc, 0xff, - 0x4e, 0xdb, 0xa2, 0xd1, 0xb9, 0x95, 0x5c, 0x72, 0xb8, 0x4b, 0x53, 0xf6, 0x2f, 0x59, 0x4c, 0xea, - 0x26, 0x34, 0x40, 0x24, 0xec, 0x34, 0x4f, 0x22, 0xcd, 0xe6, 0x8a, 0xa1, 0x9c, 0x7a, 0x68, 0xcb, - 0x85, 0x7f, 0x6e, 0x31, 0xcb, 0x85, 0x13, 0x74, 0x51, 0x78, 0x03, 0x46, 0x22, 0x99, 0xfe, 0xb4, - 0x4b, 0x66, 0x50, 0xad, 0x53, 0xcc, 0x7a, 0x43, 0x71, 0xac, 0x2a, 0xd3, 0xa9, 0x22, 0x63, 0xff, - 0x23, 0x3e, 0x03, 0x12, 0x72, 0x02, 0x3a, 0x80, 0xb2, 0xa9, 0x03, 0x98, 0xef, 0xf1, 0x05, 0x39, - 0xba, 0x80, 0x7f, 0x68, 0xf6, 0x9b, 0x49, 0x6a, 0xde, 0xed, 0x26, 0x33, 0xf6, 0x17, 0x2d, 0x80, - 0x38, 0x14, 0x2f, 0x93, 0x2f, 0xfb, 0x81, 0xcc, 0xb1, 0x98, 0x95, 0x4d, 0xe8, 0x65, 0xca, 0xa3, - 0xfa, 0x91, 0x5f, 0xf7, 0x9b, 0x42, 0xc3, 0xf5, 0x44, 0xac, 0x86, 0xe0, 0xe5, 0x87, 0xda, 0x6f, - 0xac, 0xb0, 0xd1, 0xbc, 0x0c, 0xfc, 0x55, 0x8c, 0x15, 0x63, 0x46, 0xd0, 0xaf, 0x1f, 0xb1, 0xe0, - 0x54, 0x96, 0xbd, 0x2b, 0x7d, 0xf1, 0x70, 0x99, 0x95, 0x32, 0x67, 0x52, 0xb3, 0x79, 0x47, 0x94, - 0x63, 0x85, 0xd1, 0x77, 0xe6, 0xb0, 0xa3, 0xc5, 0xc0, 0xbd, 0x05, 0xe3, 0xd5, 0x80, 0x68, 0x97, - 0xeb, 0x6b, 0xdc, 0x99, 0x9c, 0xf7, 0xe7, 0xd9, 0x23, 0x3b, 0x92, 0xdb, 0x3f, 0x5d, 0x80, 0x53, - 0xdc, 0x2a, 0x60, 0x71, 0xcf, 0x77, 0x1b, 0x55, 0xbf, 0x21, 0xb2, 0xbe, 0x7d, 0x0a, 0xc6, 0xda, - 0x9a, 0xa0, 0xb1, 0x5b, 0x3c, 0x47, 0x5d, 0x20, 0x19, 0x8b, 0x46, 0xf4, 0x52, 0x6c, 0xd0, 0x42, - 0x0d, 0x18, 0x23, 0x7b, 0x6e, 0x5d, 0xa9, 0x96, 0x0b, 0x47, 0xbe, 0xe8, 0x54, 0x2b, 0x2b, 0x1a, - 0x1d, 0x6c, 0x50, 0x7d, 0x04, 0xf9, 0x7c, 0xed, 0x1f, 0xb5, 0xe0, 0xb1, 0x9c, 0xe8, 0x8f, 0xb4, - 0xb9, 0x7b, 0xcc, 0xfe, 0x42, 0x2c, 0x5b, 0xd5, 0x1c, 0xb7, 0xca, 0xc0, 0x02, 0x8a, 0x3e, 0x01, - 0xc0, 0xad, 0x2a, 0xe8, 0x93, 0xbb, 0x57, 0x98, 0x3c, 0x23, 0xc2, 0x97, 0x16, 0xac, 0x49, 0xd6, - 0xc7, 0x1a, 0x2d, 0xfb, 0x4b, 0x03, 0x30, 0xc8, 0x93, 0xad, 0xaf, 0xc2, 0xf0, 0x0e, 0xcf, 0x85, - 0xd1, 0x4f, 0xda, 0x8d, 0x58, 0x18, 0xc2, 0x0b, 0xb0, 0xac, 0x8c, 0xd6, 0x60, 0x86, 0xe7, 0x12, - 0x69, 0x96, 0x49, 0xd3, 0xd9, 0x97, 0x92, 0x3b, 0x9e, 0x87, 0x53, 0x49, 0x30, 0x2b, 0x69, 0x14, - 0x9c, 0x55, 0x0f, 0xbd, 0x06, 0x13, 0xf4, 0x25, 0xe5, 0x77, 0x22, 0x49, 0x89, 0x67, 0x11, 0x51, - 0x4f, 0xb7, 0x0d, 0x03, 0x8a, 0x13, 0xd8, 0xf4, 0x31, 0xdf, 0x4e, 0xc9, 0x28, 0x07, 0xe3, 0xc7, - 0xbc, 0x29, 0x97, 0x34, 0x71, 0x99, 0xa1, 0x6b, 0x87, 0x99, 0xf5, 0x6e, 0xec, 0x04, 0x24, 0xdc, - 0xf1, 0x9b, 0x0d, 0xc6, 0xf4, 0x0d, 0x6a, 0x86, 0xae, 0x09, 0x38, 0x4e, 0xd5, 0xa0, 0x54, 0xb6, - 0x1c, 0xb7, 0xd9, 0x09, 0x48, 0x4c, 0x65, 0xc8, 0xa4, 0xb2, 0x9a, 0x80, 0xe3, 0x54, 0x8d, 0xde, - 0xc2, 
-	// (previous gzipped FileDescriptorProto bytes of the generated .pb.go file, elided)
+	// 14240 bytes of a gzipped FileDescriptorProto
+	// (regenerated gzipped FileDescriptorProto bytes, elided)
0x88, 0x4d, 0x37, 0x67, 0xc6, 0xa6, + 0xab, 0xa4, 0xe2, 0xd2, 0xfd, 0x99, 0x05, 0x13, 0x35, 0xbf, 0x49, 0xb7, 0xee, 0xb7, 0xd2, 0x3e, + 0xd5, 0xc3, 0x1e, 0x0f, 0x15, 0x84, 0x3d, 0x7e, 0x02, 0x06, 0x6b, 0x7e, 0x73, 0xb5, 0x56, 0x14, + 0x5f, 0xc0, 0xfe, 0x9b, 0x16, 0x0c, 0xd7, 0xfc, 0xe6, 0x31, 0xa8, 0x05, 0x3e, 0x6c, 0xaa, 0x05, + 0x1e, 0xc9, 0x59, 0x37, 0x39, 0x9a, 0x80, 0xbf, 0x3e, 0x00, 0xe3, 0xb4, 0x9f, 0xfe, 0xb6, 0x9c, + 0x4a, 0x63, 0xd8, 0xac, 0x3e, 0x86, 0x8d, 0x72, 0xe1, 0x7e, 0xab, 0xe5, 0xdf, 0x49, 0x4e, 0xeb, + 0x0a, 0x2b, 0xc5, 0x02, 0x8a, 0x9e, 0x81, 0x91, 0x4e, 0x40, 0xf6, 0x5c, 0x5f, 0xb0, 0xb7, 0x9a, + 0x92, 0xa5, 0x26, 0xca, 0xb1, 0xc2, 0xa0, 0xcf, 0xc2, 0xd0, 0xf5, 0xe8, 0x55, 0xde, 0xf0, 0xbd, + 0x26, 0x97, 0x9c, 0x97, 0x45, 0x6e, 0x0e, 0xad, 0x1c, 0x1b, 0x58, 0xe8, 0x36, 0x54, 0xd8, 0x7f, + 0x76, 0xec, 0x1c, 0x3e, 0xcb, 0xab, 0xc8, 0xfa, 0x27, 0x08, 0xe0, 0x98, 0x16, 0x7a, 0x0e, 0x20, + 0x92, 0xf1, 0xed, 0x43, 0x11, 0x6d, 0x4d, 0x3d, 0x05, 0x54, 0xe4, 0xfb, 0x10, 0x6b, 0x58, 0xe8, + 0x69, 0xa8, 0x44, 0x8e, 0xdb, 0xba, 0xee, 0x7a, 0x24, 0x64, 0x12, 0xf1, 0xb2, 0x4c, 0xbe, 0x27, + 0x0a, 0x71, 0x0c, 0xa7, 0xac, 0x18, 0x8b, 0xc4, 0xc1, 0x73, 0x44, 0x8f, 0x30, 0x6c, 0xc6, 0x8a, + 0x5d, 0x57, 0xa5, 0x58, 0xc3, 0x40, 0x3b, 0xf0, 0x98, 0xeb, 0xb1, 0x3c, 0x16, 0xa4, 0xbe, 0xeb, + 0x76, 0x36, 0xae, 0xd7, 0x6f, 0x91, 0xc0, 0xdd, 0xda, 0x5f, 0x74, 0x1a, 0xbb, 0xc4, 0x93, 0xf9, + 0x3b, 0xdf, 0x2b, 0xba, 0xf8, 0xd8, 0x6a, 0x01, 0x2e, 0x2e, 0xa4, 0x64, 0x3f, 0xcf, 0xd6, 0xfb, + 0x8d, 0x3a, 0x7a, 0xbf, 0x71, 0x74, 0x9c, 0xd6, 0x8f, 0x8e, 0xfb, 0x07, 0x73, 0x43, 0x37, 0xea, + 0x5a, 0x20, 0x89, 0x97, 0xe0, 0x54, 0xcd, 0x6f, 0xd6, 0xfc, 0x20, 0x5a, 0xf1, 0x83, 0x3b, 0x4e, + 0xd0, 0x94, 0xcb, 0x6b, 0x4e, 0x86, 0xd2, 0xa0, 0xe7, 0xe7, 0x20, 0x3f, 0x5d, 0x8c, 0x30, 0x19, + 0xcf, 0x33, 0x8e, 0xed, 0x90, 0x0e, 0x60, 0x0d, 0xc6, 0x3b, 0xa8, 0x4c, 0x30, 0x57, 0x9c, 0x88, + 0xa0, 0x1b, 0x2c, 0xc3, 0x75, 0x7c, 0x8d, 0x8a, 0xea, 0x4f, 0x69, 0x19, 0xae, 0x63, 0x60, 0xe6, + 0xbd, 0x6b, 0xd6, 0xb7, 0xff, 0xfb, 0x20, 0x3b, 0x51, 0x13, 0xd9, 0x44, 0xd0, 0xa7, 0x60, 0x22, + 0x24, 0xd7, 0x5d, 0xaf, 0x7b, 0x57, 0x8a, 0x30, 0x0a, 0x5c, 0xf8, 0xea, 0xcb, 0x3a, 0x26, 0x17, + 0x84, 0x9a, 0x65, 0x38, 0x41, 0x0d, 0xb5, 0x61, 0xe2, 0x8e, 0xeb, 0x35, 0xfd, 0x3b, 0xa1, 0xa4, + 0x3f, 0x92, 0x2f, 0x0f, 0xbd, 0xcd, 0x31, 0x13, 0x7d, 0x34, 0x9a, 0xbb, 0x6d, 0x10, 0xc3, 0x09, + 0xe2, 0x74, 0xd5, 0x06, 0x5d, 0x6f, 0x21, 0xbc, 0x19, 0x92, 0x40, 0xe4, 0x2a, 0x67, 0xab, 0x16, + 0xcb, 0x42, 0x1c, 0xc3, 0xe9, 0xaa, 0x65, 0x7f, 0xae, 0x04, 0x7e, 0x97, 0xa7, 0xae, 0x10, 0xab, + 0x16, 0xab, 0x52, 0xac, 0x61, 0xd0, 0x5d, 0xcd, 0xfe, 0xad, 0xfb, 0x1e, 0xf6, 0xfd, 0x48, 0x9e, + 0x03, 0x4c, 0xa7, 0xaf, 0x95, 0x63, 0x03, 0x0b, 0xad, 0x00, 0x0a, 0xbb, 0x9d, 0x4e, 0x8b, 0xd9, + 0x06, 0x39, 0x2d, 0x46, 0x8a, 0xdb, 0x4b, 0x94, 0x79, 0xe8, 0xdd, 0x7a, 0x0a, 0x8a, 0x33, 0x6a, + 0xd0, 0x03, 0x7e, 0x4b, 0x74, 0x75, 0x90, 0x75, 0x95, 0xeb, 0x4e, 0xea, 0xbc, 0x9f, 0x12, 0x86, + 0x96, 0x61, 0x38, 0xdc, 0x0f, 0x1b, 0x91, 0x88, 0x94, 0x98, 0x93, 0x30, 0xaa, 0xce, 0x50, 0xb4, + 0x7c, 0x85, 0xbc, 0x0a, 0x96, 0x75, 0x51, 0x03, 0x4e, 0x08, 0x8a, 0x4b, 0x3b, 0x8e, 0xa7, 0xd2, + 0xef, 0x70, 0x13, 0xe9, 0xcb, 0xf7, 0x0e, 0xe6, 0x4e, 0x88, 0x96, 0x75, 0xf0, 0xfd, 0x83, 0xb9, + 0xd3, 0x35, 0xbf, 0x99, 0x01, 0xc1, 0x59, 0xd4, 0xf8, 0xe2, 0x6b, 0x34, 0xfc, 0x76, 0xa7, 0x16, + 0xf8, 0x5b, 0x6e, 0x8b, 0x14, 0xe9, 0x9f, 0xea, 0x06, 0xa6, 0x58, 0x7c, 0x46, 0x19, 0x4e, 0x50, + 0xb3, 0xbf, 0x93, 0x31, 0x41, 0x2c, 0x3d, 0x77, 0xd4, 0x0d, 0x08, 0x6a, 0xc3, 0x78, 
0x87, 0x6d, + 0x13, 0x91, 0x50, 0x42, 0xac, 0xf5, 0x17, 0xfa, 0x94, 0xa3, 0xdc, 0xa1, 0x77, 0x87, 0x69, 0x63, + 0x54, 0xd3, 0xc9, 0x61, 0x93, 0xba, 0xfd, 0x1b, 0x8f, 0xb0, 0x6b, 0xb4, 0xce, 0x85, 0x23, 0xc3, + 0xc2, 0x23, 0x43, 0xbc, 0xc7, 0x66, 0xf3, 0xa5, 0x74, 0xf1, 0xb4, 0x08, 0xaf, 0x0e, 0x2c, 0xeb, + 0xa2, 0x4f, 0xc2, 0x04, 0x7d, 0xde, 0xa8, 0xab, 0x2c, 0x9c, 0x39, 0x99, 0x1f, 0x39, 0x43, 0x61, + 0xe9, 0xc9, 0x66, 0xf4, 0xca, 0x38, 0x41, 0x0c, 0xbd, 0xce, 0x6c, 0x7a, 0x24, 0xe9, 0x52, 0x3f, + 0xa4, 0x75, 0xf3, 0x1d, 0x49, 0x56, 0x23, 0x82, 0xba, 0x70, 0x22, 0x9d, 0x9a, 0x2e, 0x9c, 0xb1, + 0xf3, 0xf9, 0xc4, 0x74, 0x76, 0xb9, 0x38, 0x2b, 0x48, 0x1a, 0x16, 0xe2, 0x2c, 0xfa, 0xe8, 0x3a, + 0x8c, 0x8b, 0x1c, 0xd5, 0x62, 0xe5, 0x96, 0x0d, 0xe1, 0xe1, 0x38, 0xd6, 0x81, 0xf7, 0x93, 0x05, + 0xd8, 0xac, 0x8c, 0xb6, 0xe1, 0xac, 0x96, 0x33, 0xea, 0x4a, 0xe0, 0x30, 0x0b, 0x00, 0x97, 0x1d, + 0xa7, 0xda, 0x05, 0xff, 0xf8, 0xbd, 0x83, 0xb9, 0xb3, 0x1b, 0x45, 0x88, 0xb8, 0x98, 0x0e, 0xba, + 0x01, 0xa7, 0xb8, 0xdf, 0x77, 0x95, 0x38, 0xcd, 0x96, 0xeb, 0x29, 0x0e, 0x82, 0x6f, 0xf9, 0x33, + 0xf7, 0x0e, 0xe6, 0x4e, 0x2d, 0x64, 0x21, 0xe0, 0xec, 0x7a, 0xe8, 0xc3, 0x50, 0x69, 0x7a, 0xa1, + 0x18, 0x83, 0x21, 0x23, 0x2d, 0x57, 0xa5, 0xba, 0x5e, 0x57, 0xdf, 0x1f, 0xff, 0xc1, 0x71, 0x05, + 0xb4, 0xcd, 0x05, 0xcc, 0x4a, 0xec, 0x31, 0x9c, 0x8a, 0x7b, 0x95, 0x94, 0x0c, 0x1a, 0x9e, 0x9f, + 0x5c, 0xb3, 0xa2, 0x1c, 0x22, 0x0c, 0xa7, 0x50, 0x83, 0x30, 0x7a, 0x0d, 0x90, 0x08, 0xff, 0xbe, + 0xd0, 0x60, 0xd9, 0x4a, 0x98, 0x3c, 0x7e, 0xc4, 0xf4, 0x45, 0xac, 0xa7, 0x30, 0x70, 0x46, 0x2d, + 0x74, 0x95, 0x9e, 0x2a, 0x7a, 0xa9, 0x38, 0xb5, 0x54, 0x12, 0xc5, 0x2a, 0xe9, 0x04, 0x84, 0x59, + 0x34, 0x99, 0x14, 0x71, 0xa2, 0x1e, 0x6a, 0xc2, 0x63, 0x4e, 0x37, 0xf2, 0x99, 0xec, 0xde, 0x44, + 0xdd, 0xf0, 0x77, 0x89, 0xc7, 0xd4, 0x66, 0x23, 0x8b, 0xe7, 0x29, 0x8b, 0xb2, 0x50, 0x80, 0x87, + 0x0b, 0xa9, 0x50, 0xd6, 0x52, 0x65, 0x4d, 0x06, 0x33, 0x9a, 0x57, 0x46, 0xe6, 0xe4, 0x17, 0x61, + 0x74, 0xc7, 0x0f, 0xa3, 0x75, 0x12, 0xdd, 0xf1, 0x83, 0x5d, 0x11, 0x95, 0x36, 0x8e, 0xf1, 0x1d, + 0x83, 0xb0, 0x8e, 0x47, 0xdf, 0x8e, 0xcc, 0xa8, 0x63, 0xb5, 0xca, 0xf4, 0xe9, 0x23, 0xf1, 0x19, + 0x73, 0x95, 0x17, 0x63, 0x09, 0x97, 0xa8, 0xab, 0xb5, 0x25, 0xa6, 0x1b, 0x4f, 0xa0, 0xae, 0xd6, + 0x96, 0xb0, 0x84, 0xd3, 0xe5, 0x1a, 0xee, 0x38, 0x01, 0xa9, 0x05, 0x7e, 0x83, 0x84, 0x5a, 0x64, + 0xf9, 0x47, 0x79, 0xcc, 0x5d, 0xba, 0x5c, 0xeb, 0x59, 0x08, 0x38, 0xbb, 0x1e, 0x22, 0xe9, 0x7c, + 0x69, 0x13, 0xf9, 0x4a, 0x8d, 0x34, 0x3f, 0xd3, 0x67, 0xca, 0x34, 0x0f, 0xa6, 0x54, 0xa6, 0x36, + 0x1e, 0x65, 0x37, 0x9c, 0x99, 0x64, 0x6b, 0xbb, 0xff, 0x10, 0xbd, 0x4a, 0x4d, 0xb4, 0x9a, 0xa0, + 0x84, 0x53, 0xb4, 0x8d, 0x80, 0x6d, 0x53, 0x3d, 0x03, 0xb6, 0x5d, 0x82, 0x4a, 0xd8, 0xdd, 0x6c, + 0xfa, 0x6d, 0xc7, 0xf5, 0x98, 0x6e, 0x5c, 0x7b, 0xc4, 0xd4, 0x25, 0x00, 0xc7, 0x38, 0x68, 0x05, + 0x46, 0x1c, 0xa9, 0x03, 0x42, 0xf9, 0x21, 0x7a, 0x94, 0xe6, 0x87, 0x47, 0xad, 0x90, 0x5a, 0x1f, + 0x55, 0x17, 0xbd, 0x02, 0xe3, 0xc2, 0x6f, 0x59, 0x24, 0x09, 0x3d, 0x61, 0x3a, 0x97, 0xd5, 0x75, + 0x20, 0x36, 0x71, 0xd1, 0x4d, 0x18, 0x8d, 0xfc, 0x16, 0xf3, 0x90, 0xa2, 0x6c, 0xde, 0xe9, 0xfc, + 0x60, 0x73, 0x1b, 0x0a, 0x4d, 0x17, 0xbf, 0xaa, 0xaa, 0x58, 0xa7, 0x83, 0x36, 0xf8, 0x7a, 0x67, + 0x71, 0xe4, 0x49, 0x38, 0xf3, 0x48, 0xfe, 0x9d, 0xa4, 0xc2, 0xcd, 0x9b, 0xdb, 0x41, 0xd4, 0xc4, + 0x3a, 0x19, 0x74, 0x05, 0xa6, 0x3b, 0x81, 0xeb, 0xb3, 0x35, 0xa1, 0xd4, 0x7f, 0x33, 0x66, 0xd6, + 0xa8, 0x5a, 0x12, 0x01, 0xa7, 0xeb, 0x30, 0xb7, 0x73, 0x51, 0x38, 0x73, 0x86, 0x67, 0xbe, 0xe0, + 0x6f, 0x42, 
0x5e, 0x86, 0x15, 0x14, 0xad, 0xb1, 0x93, 0x98, 0x8b, 0x33, 0x66, 0x66, 0xf3, 0xa3, + 0x02, 0xe9, 0x62, 0x0f, 0xce, 0xbc, 0xaa, 0xbf, 0x38, 0xa6, 0x80, 0x9a, 0x5a, 0xc2, 0x49, 0xfa, + 0x62, 0x08, 0x67, 0x1e, 0x2b, 0xb0, 0xac, 0x4b, 0x3c, 0x2f, 0x62, 0x86, 0xc0, 0x28, 0x0e, 0x71, + 0x82, 0x26, 0xfa, 0x28, 0x4c, 0x89, 0x58, 0x86, 0xf1, 0x30, 0x9d, 0x8d, 0xed, 0xce, 0x71, 0x02, + 0x86, 0x53, 0xd8, 0x3c, 0xf3, 0x84, 0xb3, 0xd9, 0x22, 0xe2, 0xe8, 0xbb, 0xee, 0x7a, 0xbb, 0xe1, + 0xcc, 0x39, 0x76, 0x3e, 0x88, 0xcc, 0x13, 0x49, 0x28, 0xce, 0xa8, 0x81, 0x36, 0x60, 0xaa, 0x13, + 0x10, 0xd2, 0x66, 0x8c, 0xbe, 0xb8, 0xcf, 0xe6, 0x78, 0xd4, 0x05, 0xda, 0x93, 0x5a, 0x02, 0x76, + 0x3f, 0xa3, 0x0c, 0xa7, 0x28, 0xa0, 0x3b, 0x30, 0xe2, 0xef, 0x91, 0x60, 0x87, 0x38, 0xcd, 0x99, + 0xf3, 0x05, 0x7e, 0x10, 0xe2, 0x72, 0xbb, 0x21, 0x70, 0x13, 0x26, 0x03, 0xb2, 0xb8, 0xb7, 0xc9, + 0x80, 0x6c, 0x0c, 0xfd, 0x88, 0x05, 0x67, 0xa4, 0x96, 0xa1, 0xde, 0xa1, 0xa3, 0xbe, 0xe4, 0x7b, + 0x61, 0x14, 0xf0, 0x38, 0x01, 0x8f, 0xe7, 0xfb, 0xce, 0x6f, 0xe4, 0x54, 0x52, 0x12, 0xd5, 0x33, + 0x79, 0x18, 0x21, 0xce, 0x6f, 0x11, 0x2d, 0xc1, 0x74, 0x48, 0x22, 0x79, 0x18, 0x2d, 0x84, 0x2b, + 0xaf, 0x57, 0xd7, 0x67, 0x9e, 0xe0, 0x41, 0x0e, 0xe8, 0x66, 0xa8, 0x27, 0x81, 0x38, 0x8d, 0x8f, + 0x2e, 0x43, 0xc9, 0x0f, 0x67, 0xde, 0x5b, 0x90, 0xa3, 0x94, 0x3e, 0xc5, 0xb9, 0xe9, 0xd8, 0x8d, + 0x3a, 0x2e, 0xf9, 0xe1, 0xec, 0xb7, 0xc3, 0x74, 0x8a, 0x63, 0x38, 0x4c, 0x6e, 0x9f, 0xd9, 0x5d, + 0x18, 0x37, 0x66, 0xe5, 0xa1, 0x6a, 0xa9, 0xff, 0xf5, 0x30, 0x54, 0x94, 0x06, 0x13, 0x5d, 0x32, + 0x15, 0xd3, 0x67, 0x92, 0x8a, 0xe9, 0x91, 0x9a, 0xdf, 0x34, 0x74, 0xd1, 0x1b, 0x19, 0xd1, 0xe0, + 0xf2, 0xce, 0x80, 0xfe, 0x0d, 0xe4, 0x35, 0xb1, 0x70, 0xb9, 0x6f, 0x0d, 0xf7, 0x40, 0xa1, 0xa4, + 0xf9, 0x0a, 0x4c, 0x7b, 0x3e, 0x63, 0x53, 0x49, 0x53, 0xf2, 0x20, 0x8c, 0xd5, 0xa8, 0xe8, 0xe1, + 0x55, 0x12, 0x08, 0x38, 0x5d, 0x87, 0x36, 0xc8, 0x79, 0x85, 0xa4, 0x68, 0x9b, 0xb3, 0x12, 0x58, + 0x40, 0xd1, 0x13, 0x30, 0xd8, 0xf1, 0x9b, 0xab, 0x35, 0xc1, 0xa2, 0x6a, 0x31, 0x48, 0x9b, 0xab, + 0x35, 0xcc, 0x61, 0x68, 0x01, 0x86, 0xd8, 0x8f, 0x70, 0x66, 0x2c, 0x3f, 0x8e, 0x06, 0xab, 0xa1, + 0x65, 0x4e, 0x62, 0x15, 0xb0, 0xa8, 0xc8, 0x44, 0x6c, 0x94, 0xaf, 0x67, 0x22, 0xb6, 0xe1, 0x07, + 0x14, 0xb1, 0x49, 0x02, 0x38, 0xa6, 0x85, 0xee, 0xc2, 0x29, 0xe3, 0x2d, 0xc5, 0x97, 0x08, 0x09, + 0x85, 0x2f, 0xff, 0x13, 0x85, 0x8f, 0x28, 0xa1, 0x11, 0x3f, 0x2b, 0x3a, 0x7d, 0x6a, 0x35, 0x8b, + 0x12, 0xce, 0x6e, 0x00, 0xb5, 0x60, 0xba, 0x91, 0x6a, 0x75, 0xa4, 0xff, 0x56, 0xd5, 0x84, 0xa6, + 0x5b, 0x4c, 0x13, 0x46, 0xaf, 0xc0, 0xc8, 0x9b, 0x7e, 0xc8, 0x8e, 0x77, 0xc1, 0x56, 0x4b, 0x47, + 0xf0, 0x91, 0xd7, 0x6f, 0xd4, 0x59, 0xf9, 0xfd, 0x83, 0xb9, 0xd1, 0x9a, 0xdf, 0x94, 0x7f, 0xb1, + 0xaa, 0x80, 0xbe, 0xdf, 0x82, 0xd9, 0xf4, 0x63, 0x4d, 0x75, 0x7a, 0xbc, 0xff, 0x4e, 0xdb, 0xa2, + 0xd1, 0xd9, 0xe5, 0x5c, 0x72, 0xb8, 0xa0, 0x29, 0xfb, 0x97, 0x2c, 0x26, 0xa8, 0x13, 0x9a, 0x26, + 0x12, 0x76, 0x5b, 0xc7, 0x91, 0x30, 0x76, 0xd9, 0x50, 0x82, 0x3d, 0xb0, 0x85, 0xc4, 0x3f, 0xb7, + 0x98, 0x85, 0xc4, 0x31, 0xba, 0x42, 0xbc, 0x0e, 0x23, 0x91, 0x4c, 0xe4, 0x5b, 0x90, 0xe3, 0x56, + 0xeb, 0x14, 0xb3, 0x12, 0x51, 0x4c, 0xae, 0xca, 0xd9, 0xab, 0xc8, 0xd8, 0xff, 0x88, 0xcf, 0x80, + 0x84, 0x1c, 0x83, 0xae, 0xa1, 0x6a, 0xea, 0x1a, 0xe6, 0x7a, 0x7c, 0x41, 0x8e, 0xce, 0xe1, 0x1f, + 0x9a, 0xfd, 0x66, 0xc2, 0x9d, 0x77, 0xba, 0x69, 0x8e, 0xfd, 0x79, 0x0b, 0x20, 0x0e, 0xf1, 0xdc, + 0x47, 0xaa, 0xb6, 0x97, 0x28, 0x5b, 0xeb, 0x47, 0x7e, 0xc3, 0x6f, 0x09, 0x4d, 0xda, 0x63, 0xb1, + 0xba, 0x83, 0x97, 0xdf, 0xd7, 0x7e, 
0x63, 0x85, 0x8d, 0xe6, 0x64, 0x40, 0xb9, 0x72, 0xac, 0x80, + 0x33, 0x82, 0xc9, 0x7d, 0xd1, 0x82, 0x93, 0x59, 0x76, 0xb5, 0xf4, 0x91, 0xc4, 0xc5, 0x5c, 0xca, + 0x6c, 0x4a, 0xcd, 0xe6, 0x2d, 0x51, 0x8e, 0x15, 0x46, 0xdf, 0x39, 0xf0, 0x0e, 0x17, 0x5b, 0xf9, + 0x06, 0x8c, 0xd7, 0x02, 0xa2, 0x5d, 0xae, 0xaf, 0xf2, 0x20, 0x05, 0xbc, 0x3f, 0xcf, 0x1c, 0x3a, + 0x40, 0x81, 0xfd, 0xe5, 0x12, 0x9c, 0xe4, 0xd6, 0x07, 0x0b, 0x7b, 0xbe, 0xdb, 0xac, 0xf9, 0x4d, + 0xe1, 0x3d, 0xf5, 0x09, 0x18, 0xeb, 0x68, 0xb2, 0xc9, 0xa2, 0x38, 0xa1, 0xba, 0x0c, 0x33, 0x96, + 0xa6, 0xe8, 0xa5, 0xd8, 0xa0, 0x85, 0x9a, 0x30, 0x46, 0xf6, 0xdc, 0x86, 0x52, 0x61, 0x97, 0x0e, + 0x7d, 0xd1, 0xa9, 0x56, 0x96, 0x35, 0x3a, 0xd8, 0xa0, 0xfa, 0x10, 0x32, 0x53, 0xdb, 0x3f, 0x66, + 0xc1, 0x23, 0x39, 0x51, 0x45, 0x69, 0x73, 0x77, 0x98, 0x9d, 0x87, 0x58, 0xb6, 0xaa, 0x39, 0x6e, + 0xfd, 0x81, 0x05, 0x14, 0x7d, 0x0c, 0x80, 0x5b, 0x6f, 0xd0, 0x57, 0x7a, 0xaf, 0xf0, 0x8b, 0x46, + 0xe4, 0x38, 0x2d, 0x08, 0x98, 0xac, 0x8f, 0x35, 0x5a, 0xf6, 0x17, 0x07, 0x60, 0x90, 0x67, 0xd1, + 0xaf, 0xc1, 0xf0, 0x0e, 0xcf, 0x13, 0x53, 0x38, 0x6f, 0x14, 0x57, 0xa6, 0x9e, 0x89, 0xe7, 0x4d, + 0x2b, 0xc5, 0x92, 0x0c, 0x5a, 0x83, 0x13, 0x3c, 0x5d, 0x4f, 0xab, 0x4a, 0x5a, 0xce, 0xbe, 0x14, + 0xfb, 0xf1, 0xdc, 0xb2, 0x4a, 0xfc, 0xb9, 0x9a, 0x46, 0xc1, 0x59, 0xf5, 0xd0, 0xab, 0x30, 0x41, + 0x9f, 0x61, 0x7e, 0x37, 0x92, 0x94, 0x78, 0xa2, 0x1e, 0xf5, 0xee, 0xdb, 0x30, 0xa0, 0x38, 0x81, + 0x8d, 0x5e, 0x81, 0xf1, 0x4e, 0x4a, 0xc0, 0x39, 0x18, 0x4b, 0x02, 0x4c, 0xa1, 0xa6, 0x89, 0xcb, + 0x4c, 0x6b, 0xbb, 0xcc, 0x90, 0x78, 0x63, 0x27, 0x20, 0xe1, 0x8e, 0xdf, 0x6a, 0x32, 0xf6, 0x6f, + 0x50, 0x33, 0xad, 0x4d, 0xc0, 0x71, 0xaa, 0x06, 0xa5, 0xb2, 0xe5, 0xb8, 0xad, 0x6e, 0x40, 0x62, + 0x2a, 0x43, 0x26, 0x95, 0x95, 0x04, 0x1c, 0xa7, 0x6a, 0xf4, 0x96, 0xdc, 0x0e, 0x1f, 0x8d, 0xe4, + 0xd6, 0xfe, 0x5b, 0x25, 0x30, 0xa6, 0xf6, 0x5b, 0x37, 0x81, 0x10, 0xfd, 0xb2, 0xed, 0xa0, 0xd3, + 0x10, 0x96, 0x31, 0x99, 0x5f, 0x16, 0xe7, 0x05, 0xe5, 0x5f, 0x46, 0xff, 0x63, 0x56, 0x8b, 0xee, + 0xf1, 0x53, 0xb5, 0xc0, 0xa7, 0x97, 0x9c, 0x0c, 0x63, 0xa5, 0x2c, 0xd8, 0x87, 0xa5, 0x77, 0x6f, + 0x41, 0xc0, 0x47, 0x61, 0xe3, 0xcb, 0x29, 0x18, 0x46, 0x24, 0x75, 0xe1, 0x6b, 0x2f, 0xa9, 0xa0, + 0xcb, 0x30, 0x2a, 0xb2, 0xc2, 0x30, 0x43, 0x6b, 0xbe, 0x99, 0x98, 0xd1, 0x4b, 0x35, 0x2e, 0xc6, + 0x3a, 0x8e, 0xfd, 0x03, 0x25, 0x38, 0x91, 0xe1, 0x29, 0xc3, 0xaf, 0x91, 0x6d, 0x37, 0x8c, 0x54, + 0xea, 0x51, 0xed, 0x1a, 0xe1, 0xe5, 0x58, 0x61, 0xd0, 0xb3, 0x8a, 0x5f, 0x54, 0xc9, 0xcb, 0x49, + 0x58, 0xa2, 0x0b, 0xe8, 0x21, 0x93, 0x78, 0x9e, 0x87, 0x81, 0x6e, 0x48, 0x64, 0xa8, 0x56, 0x75, + 0x6d, 0x33, 0xb5, 0x26, 0x83, 0xd0, 0x67, 0xd4, 0xb6, 0xd2, 0x10, 0x6a, 0xcf, 0x28, 0xae, 0x23, + 0xe4, 0x30, 0xda, 0xb9, 0x88, 0x78, 0x8e, 0x17, 0x89, 0xc7, 0x56, 0x1c, 0x73, 0x90, 0x95, 0x62, + 0x01, 0xb5, 0xbf, 0x50, 0x86, 0x33, 0xb9, 0xbe, 0x73, 0xb4, 0xeb, 0x6d, 0xdf, 0x73, 0x23, 0x5f, + 0x59, 0x13, 0xf1, 0x38, 0x83, 0xa4, 0xb3, 0xb3, 0x26, 0xca, 0xb1, 0xc2, 0x40, 0x17, 0x60, 0x90, + 0x09, 0x45, 0x53, 0x49, 0x58, 0x17, 0xab, 0x3c, 0xf0, 0x14, 0x07, 0xf7, 0x9d, 0x37, 0xfb, 0x09, + 0xca, 0xc1, 0xf8, 0xad, 0xe4, 0x85, 0x42, 0xbb, 0xeb, 0xfb, 0x2d, 0xcc, 0x80, 0xe8, 0x7d, 0x62, + 0xbc, 0x12, 0xe6, 0x33, 0xd8, 0x69, 0xfa, 0xa1, 0x36, 0x68, 0x4f, 0xc1, 0xf0, 0x2e, 0xd9, 0x0f, + 0x5c, 0x6f, 0x3b, 0x69, 0x56, 0x75, 0x8d, 0x17, 0x63, 0x09, 0x37, 0xb3, 0x06, 0x0e, 0x1f, 0x75, + 0xc2, 0xeb, 0x91, 0x9e, 0xec, 0xc9, 0x0f, 0x95, 0x61, 0x12, 0x2f, 0x56, 0xdf, 0x9d, 0x88, 0x9b, + 0xe9, 0x89, 0x38, 0xea, 0x84, 0xd7, 0xbd, 0x67, 0xe3, 0xe7, 
0x2d, 0x98, 0x64, 0xb9, 0x69, 0x84, + 0x87, 0xbc, 0xeb, 0x7b, 0xc7, 0xf0, 0x14, 0x78, 0x02, 0x06, 0x03, 0xda, 0x68, 0x32, 0xfb, 0x2a, + 0xeb, 0x09, 0xe6, 0x30, 0xf4, 0x18, 0x0c, 0xb0, 0x2e, 0xd0, 0xc9, 0x1b, 0xe3, 0x47, 0x70, 0xd5, + 0x89, 0x1c, 0xcc, 0x4a, 0x59, 0xd8, 0x25, 0x4c, 0x3a, 0x2d, 0x97, 0x77, 0x3a, 0x56, 0x59, 0xbf, + 0x33, 0xbc, 0xea, 0x33, 0xbb, 0xf6, 0xf6, 0xc2, 0x2e, 0x65, 0x93, 0x2c, 0x7e, 0x66, 0xff, 0x51, + 0x09, 0xce, 0x65, 0xd6, 0xeb, 0x3b, 0xec, 0x52, 0x71, 0xed, 0x87, 0x99, 0x7d, 0xa4, 0x7c, 0x8c, + 0x46, 0xab, 0x03, 0xfd, 0x72, 0xff, 0x83, 0x7d, 0x44, 0x43, 0xca, 0x1c, 0xb2, 0x77, 0x48, 0x34, + 0xa4, 0xcc, 0xbe, 0xe5, 0x88, 0x09, 0xfe, 0xbc, 0x94, 0xf3, 0x2d, 0x4c, 0x60, 0x70, 0x91, 0x9e, + 0x33, 0x0c, 0x18, 0xca, 0x47, 0x38, 0x3f, 0x63, 0x78, 0x19, 0x56, 0x50, 0xb4, 0x00, 0x93, 0x6d, + 0xd7, 0xa3, 0x87, 0xcf, 0xbe, 0xc9, 0x8a, 0xab, 0x60, 0x75, 0x6b, 0x26, 0x18, 0x27, 0xf1, 0x91, + 0xab, 0x45, 0x4a, 0xe2, 0x5f, 0xf7, 0xca, 0xa1, 0x76, 0xdd, 0xbc, 0xa9, 0xce, 0x57, 0xa3, 0x98, + 0x11, 0x35, 0x69, 0x4d, 0x93, 0x13, 0x95, 0xfb, 0x97, 0x13, 0x8d, 0x65, 0xcb, 0x88, 0x66, 0x5f, + 0x81, 0xf1, 0x07, 0x56, 0x0c, 0xd8, 0x5f, 0x2b, 0xc3, 0xa3, 0x05, 0xdb, 0x9e, 0x9f, 0xf5, 0xc6, + 0x1c, 0x68, 0x67, 0x7d, 0x6a, 0x1e, 0x6a, 0x70, 0x72, 0xab, 0xdb, 0x6a, 0xed, 0x33, 0x5f, 0x0e, + 0xd2, 0x94, 0x18, 0x82, 0xa7, 0x94, 0xc2, 0x91, 0x93, 0x2b, 0x19, 0x38, 0x38, 0xb3, 0x26, 0x7d, + 0x62, 0xd1, 0x9b, 0x64, 0x5f, 0x91, 0x4a, 0x3c, 0xb1, 0xb0, 0x0e, 0xc4, 0x26, 0x2e, 0xba, 0x02, + 0xd3, 0xce, 0x9e, 0xe3, 0xf2, 0x70, 0xd3, 0x92, 0x00, 0x7f, 0x63, 0x29, 0x79, 0xee, 0x42, 0x12, + 0x01, 0xa7, 0xeb, 0xa0, 0xd7, 0x00, 0xf9, 0x9b, 0xcc, 0xe2, 0xbb, 0x79, 0x85, 0x78, 0x42, 0xeb, + 0xca, 0xe6, 0xae, 0x1c, 0x1f, 0x09, 0x37, 0x52, 0x18, 0x38, 0xa3, 0x56, 0x22, 0x22, 0xd0, 0x50, + 0x7e, 0x44, 0xa0, 0xe2, 0x73, 0xb1, 0x67, 0xe2, 0x9b, 0xff, 0x64, 0xd1, 0xeb, 0x8b, 0x33, 0xf9, + 0x66, 0x00, 0xcd, 0x57, 0x98, 0xd5, 0x24, 0x97, 0xf5, 0x6a, 0xf1, 0x53, 0x4e, 0x69, 0x56, 0x93, + 0x31, 0x10, 0x9b, 0xb8, 0x7c, 0x41, 0x84, 0xb1, 0xdb, 0xae, 0xc1, 0xe2, 0x8b, 0x28, 0x5f, 0x0a, + 0x03, 0x7d, 0x1c, 0x86, 0x9b, 0xee, 0x9e, 0x1b, 0x0a, 0x49, 0xd7, 0xa1, 0xd5, 0x4a, 0xf1, 0x39, + 0x58, 0xe5, 0x64, 0xb0, 0xa4, 0x67, 0xff, 0x50, 0x09, 0xc6, 0x65, 0x8b, 0xaf, 0x77, 0xfd, 0xc8, + 0x39, 0x86, 0x6b, 0xf9, 0x8a, 0x71, 0x2d, 0xbf, 0xaf, 0x28, 0xd4, 0x19, 0xeb, 0x52, 0xee, 0x75, + 0x7c, 0x23, 0x71, 0x1d, 0x3f, 0xd9, 0x9b, 0x54, 0xf1, 0x35, 0xfc, 0x8f, 0x2d, 0x98, 0x36, 0xf0, + 0x8f, 0xe1, 0x36, 0x58, 0x31, 0x6f, 0x83, 0xc7, 0x7b, 0x7e, 0x43, 0xce, 0x2d, 0xf0, 0xbd, 0xe5, + 0x44, 0xdf, 0xd9, 0xe9, 0xff, 0x26, 0x0c, 0xec, 0x38, 0x41, 0xb3, 0x28, 0xb5, 0x43, 0xaa, 0xd2, + 0xfc, 0x55, 0x27, 0x10, 0x6a, 0xe7, 0x67, 0xe4, 0xa8, 0xd3, 0xa2, 0x9e, 0x2a, 0x67, 0xd6, 0x14, + 0x7a, 0x09, 0x86, 0xc2, 0x86, 0xdf, 0x51, 0x9e, 0x1c, 0xe7, 0xd9, 0x40, 0xb3, 0x92, 0xfb, 0x07, + 0x73, 0xc8, 0x6c, 0x8e, 0x16, 0x63, 0x81, 0x8f, 0x3e, 0x01, 0xe3, 0xec, 0x97, 0xb2, 0x01, 0x2b, + 0xe7, 0x8b, 0x23, 0xea, 0x3a, 0x22, 0x37, 0x90, 0x34, 0x8a, 0xb0, 0x49, 0x6a, 0x76, 0x1b, 0x2a, + 0xea, 0xb3, 0x1e, 0xaa, 0xde, 0xf6, 0xdf, 0x97, 0xe1, 0x44, 0xc6, 0x9a, 0x43, 0xa1, 0x31, 0x13, + 0x97, 0xfb, 0x5c, 0xaa, 0x6f, 0x73, 0x2e, 0x42, 0xf6, 0x1a, 0x6a, 0x8a, 0xb5, 0xd5, 0x77, 0xa3, + 0x37, 0x43, 0x92, 0x6c, 0x94, 0x16, 0xf5, 0x6e, 0x94, 0x36, 0x76, 0x6c, 0x43, 0x4d, 0x1b, 0x52, + 0x3d, 0x7d, 0xa8, 0x73, 0xfa, 0xa7, 0x65, 0x38, 0x99, 0x15, 0x7d, 0x11, 0x7d, 0x36, 0x91, 0x58, + 0xf4, 0x85, 0x7e, 0xe3, 0x36, 0xf2, 0x6c, 0xa3, 0x22, 0x20, 0xdc, 0xbc, 0x99, 0x6a, 
0xb4, 0xe7, + 0x30, 0x8b, 0x36, 0x59, 0x48, 0x8a, 0x80, 0x27, 0x84, 0x95, 0xc7, 0xc7, 0x07, 0xfb, 0xee, 0x80, + 0xc8, 0x24, 0x1b, 0x26, 0xec, 0x4b, 0x64, 0x71, 0x6f, 0xfb, 0x12, 0xd9, 0xf2, 0xac, 0x0b, 0xa3, + 0xda, 0xd7, 0x3c, 0xd4, 0x19, 0xdf, 0xa5, 0xb7, 0x95, 0xd6, 0xef, 0x87, 0x3a, 0xeb, 0x3f, 0x66, + 0x41, 0xc2, 0xe5, 0x40, 0x89, 0xc5, 0xac, 0x5c, 0xb1, 0xd8, 0x79, 0x18, 0x08, 0xfc, 0x16, 0x49, + 0x66, 0xe0, 0xc4, 0x7e, 0x8b, 0x60, 0x06, 0xa1, 0x18, 0x51, 0x2c, 0xec, 0x18, 0xd3, 0x1f, 0x72, + 0xe2, 0x89, 0xf6, 0x04, 0x0c, 0xb6, 0xc8, 0x1e, 0x69, 0x25, 0x13, 0x25, 0x5d, 0xa7, 0x85, 0x98, + 0xc3, 0xec, 0x9f, 0x1f, 0x80, 0xb3, 0x85, 0x41, 0x5d, 0xe8, 0x73, 0x68, 0xdb, 0x89, 0xc8, 0x1d, + 0x67, 0x3f, 0x99, 0xd1, 0xe4, 0x0a, 0x2f, 0xc6, 0x12, 0xce, 0x3c, 0xc9, 0x78, 0x60, 0xf2, 0x84, + 0x10, 0x51, 0xc4, 0x23, 0x17, 0x50, 0x53, 0x28, 0x55, 0x3e, 0x0a, 0xa1, 0xd4, 0x73, 0x00, 0x61, + 0xd8, 0xe2, 0x86, 0x59, 0x4d, 0xe1, 0xa2, 0x16, 0x07, 0xb0, 0xaf, 0x5f, 0x17, 0x10, 0xac, 0x61, + 0xa1, 0x2a, 0x4c, 0x75, 0x02, 0x3f, 0xe2, 0x32, 0xd9, 0x2a, 0xb7, 0x5d, 0x1c, 0x34, 0xe3, 0x69, + 0xd4, 0x12, 0x70, 0x9c, 0xaa, 0x81, 0x5e, 0x84, 0x51, 0x11, 0x63, 0xa3, 0xe6, 0xfb, 0x2d, 0x21, + 0x06, 0x52, 0xe6, 0x7c, 0xf5, 0x18, 0x84, 0x75, 0x3c, 0xad, 0x1a, 0x13, 0xf4, 0x0e, 0x67, 0x56, + 0xe3, 0xc2, 0x5e, 0x0d, 0x2f, 0x11, 0x89, 0x75, 0xa4, 0xaf, 0x48, 0xac, 0xb1, 0x60, 0xac, 0xd2, + 0xb7, 0xde, 0x11, 0x7a, 0x8a, 0x92, 0x7e, 0x76, 0x00, 0x4e, 0x88, 0x85, 0xf3, 0xb0, 0x97, 0xcb, + 0xcd, 0xf4, 0x72, 0x39, 0x0a, 0xd1, 0xd9, 0xbb, 0x6b, 0xe6, 0xb8, 0xd7, 0xcc, 0x0f, 0x5b, 0x60, + 0xb2, 0x57, 0xe8, 0xff, 0xcb, 0x4d, 0x09, 0xf5, 0x62, 0x2e, 0xbb, 0xa6, 0xa2, 0x7a, 0xbe, 0xcd, + 0xe4, 0x50, 0xf6, 0x7f, 0xb4, 0xe0, 0xf1, 0x9e, 0x14, 0xd1, 0x32, 0x54, 0x18, 0x0f, 0xa8, 0xbd, + 0xce, 0x9e, 0x54, 0xb6, 0xcd, 0x12, 0x90, 0xc3, 0x92, 0xc6, 0x35, 0xd1, 0x72, 0x2a, 0xf7, 0xd6, + 0x53, 0x19, 0xb9, 0xb7, 0x4e, 0x19, 0xc3, 0xf3, 0x80, 0xc9, 0xb7, 0x7e, 0x90, 0xde, 0x38, 0x86, + 0x5f, 0x11, 0xfa, 0xa0, 0x21, 0xf6, 0xb3, 0x13, 0x62, 0x3f, 0x64, 0x62, 0x6b, 0x77, 0xc8, 0x47, + 0x61, 0x8a, 0x05, 0xdf, 0x62, 0x96, 0xf6, 0xc2, 0xe3, 0xa9, 0x14, 0x5b, 0xd3, 0x5e, 0x4f, 0xc0, + 0x70, 0x0a, 0xdb, 0xfe, 0xc3, 0x32, 0x0c, 0xf1, 0xed, 0x77, 0x0c, 0x6f, 0xc2, 0xa7, 0xa1, 0xe2, + 0xb6, 0xdb, 0x5d, 0x9e, 0x4e, 0x69, 0x90, 0xfb, 0x46, 0xd3, 0x79, 0x5a, 0x95, 0x85, 0x38, 0x86, + 0xa3, 0x15, 0x21, 0x71, 0x2e, 0x88, 0xef, 0xc9, 0x3b, 0x3e, 0x5f, 0x75, 0x22, 0x87, 0x33, 0x38, + 0xea, 0x9e, 0x8d, 0x65, 0xd3, 0xe8, 0x53, 0x00, 0x61, 0x14, 0xb8, 0xde, 0x36, 0x2d, 0x13, 0x61, + 0x85, 0xdf, 0x5f, 0x40, 0xad, 0xae, 0x90, 0x39, 0xcd, 0xf8, 0xcc, 0x51, 0x00, 0xac, 0x51, 0x44, + 0xf3, 0xc6, 0x4d, 0x3f, 0x9b, 0x98, 0x3b, 0xe0, 0x54, 0xe3, 0x39, 0x9b, 0xfd, 0x10, 0x54, 0x14, + 0xf1, 0x5e, 0xf2, 0xa7, 0x31, 0x9d, 0x2d, 0xfa, 0x08, 0x4c, 0x26, 0xfa, 0x76, 0x28, 0xf1, 0xd5, + 0x2f, 0x58, 0x30, 0xc9, 0x3b, 0xb3, 0xec, 0xed, 0x89, 0xdb, 0xe0, 0x2d, 0x38, 0xd9, 0xca, 0x38, + 0x95, 0xc5, 0xf4, 0xf7, 0x7f, 0x8a, 0x2b, 0x71, 0x55, 0x16, 0x14, 0x67, 0xb6, 0x81, 0x2e, 0xd2, + 0x1d, 0x47, 0x4f, 0x5d, 0xa7, 0x25, 0x5c, 0xa5, 0xc7, 0xf8, 0x6e, 0xe3, 0x65, 0x58, 0x41, 0xed, + 0xdf, 0xb1, 0x60, 0x9a, 0xf7, 0xfc, 0x1a, 0xd9, 0x57, 0x67, 0xd3, 0x37, 0xb2, 0xef, 0x22, 0x91, + 0x5f, 0x29, 0x27, 0x91, 0x9f, 0xfe, 0x69, 0xe5, 0xc2, 0x4f, 0xfb, 0xb2, 0x05, 0x62, 0x85, 0x1c, + 0x83, 0x10, 0xe2, 0xdb, 0x4d, 0x21, 0xc4, 0x6c, 0xfe, 0x26, 0xc8, 0x91, 0x3e, 0xfc, 0x99, 0x05, + 0x53, 0x1c, 0x21, 0xd6, 0x96, 0x7f, 0x43, 0xe7, 0xa1, 0x9f, 0x74, 0xdf, 0xd7, 0xc8, 0xfe, 0x86, + 0x5f, 0x73, 
0xa2, 0x9d, 0xec, 0x8f, 0x32, 0x26, 0x6b, 0xa0, 0x70, 0xb2, 0x9a, 0x72, 0x03, 0x19, + 0x79, 0x6e, 0x7a, 0xc4, 0x8f, 0x38, 0x6c, 0x9e, 0x1b, 0xfb, 0xeb, 0x16, 0x20, 0xde, 0x8c, 0xc1, + 0xb8, 0x51, 0x76, 0x88, 0x95, 0x6a, 0x17, 0x5d, 0x7c, 0x34, 0x29, 0x08, 0xd6, 0xb0, 0x8e, 0x64, + 0x78, 0x12, 0x26, 0x0f, 0xe5, 0xde, 0x26, 0x0f, 0x87, 0x18, 0xd1, 0x7f, 0x33, 0x04, 0x49, 0xdf, + 0x2a, 0x74, 0x0b, 0xc6, 0x1a, 0x4e, 0xc7, 0xd9, 0x74, 0x5b, 0x6e, 0xe4, 0x92, 0xb0, 0xc8, 0x1e, + 0x6a, 0x49, 0xc3, 0x13, 0x4a, 0x6a, 0xad, 0x04, 0x1b, 0x74, 0xd0, 0x3c, 0x40, 0x27, 0x70, 0xf7, + 0xdc, 0x16, 0xd9, 0x66, 0xb2, 0x12, 0x16, 0x9c, 0x81, 0x1b, 0x67, 0xc9, 0x52, 0xac, 0x61, 0x64, + 0x38, 0xb2, 0x97, 0x1f, 0xb2, 0x23, 0x3b, 0x1c, 0x9b, 0x23, 0xfb, 0xc0, 0xa1, 0x1c, 0xd9, 0x47, + 0x0e, 0xed, 0xc8, 0x3e, 0xd8, 0x97, 0x23, 0x3b, 0x86, 0xd3, 0x92, 0xf7, 0xa4, 0xff, 0x57, 0xdc, + 0x16, 0x11, 0x0f, 0x0e, 0x1e, 0x51, 0x62, 0xf6, 0xde, 0xc1, 0xdc, 0x69, 0x9c, 0x89, 0x81, 0x73, + 0x6a, 0xa2, 0x8f, 0xc1, 0x8c, 0xd3, 0x6a, 0xf9, 0x77, 0xd4, 0xa4, 0x2e, 0x87, 0x0d, 0xa7, 0xc5, + 0x95, 0x10, 0xc3, 0x8c, 0xea, 0x63, 0xf7, 0x0e, 0xe6, 0x66, 0x16, 0x72, 0x70, 0x70, 0x6e, 0x6d, + 0xf4, 0x61, 0xa8, 0x74, 0x02, 0xbf, 0xb1, 0xa6, 0x39, 0x80, 0x9e, 0xa3, 0x03, 0x58, 0x93, 0x85, + 0xf7, 0x0f, 0xe6, 0xc6, 0xd5, 0x1f, 0x76, 0xe1, 0xc7, 0x15, 0x32, 0x3c, 0xd3, 0x47, 0x8f, 0xd4, + 0x33, 0x7d, 0x17, 0x4e, 0xd4, 0x49, 0xe0, 0x3a, 0x2d, 0xf7, 0x2d, 0xca, 0x2f, 0xcb, 0xf3, 0x69, + 0x03, 0x2a, 0x41, 0xe2, 0x44, 0xee, 0x2b, 0xe6, 0xa6, 0x96, 0x70, 0x44, 0x9e, 0xc0, 0x31, 0x21, + 0xfb, 0x7f, 0x5b, 0x30, 0x2c, 0x7c, 0xa9, 0x8e, 0x81, 0x6b, 0x5c, 0x30, 0x34, 0x09, 0x73, 0xd9, + 0x03, 0xc6, 0x3a, 0x93, 0xab, 0x43, 0x58, 0x4d, 0xe8, 0x10, 0x1e, 0x2f, 0x22, 0x52, 0xac, 0x3d, + 0xf8, 0x6b, 0x65, 0xca, 0xbd, 0x1b, 0x5e, 0xbd, 0x0f, 0x7f, 0x08, 0xd6, 0x61, 0x38, 0x14, 0x5e, + 0xa5, 0xa5, 0x7c, 0x9f, 0x86, 0xe4, 0x24, 0xc6, 0x76, 0x6c, 0xc2, 0x8f, 0x54, 0x12, 0xc9, 0x74, + 0x57, 0x2d, 0x3f, 0x44, 0x77, 0xd5, 0x5e, 0x7e, 0xcf, 0x03, 0x47, 0xe1, 0xf7, 0x6c, 0x7f, 0x95, + 0xdd, 0x9c, 0x7a, 0xf9, 0x31, 0x30, 0x55, 0x57, 0xcc, 0x3b, 0xd6, 0x2e, 0x58, 0x59, 0xa2, 0x53, + 0x39, 0xcc, 0xd5, 0xcf, 0x59, 0x70, 0x36, 0xe3, 0xab, 0x34, 0x4e, 0xeb, 0x19, 0x18, 0x71, 0xba, + 0x4d, 0x57, 0xed, 0x65, 0x4d, 0x9f, 0xb8, 0x20, 0xca, 0xb1, 0xc2, 0x40, 0x4b, 0x30, 0x4d, 0xee, + 0x76, 0x5c, 0xae, 0x4a, 0xd5, 0xcd, 0x7f, 0xcb, 0xdc, 0x01, 0x6f, 0x39, 0x09, 0xc4, 0x69, 0x7c, + 0x15, 0x6b, 0xa6, 0x9c, 0x1b, 0x6b, 0xe6, 0xef, 0x5a, 0x30, 0xaa, 0xfc, 0x2a, 0x1f, 0xfa, 0x68, + 0x7f, 0xd4, 0x1c, 0xed, 0x47, 0x0b, 0x46, 0x3b, 0x67, 0x98, 0x7f, 0xab, 0xa4, 0xfa, 0x5b, 0xf3, + 0x83, 0xa8, 0x0f, 0x0e, 0xee, 0xc1, 0x5d, 0x17, 0x2e, 0xc3, 0xa8, 0xd3, 0xe9, 0x48, 0x80, 0xb4, + 0x41, 0x63, 0x11, 0x94, 0xe3, 0x62, 0xac, 0xe3, 0x28, 0x4f, 0x8a, 0x72, 0xae, 0x27, 0x45, 0x13, + 0x20, 0x72, 0x82, 0x6d, 0x12, 0xd1, 0x32, 0x61, 0x32, 0x9b, 0x7f, 0xde, 0x74, 0x23, 0xb7, 0x35, + 0xef, 0x7a, 0x51, 0x18, 0x05, 0xf3, 0xab, 0x5e, 0x74, 0x23, 0xe0, 0x4f, 0x48, 0x2d, 0x5a, 0x93, + 0xa2, 0x85, 0x35, 0xba, 0x32, 0x86, 0x00, 0x6b, 0x63, 0xd0, 0x34, 0x66, 0x58, 0x17, 0xe5, 0x58, + 0x61, 0xd8, 0x1f, 0x62, 0xb7, 0x0f, 0x1b, 0xd3, 0xc3, 0x45, 0x2a, 0xfa, 0xfb, 0x63, 0x6a, 0x36, + 0x98, 0x26, 0xb3, 0xaa, 0xc7, 0x43, 0x2a, 0x3e, 0xec, 0x69, 0xc3, 0xba, 0x5f, 0x5f, 0x1c, 0x34, + 0x09, 0x7d, 0x47, 0xca, 0x40, 0xe5, 0xd9, 0x1e, 0xb7, 0xc6, 0x21, 0x4c, 0x52, 0x58, 0x3a, 0x15, + 0x96, 0x6c, 0x62, 0xb5, 0x26, 0xf6, 0x85, 0x96, 0x4e, 0x45, 0x00, 0x70, 0x8c, 0x43, 0x99, 0x29, + 0xf5, 0x27, 0x9c, 0x41, 0x71, 0x58, 
0x51, 0x85, 0x1d, 0x62, 0x0d, 0x03, 0x5d, 0x12, 0x02, 0x05, + 0xae, 0x17, 0x78, 0x34, 0x21, 0x50, 0x90, 0xc3, 0xa5, 0x49, 0x81, 0x2e, 0xc3, 0xa8, 0xca, 0xa0, + 0x5d, 0xe3, 0x89, 0x8c, 0xc4, 0x32, 0x5b, 0x8e, 0x8b, 0xb1, 0x8e, 0x83, 0x36, 0x60, 0x32, 0xe4, + 0x72, 0x36, 0x15, 0xeb, 0x99, 0xcb, 0x2b, 0xdf, 0x2f, 0xad, 0x80, 0xea, 0x26, 0xf8, 0x3e, 0x2b, + 0xe2, 0xa7, 0x93, 0xf4, 0xf3, 0x4f, 0x92, 0x40, 0xaf, 0xc2, 0x44, 0xcb, 0x77, 0x9a, 0x8b, 0x4e, + 0xcb, 0xf1, 0x1a, 0x6c, 0x7c, 0x46, 0xcc, 0x44, 0xac, 0xd7, 0x0d, 0x28, 0x4e, 0x60, 0x53, 0xe6, + 0x4d, 0x2f, 0x11, 0xf1, 0xc9, 0x1d, 0x6f, 0x9b, 0x84, 0x22, 0x1f, 0x32, 0x63, 0xde, 0xae, 0xe7, + 0xe0, 0xe0, 0xdc, 0xda, 0xe8, 0x25, 0x18, 0x93, 0x9f, 0xaf, 0x85, 0xc5, 0x88, 0x9d, 0x52, 0x34, + 0x18, 0x36, 0x30, 0xd1, 0x1d, 0x38, 0x25, 0xff, 0x6f, 0x04, 0xce, 0xd6, 0x96, 0xdb, 0x10, 0xbe, + 0xe2, 0xdc, 0x7b, 0x75, 0x41, 0xba, 0x58, 0x2e, 0x67, 0x21, 0xdd, 0x3f, 0x98, 0x3b, 0x2f, 0x46, + 0x2d, 0x13, 0xce, 0x26, 0x31, 0x9b, 0x3e, 0x5a, 0x83, 0x13, 0x3b, 0xc4, 0x69, 0x45, 0x3b, 0x4b, + 0x3b, 0xa4, 0xb1, 0x2b, 0x37, 0x1d, 0x0b, 0xb6, 0xa1, 0x39, 0x70, 0x5c, 0x4d, 0xa3, 0xe0, 0xac, + 0x7a, 0xe8, 0x0d, 0x98, 0xe9, 0x74, 0x37, 0x5b, 0x6e, 0xb8, 0xb3, 0xee, 0x47, 0xcc, 0x14, 0x48, + 0x25, 0xe4, 0x16, 0x51, 0x39, 0x54, 0x38, 0x93, 0x5a, 0x0e, 0x1e, 0xce, 0xa5, 0x80, 0xde, 0x82, + 0x53, 0x89, 0xc5, 0x20, 0xe2, 0x12, 0x4c, 0xe4, 0x67, 0x7b, 0xa8, 0x67, 0x55, 0x10, 0x21, 0x3e, + 0xb2, 0x40, 0x38, 0xbb, 0x09, 0xf4, 0x32, 0x80, 0xdb, 0x59, 0x71, 0xda, 0x6e, 0x8b, 0x3e, 0x17, + 0x4f, 0xb0, 0x75, 0x42, 0x9f, 0x0e, 0xb0, 0x5a, 0x93, 0xa5, 0xf4, 0x7c, 0x16, 0xff, 0xf6, 0xb1, + 0x86, 0x8d, 0x6a, 0x30, 0x21, 0xfe, 0xed, 0x8b, 0x69, 0x9d, 0x56, 0x21, 0x00, 0x26, 0x64, 0x0d, + 0x35, 0x97, 0xc8, 0x2c, 0x61, 0xb3, 0x97, 0xa8, 0x8f, 0xb6, 0xe1, 0xac, 0xcc, 0xde, 0xa5, 0xaf, + 0x53, 0x39, 0x0f, 0x21, 0x4b, 0xb3, 0x30, 0xc2, 0xfd, 0x43, 0x16, 0x8a, 0x10, 0x71, 0x31, 0x1d, + 0x7a, 0xbf, 0xeb, 0xcb, 0x9d, 0x7b, 0xd0, 0x9e, 0xe2, 0xe6, 0x49, 0xf4, 0x7e, 0xbf, 0x9e, 0x04, + 0xe2, 0x34, 0x3e, 0x0a, 0xe1, 0x94, 0xeb, 0x65, 0xad, 0xee, 0xd3, 0x8c, 0xd0, 0x47, 0xb8, 0xf3, + 0x70, 0xf1, 0xca, 0xce, 0x84, 0xf3, 0x95, 0x9d, 0x49, 0xfb, 0xed, 0x59, 0xe1, 0xfd, 0xb6, 0x45, + 0x6b, 0x6b, 0x9c, 0x3a, 0xfa, 0x34, 0x8c, 0xe9, 0x1f, 0x26, 0xb8, 0x8e, 0x0b, 0xd9, 0x8c, 0xac, + 0x76, 0x3e, 0x70, 0x3e, 0x5f, 0x9d, 0x01, 0x3a, 0x0c, 0x1b, 0x14, 0x51, 0x23, 0xc3, 0xcd, 0xfe, + 0x52, 0x7f, 0x5c, 0x4d, 0xff, 0x46, 0x68, 0x04, 0xb2, 0x97, 0x3d, 0xba, 0x0e, 0x23, 0x8d, 0x96, + 0x4b, 0xbc, 0x68, 0xb5, 0x56, 0x14, 0x4b, 0x6f, 0x49, 0xe0, 0x88, 0x7d, 0x24, 0xb2, 0x26, 0xf0, + 0x32, 0xac, 0x28, 0xd8, 0xbf, 0x5a, 0x82, 0xb9, 0x1e, 0x29, 0x38, 0x12, 0x2a, 0x29, 0xab, 0x2f, + 0x95, 0xd4, 0x82, 0xcc, 0x3a, 0xbf, 0x9e, 0x90, 0x76, 0x25, 0x32, 0xca, 0xc7, 0x32, 0xaf, 0x24, + 0x7e, 0xdf, 0x2e, 0x02, 0xba, 0x56, 0x6b, 0xa0, 0xa7, 0x93, 0x8b, 0xa1, 0xcd, 0x1e, 0xec, 0xff, + 0x09, 0x9c, 0xab, 0x99, 0xb4, 0xbf, 0x5a, 0x82, 0x53, 0x6a, 0x08, 0xbf, 0x75, 0x07, 0xee, 0x66, + 0x7a, 0xe0, 0x8e, 0x40, 0xaf, 0x6b, 0xdf, 0x80, 0x21, 0x1e, 0x1c, 0xb0, 0x0f, 0xd6, 0xfb, 0x09, + 0x33, 0xf8, 0xae, 0xe2, 0xf6, 0x8c, 0x00, 0xbc, 0xdf, 0x6f, 0xc1, 0x64, 0xc2, 0xd7, 0x0c, 0x61, + 0xcd, 0x21, 0xf9, 0x41, 0xd8, 0xe3, 0x2c, 0xc6, 0xfb, 0x3c, 0x0c, 0xec, 0xf8, 0x61, 0x94, 0x34, + 0xfa, 0xb8, 0xea, 0x87, 0x11, 0x66, 0x10, 0xfb, 0x77, 0x2d, 0x18, 0xdc, 0x70, 0x5c, 0x2f, 0x92, + 0x0a, 0x02, 0x2b, 0x47, 0x41, 0xd0, 0xcf, 0x77, 0xa1, 0x17, 0x61, 0x88, 0x6c, 0x6d, 0x91, 0x46, + 0x24, 0x66, 0x55, 0x46, 0x73, 0x18, 0x5a, 0x66, 0xa5, 0x94, 
0x17, 0x64, 0x8d, 0xf1, 0xbf, 0x58, + 0x20, 0xa3, 0xdb, 0x50, 0x89, 0xdc, 0x36, 0x59, 0x68, 0x36, 0x85, 0xda, 0xfc, 0x01, 0x22, 0x52, + 0x6c, 0x48, 0x02, 0x38, 0xa6, 0x65, 0x7f, 0xa1, 0x04, 0x10, 0x47, 0x55, 0xea, 0xf5, 0x89, 0x8b, + 0x29, 0x85, 0xea, 0x85, 0x0c, 0x85, 0x2a, 0x8a, 0x09, 0x66, 0x68, 0x53, 0xd5, 0x30, 0x95, 0xfb, + 0x1a, 0xa6, 0x81, 0xc3, 0x0c, 0xd3, 0x12, 0x4c, 0xc7, 0x51, 0xa1, 0xcc, 0xa0, 0x78, 0xec, 0xfa, + 0xdc, 0x48, 0x02, 0x71, 0x1a, 0xdf, 0x26, 0x70, 0x5e, 0x05, 0xc7, 0x11, 0x37, 0x1a, 0xb3, 0xca, + 0xd6, 0x15, 0xd4, 0x3d, 0xc6, 0x29, 0xd6, 0x18, 0x97, 0x72, 0x35, 0xc6, 0x3f, 0x69, 0xc1, 0xc9, + 0x64, 0x3b, 0xcc, 0x85, 0xf9, 0xf3, 0x16, 0x9c, 0x62, 0x7a, 0x73, 0xd6, 0x6a, 0x5a, 0x4b, 0xff, + 0x42, 0x61, 0xc0, 0x9f, 0x9c, 0x1e, 0xc7, 0x61, 0x43, 0xd6, 0xb2, 0x48, 0xe3, 0xec, 0x16, 0xed, + 0xff, 0x50, 0x82, 0x99, 0xbc, 0x48, 0x41, 0xcc, 0x69, 0xc3, 0xb9, 0x5b, 0xdf, 0x25, 0x77, 0x84, + 0x69, 0x7c, 0xec, 0xb4, 0xc1, 0x8b, 0xb1, 0x84, 0x27, 0xb3, 0x2a, 0x94, 0xfa, 0xcc, 0xaa, 0xb0, + 0x03, 0xd3, 0x77, 0x76, 0x88, 0x77, 0xd3, 0x0b, 0x9d, 0xc8, 0x0d, 0xb7, 0x5c, 0xa6, 0x63, 0xe6, + 0xeb, 0x46, 0xa6, 0x62, 0x9d, 0xbe, 0x9d, 0x44, 0xb8, 0x7f, 0x30, 0x77, 0xd6, 0x28, 0x88, 0xbb, + 0xcc, 0x0f, 0x12, 0x9c, 0x26, 0x9a, 0x4e, 0x4a, 0x31, 0xf0, 0x10, 0x93, 0x52, 0xd8, 0x9f, 0xb7, + 0xe0, 0x4c, 0x6e, 0x5e, 0x62, 0x74, 0x11, 0x46, 0x9c, 0x8e, 0xcb, 0xc5, 0xf4, 0xe2, 0x18, 0x65, + 0xe2, 0xa0, 0xda, 0x2a, 0x17, 0xd2, 0x2b, 0x28, 0x3d, 0xbd, 0x76, 0x5d, 0xaf, 0x99, 0x3c, 0xbd, + 0xae, 0xb9, 0x5e, 0x13, 0x33, 0x88, 0x3a, 0x8e, 0xcb, 0x79, 0xc7, 0xb1, 0xfd, 0x7d, 0x16, 0x08, + 0x87, 0xd3, 0x3e, 0xce, 0xee, 0x4f, 0xc0, 0xd8, 0x5e, 0x3a, 0x71, 0xd5, 0xf9, 0x7c, 0x0f, 0x5c, + 0x91, 0xae, 0x4a, 0x31, 0x64, 0x46, 0x92, 0x2a, 0x83, 0x96, 0xdd, 0x04, 0x01, 0xad, 0x12, 0x26, + 0x84, 0xee, 0xdd, 0x9b, 0xe7, 0x00, 0x9a, 0x0c, 0x97, 0x65, 0xb3, 0x2c, 0x99, 0x37, 0x73, 0x55, + 0x41, 0xb0, 0x86, 0x65, 0xff, 0xbb, 0x12, 0x8c, 0xca, 0x44, 0x49, 0x5d, 0xaf, 0x1f, 0x51, 0xd1, + 0xa1, 0x32, 0xa7, 0xa2, 0x4b, 0x50, 0x61, 0xb2, 0xcc, 0x5a, 0x2c, 0x61, 0x53, 0x92, 0x84, 0x35, + 0x09, 0xc0, 0x31, 0x0e, 0xdd, 0x45, 0x61, 0x77, 0x93, 0xa1, 0x27, 0xdc, 0x23, 0xeb, 0xbc, 0x18, + 0x4b, 0x38, 0xfa, 0x18, 0x4c, 0xf1, 0x7a, 0x81, 0xdf, 0x71, 0xb6, 0xb9, 0xfe, 0x63, 0x50, 0xc5, + 0x9c, 0x98, 0x5a, 0x4b, 0xc0, 0xee, 0x1f, 0xcc, 0x9d, 0x4c, 0x96, 0x31, 0xc5, 0x5e, 0x8a, 0x0a, + 0x33, 0x73, 0xe2, 0x8d, 0xd0, 0xdd, 0x9f, 0xb2, 0x8e, 0x8a, 0x41, 0x58, 0xc7, 0xb3, 0x3f, 0x0d, + 0x28, 0x9d, 0x32, 0x0a, 0xbd, 0xc6, 0x6d, 0x5b, 0xdd, 0x80, 0x34, 0x8b, 0x14, 0x7d, 0x7a, 0x64, + 0x05, 0xe9, 0xd9, 0xc4, 0x6b, 0x61, 0x55, 0xdf, 0xfe, 0x8b, 0x65, 0x98, 0x4a, 0xfa, 0x72, 0xa3, + 0xab, 0x30, 0xc4, 0x59, 0x0f, 0x41, 0xbe, 0xc0, 0x8e, 0x44, 0xf3, 0x00, 0x67, 0x87, 0xb0, 0xe0, + 0x5e, 0x44, 0x7d, 0xf4, 0x06, 0x8c, 0x36, 0xfd, 0x3b, 0xde, 0x1d, 0x27, 0x68, 0x2e, 0xd4, 0x56, + 0xc5, 0x72, 0xce, 0x7c, 0xd8, 0x56, 0x63, 0x34, 0xdd, 0xab, 0x9c, 0xe9, 0x4c, 0x63, 0x10, 0xd6, + 0xc9, 0xa1, 0x0d, 0x16, 0x67, 0x7e, 0xcb, 0xdd, 0x5e, 0x73, 0x3a, 0x45, 0x8e, 0x0e, 0x4b, 0x12, + 0x49, 0xa3, 0x3c, 0x2e, 0x82, 0xd1, 0x73, 0x00, 0x8e, 0x09, 0xa1, 0xcf, 0xc2, 0x89, 0x30, 0x47, + 0xdc, 0x9e, 0x97, 0x41, 0xb0, 0x48, 0x02, 0xbd, 0xf8, 0xc8, 0xbd, 0x83, 0xb9, 0x13, 0x59, 0x82, + 0xf9, 0xac, 0x66, 0xec, 0x2f, 0x9e, 0x04, 0x63, 0x13, 0x1b, 0x09, 0x65, 0xad, 0x23, 0x4a, 0x28, + 0x8b, 0x61, 0x84, 0xb4, 0x3b, 0xd1, 0x7e, 0xd5, 0x0d, 0x8a, 0xd2, 0xea, 0x2f, 0x0b, 0x9c, 0x34, + 0x4d, 0x09, 0xc1, 0x8a, 0x4e, 0x76, 0xd6, 0xdf, 0xf2, 0x37, 0x30, 0xeb, 0xef, 0xc0, 
0x31, 0x66, + 0xfd, 0x5d, 0x87, 0xe1, 0x6d, 0x37, 0xc2, 0xa4, 0xe3, 0x0b, 0xa6, 0x3f, 0x73, 0x1d, 0x5e, 0xe1, + 0x28, 0xe9, 0xfc, 0x92, 0x02, 0x80, 0x25, 0x11, 0xf4, 0x9a, 0xda, 0x81, 0x43, 0xf9, 0x0f, 0xf3, + 0xb4, 0xc1, 0x43, 0xe6, 0x1e, 0x14, 0xb9, 0x7d, 0x87, 0x1f, 0x34, 0xb7, 0xef, 0x8a, 0xcc, 0xc8, + 0x3b, 0x92, 0xef, 0x95, 0xc4, 0x12, 0xee, 0xf6, 0xc8, 0xc3, 0x7b, 0x4b, 0xcf, 0x62, 0x5c, 0xc9, + 0x3f, 0x09, 0x54, 0x82, 0xe2, 0x3e, 0x73, 0x17, 0x7f, 0x9f, 0x05, 0xa7, 0x3a, 0x59, 0x09, 0xbd, + 0x85, 0x6d, 0xc0, 0x8b, 0x7d, 0xe7, 0x0c, 0x37, 0x1a, 0x64, 0x32, 0xb5, 0xec, 0xac, 0xf0, 0xd9, + 0xcd, 0xd1, 0x81, 0x0e, 0x36, 0x9b, 0x42, 0x47, 0xfd, 0x44, 0x4e, 0x12, 0xe4, 0x82, 0xd4, 0xc7, + 0x1b, 0x19, 0x09, 0x77, 0xdf, 0x9b, 0x97, 0x70, 0xb7, 0xef, 0x34, 0xbb, 0xaf, 0xa9, 0xf4, 0xc7, + 0xe3, 0xf9, 0x4b, 0x89, 0x27, 0x37, 0xee, 0x99, 0xf4, 0xf8, 0x35, 0x95, 0xf4, 0xb8, 0x20, 0x1e, + 0x30, 0x4f, 0x69, 0xdc, 0x33, 0xd5, 0xb1, 0x96, 0xae, 0x78, 0xf2, 0x68, 0xd2, 0x15, 0x1b, 0x57, + 0x0d, 0xcf, 0x98, 0xfb, 0x74, 0x8f, 0xab, 0xc6, 0xa0, 0x5b, 0x7c, 0xd9, 0xf0, 0xd4, 0xcc, 0xd3, + 0x0f, 0x94, 0x9a, 0xf9, 0x96, 0x9e, 0xea, 0x18, 0xf5, 0xc8, 0xe5, 0x4b, 0x91, 0xfa, 0x4c, 0x70, + 0x7c, 0x4b, 0xbf, 0x00, 0x4f, 0xe4, 0xd3, 0x55, 0xf7, 0x5c, 0x9a, 0x6e, 0xe6, 0x15, 0x98, 0x4a, + 0x9c, 0x7c, 0xf2, 0x78, 0x12, 0x27, 0x9f, 0x3a, 0xf2, 0xc4, 0xc9, 0xa7, 0x8f, 0x21, 0x71, 0xf2, + 0x23, 0xc7, 0x98, 0x38, 0xf9, 0x16, 0x33, 0xa8, 0xe1, 0x61, 0x7b, 0x44, 0xfc, 0xe2, 0xa7, 0x72, + 0xa2, 0x5e, 0xa5, 0x63, 0xfb, 0xf0, 0x8f, 0x53, 0x20, 0x1c, 0x93, 0xca, 0x48, 0xc8, 0x3c, 0xf3, + 0x10, 0x12, 0x32, 0xaf, 0xc7, 0x09, 0x99, 0xcf, 0xe4, 0x4f, 0x75, 0x86, 0x0b, 0x46, 0x4e, 0x1a, + 0xe6, 0x5b, 0x7a, 0xfa, 0xe4, 0x47, 0x0b, 0xb4, 0x26, 0x59, 0x82, 0xc7, 0x82, 0xa4, 0xc9, 0xaf, + 0xf2, 0xa4, 0xc9, 0x8f, 0xe5, 0x9f, 0xe4, 0xc9, 0xeb, 0xce, 0x48, 0x95, 0x4c, 0xfb, 0xa5, 0xc2, + 0x5e, 0xb2, 0x48, 0xcd, 0x39, 0xfd, 0x52, 0x71, 0x33, 0xd3, 0xfd, 0x52, 0x20, 0x1c, 0x93, 0xb2, + 0x7f, 0xa0, 0x04, 0xe7, 0x8a, 0xf7, 0x5b, 0x2c, 0x4d, 0xad, 0xc5, 0x4a, 0xe4, 0x84, 0x34, 0x95, + 0xbf, 0xd9, 0x62, 0xac, 0xbe, 0xa3, 0xf8, 0x5d, 0x81, 0x69, 0xe5, 0xbb, 0xd1, 0x72, 0x1b, 0xfb, + 0xeb, 0xf1, 0xcb, 0x57, 0xf9, 0xbb, 0xd7, 0x93, 0x08, 0x38, 0x5d, 0x07, 0x2d, 0xc0, 0xa4, 0x51, + 0xb8, 0x5a, 0x15, 0x6f, 0x33, 0x25, 0xbe, 0xad, 0x9b, 0x60, 0x9c, 0xc4, 0xb7, 0xbf, 0x64, 0xc1, + 0x23, 0x39, 0x19, 0x07, 0xfb, 0x0e, 0x52, 0xb7, 0x05, 0x93, 0x1d, 0xb3, 0x6a, 0x8f, 0xb8, 0x9a, + 0x46, 0x5e, 0x43, 0xd5, 0xd7, 0x04, 0x00, 0x27, 0x89, 0xda, 0x3f, 0x5d, 0x82, 0xb3, 0x85, 0xc6, + 0x88, 0x08, 0xc3, 0xe9, 0xed, 0x76, 0xe8, 0x2c, 0x05, 0xa4, 0x49, 0xbc, 0xc8, 0x75, 0x5a, 0xf5, + 0x0e, 0x69, 0x68, 0xf2, 0x70, 0x66, 0xd5, 0x77, 0x65, 0xad, 0xbe, 0x90, 0xc6, 0xc0, 0x39, 0x35, + 0xd1, 0x0a, 0xa0, 0x34, 0x44, 0xcc, 0x30, 0x8b, 0xf9, 0x9d, 0xa6, 0x87, 0x33, 0x6a, 0xa0, 0x0f, + 0xc1, 0xb8, 0x32, 0x72, 0xd4, 0x66, 0x9c, 0x1d, 0xec, 0x58, 0x07, 0x60, 0x13, 0x0f, 0x5d, 0xe6, + 0x41, 0xe3, 0x45, 0x7a, 0x01, 0x21, 0x3c, 0x9f, 0x94, 0x11, 0xe1, 0x45, 0x31, 0xd6, 0x71, 0x16, + 0x2f, 0xfe, 0xda, 0xef, 0x9f, 0x7b, 0xcf, 0x6f, 0xfe, 0xfe, 0xb9, 0xf7, 0xfc, 0xce, 0xef, 0x9f, + 0x7b, 0xcf, 0x77, 0xdd, 0x3b, 0x67, 0xfd, 0xda, 0xbd, 0x73, 0xd6, 0x6f, 0xde, 0x3b, 0x67, 0xfd, + 0xce, 0xbd, 0x73, 0xd6, 0xef, 0xdd, 0x3b, 0x67, 0x7d, 0xe1, 0x0f, 0xce, 0xbd, 0xe7, 0x13, 0xa5, + 0xbd, 0xcb, 0xff, 0x2f, 0x00, 0x00, 0xff, 0xff, 0x4d, 0xc9, 0x26, 0x9f, 0xc5, 0x07, 0x01, 0x00, } func (m *AWSElasticBlockStoreVolumeSource) Marshal() (dAtA []byte, err error) { @@ -9923,53 +9992,6 @@ func (m 
*EphemeralContainerCommon) MarshalToSizedBuffer(dAtA []byte) (int, error return len(dAtA) - i, nil } -func (m *EphemeralContainers) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *EphemeralContainers) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *EphemeralContainers) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.EphemeralContainers) > 0 { - for iNdEx := len(m.EphemeralContainers) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.EphemeralContainers[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - } - { - size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - func (m *EphemeralVolumeSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -10583,6 +10605,39 @@ func (m *GCEPersistentDiskVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, return len(dAtA) - i, nil } +func (m *GRPCAction) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GRPCAction) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GRPCAction) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Service != nil { + i -= len(*m.Service) + copy(dAtA[i:], *m.Service) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Service))) + i-- + dAtA[i] = 0x12 + } + i = encodeVarintGenerated(dAtA, i, uint64(m.Port)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil +} + func (m *GitRepoVolumeSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -10805,65 +10860,6 @@ func (m *HTTPHeader) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *Handler) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Handler) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Handler) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.TCPSocket != nil { - { - size, err := m.TCPSocket.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - if m.HTTPGet != nil { - { - size, err := m.HTTPGet.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - if m.Exec != nil { - { - size, err := m.Exec.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - func (m *HostAlias) Marshal() (dAtA []byte, err error) { 
size := m.Size() dAtA = make([]byte, size) @@ -11217,6 +11213,65 @@ func (m *Lifecycle) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *LifecycleHandler) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LifecycleHandler) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LifecycleHandler) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.TCPSocket != nil { + { + size, err := m.TCPSocket.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.HTTPGet != nil { + { + size, err := m.HTTPGet.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.Exec != nil { + { + size, err := m.Exec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + func (m *LimitRange) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -13230,6 +13285,18 @@ func (m *PersistentVolumeClaimSpec) MarshalToSizedBuffer(dAtA []byte) (int, erro _ = i var l int _ = l + if m.DataSourceRef != nil { + { + size, err := m.DataSourceRef.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x42 + } if m.DataSource != nil { { size, err := m.DataSource.MarshalToSizedBuffer(dAtA[:i]) @@ -13315,6 +13382,42 @@ func (m *PersistentVolumeClaimStatus) MarshalToSizedBuffer(dAtA []byte) (int, er _ = i var l int _ = l + if m.ResizeStatus != nil { + i -= len(*m.ResizeStatus) + copy(dAtA[i:], *m.ResizeStatus) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.ResizeStatus))) + i-- + dAtA[i] = 0x32 + } + if len(m.AllocatedResources) > 0 { + keysForAllocatedResources := make([]string, 0, len(m.AllocatedResources)) + for k := range m.AllocatedResources { + keysForAllocatedResources = append(keysForAllocatedResources, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForAllocatedResources) + for iNdEx := len(keysForAllocatedResources) - 1; iNdEx >= 0; iNdEx-- { + v := m.AllocatedResources[ResourceName(keysForAllocatedResources[iNdEx])] + baseI := i + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(keysForAllocatedResources[iNdEx]) + copy(dAtA[i:], keysForAllocatedResources[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForAllocatedResources[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x2a + } + } if len(m.Conditions) > 0 { for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { { @@ -14654,6 +14757,34 @@ func (m *PodLogOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *PodOS) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func 
(m *PodOS) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodOS) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + func (m *PodPortForwardOptions) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -14907,6 +15038,20 @@ func (m *PodSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.OS != nil { + { + size, err := m.OS.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xa2 + } if m.SetHostnameAsFQDN != nil { i-- if *m.SetHostnameAsFQDN { @@ -15858,7 +16003,7 @@ func (m *Probe) MarshalToSizedBuffer(dAtA []byte) (int, error) { i-- dAtA[i] = 0x10 { - size, err := m.Handler.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.ProbeHandler.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -15870,6 +16015,77 @@ func (m *Probe) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *ProbeHandler) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ProbeHandler) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ProbeHandler) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.GRPC != nil { + { + size, err := m.GRPC.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if m.TCPSocket != nil { + { + size, err := m.TCPSocket.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.HTTPGet != nil { + { + size, err := m.HTTPGet.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.Exec != nil { + { + size, err := m.Exec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + func (m *ProjectedVolumeSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -18111,17 +18327,6 @@ func (m *ServiceSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { i-- dAtA[i] = 0x8a } - if len(m.TopologyKeys) > 0 { - for iNdEx := len(m.TopologyKeys) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.TopologyKeys[iNdEx]) - copy(dAtA[i:], m.TopologyKeys[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.TopologyKeys[iNdEx]))) - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0x82 - } - } if m.SessionAffinityConfig != nil { { size, err := m.SessionAffinityConfig.MarshalToSizedBuffer(dAtA[:i]) @@ -19496,6 +19701,16 @@ func (m *WindowsSecurityContextOptions) MarshalToSizedBuffer(dAtA []byte) (int, _ = i var l int _ = l + if m.HostProcess != nil { + i-- + if *m.HostProcess { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + } if m.RunAsUserName != nil { i -= 
len(*m.RunAsUserName) copy(dAtA[i:], *m.RunAsUserName) @@ -20608,23 +20823,6 @@ func (m *EphemeralContainerCommon) Size() (n int) { return n } -func (m *EphemeralContainers) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.EphemeralContainers) > 0 { - for _, e := range m.EphemeralContainers { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - func (m *EphemeralVolumeSource) Size() (n int) { if m == nil { return 0 @@ -20844,6 +21042,20 @@ func (m *GCEPersistentDiskVolumeSource) Size() (n int) { return n } +func (m *GRPCAction) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 1 + sovGenerated(uint64(m.Port)) + if m.Service != nil { + l = len(*m.Service) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + func (m *GitRepoVolumeSource) Size() (n int) { if m == nil { return 0 @@ -20927,27 +21139,6 @@ func (m *HTTPHeader) Size() (n int) { return n } -func (m *Handler) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Exec != nil { - l = m.Exec.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.HTTPGet != nil { - l = m.HTTPGet.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.TCPSocket != nil { - l = m.TCPSocket.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - func (m *HostAlias) Size() (n int) { if m == nil { return 0 @@ -21083,6 +21274,27 @@ func (m *Lifecycle) Size() (n int) { return n } +func (m *LifecycleHandler) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Exec != nil { + l = m.Exec.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.HTTPGet != nil { + l = m.HTTPGet.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.TCPSocket != nil { + l = m.TCPSocket.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + func (m *LimitRange) Size() (n int) { if m == nil { return 0 @@ -21838,6 +22050,10 @@ func (m *PersistentVolumeClaimSpec) Size() (n int) { l = m.DataSource.Size() n += 1 + l + sovGenerated(uint64(l)) } + if m.DataSourceRef != nil { + l = m.DataSourceRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + } return n } @@ -21870,6 +22086,19 @@ func (m *PersistentVolumeClaimStatus) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) } } + if len(m.AllocatedResources) > 0 { + for k, v := range m.AllocatedResources { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + if m.ResizeStatus != nil { + l = len(*m.ResizeStatus) + n += 1 + l + sovGenerated(uint64(l)) + } return n } @@ -22325,6 +22554,17 @@ func (m *PodLogOptions) Size() (n int) { return n } +func (m *PodOS) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + func (m *PodPortForwardOptions) Size() (n int) { if m == nil { return 0 @@ -22561,6 +22801,10 @@ func (m *PodSpec) Size() (n int) { if m.SetHostnameAsFQDN != nil { n += 3 } + if m.OS != nil { + l = m.OS.Size() + n += 2 + l + sovGenerated(uint64(l)) + } return n } @@ -22755,7 +22999,7 @@ func (m *Probe) Size() (n int) { } var l int _ = l - l = m.Handler.Size() + l = m.ProbeHandler.Size() n += 1 + l + sovGenerated(uint64(l)) n += 1 + sovGenerated(uint64(m.InitialDelaySeconds)) n += 1 + sovGenerated(uint64(m.TimeoutSeconds)) @@ -22768,6 +23012,31 @@ func (m *Probe) Size() (n int) { return n } +func 
(m *ProbeHandler) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Exec != nil { + l = m.Exec.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.HTTPGet != nil { + l = m.HTTPGet.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.TCPSocket != nil { + l = m.TCPSocket.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.GRPC != nil { + l = m.GRPC.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + func (m *ProjectedVolumeSource) Size() (n int) { if m == nil { return 0 @@ -23602,12 +23871,6 @@ func (m *ServiceSpec) Size() (n int) { l = m.SessionAffinityConfig.Size() n += 1 + l + sovGenerated(uint64(l)) } - if len(m.TopologyKeys) > 0 { - for _, s := range m.TopologyKeys { - l = len(s) - n += 2 + l + sovGenerated(uint64(l)) - } - } if m.IPFamilyPolicy != nil { l = len(*m.IPFamilyPolicy) n += 2 + l + sovGenerated(uint64(l)) @@ -24098,6 +24361,9 @@ func (m *WindowsSecurityContextOptions) Size() (n int) { l = len(*m.RunAsUserName) n += 1 + l + sovGenerated(uint64(l)) } + if m.HostProcess != nil { + n += 2 + } return n } @@ -24906,22 +25172,6 @@ func (this *EphemeralContainerCommon) String() string { }, "") return s } -func (this *EphemeralContainers) String() string { - if this == nil { - return "nil" - } - repeatedStringForEphemeralContainers := "[]EphemeralContainer{" - for _, f := range this.EphemeralContainers { - repeatedStringForEphemeralContainers += strings.Replace(strings.Replace(f.String(), "EphemeralContainer", "EphemeralContainer", 1), `&`, ``, 1) + "," - } - repeatedStringForEphemeralContainers += "}" - s := strings.Join([]string{`&EphemeralContainers{`, - `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `EphemeralContainers:` + repeatedStringForEphemeralContainers + `,`, - `}`, - }, "") - return s -} func (this *EphemeralVolumeSource) String() string { if this == nil { return "nil" @@ -25090,6 +25340,17 @@ func (this *GCEPersistentDiskVolumeSource) String() string { }, "") return s } +func (this *GRPCAction) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&GRPCAction{`, + `Port:` + fmt.Sprintf("%v", this.Port) + `,`, + `Service:` + valueToStringGenerated(this.Service) + `,`, + `}`, + }, "") + return s +} func (this *GitRepoVolumeSource) String() string { if this == nil { return "nil" @@ -25157,18 +25418,6 @@ func (this *HTTPHeader) String() string { }, "") return s } -func (this *Handler) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&Handler{`, - `Exec:` + strings.Replace(this.Exec.String(), "ExecAction", "ExecAction", 1) + `,`, - `HTTPGet:` + strings.Replace(this.HTTPGet.String(), "HTTPGetAction", "HTTPGetAction", 1) + `,`, - `TCPSocket:` + strings.Replace(this.TCPSocket.String(), "TCPSocketAction", "TCPSocketAction", 1) + `,`, - `}`, - }, "") - return s -} func (this *HostAlias) String() string { if this == nil { return "nil" @@ -25248,8 +25497,20 @@ func (this *Lifecycle) String() string { return "nil" } s := strings.Join([]string{`&Lifecycle{`, - `PostStart:` + strings.Replace(this.PostStart.String(), "Handler", "Handler", 1) + `,`, - `PreStop:` + strings.Replace(this.PreStop.String(), "Handler", "Handler", 1) + `,`, + `PostStart:` + strings.Replace(this.PostStart.String(), "LifecycleHandler", "LifecycleHandler", 1) + `,`, + `PreStop:` + strings.Replace(this.PreStop.String(), "LifecycleHandler", "LifecycleHandler", 1) + `,`, + `}`, + }, "") + return s +} 
+func (this *LifecycleHandler) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&LifecycleHandler{`, + `Exec:` + strings.Replace(this.Exec.String(), "ExecAction", "ExecAction", 1) + `,`, + `HTTPGet:` + strings.Replace(this.HTTPGet.String(), "HTTPGetAction", "HTTPGetAction", 1) + `,`, + `TCPSocket:` + strings.Replace(this.TCPSocket.String(), "TCPSocketAction", "TCPSocketAction", 1) + `,`, `}`, }, "") return s @@ -25885,6 +26146,7 @@ func (this *PersistentVolumeClaimSpec) String() string { `StorageClassName:` + valueToStringGenerated(this.StorageClassName) + `,`, `VolumeMode:` + valueToStringGenerated(this.VolumeMode) + `,`, `DataSource:` + strings.Replace(this.DataSource.String(), "TypedLocalObjectReference", "TypedLocalObjectReference", 1) + `,`, + `DataSourceRef:` + strings.Replace(this.DataSourceRef.String(), "TypedLocalObjectReference", "TypedLocalObjectReference", 1) + `,`, `}`, }, "") return s @@ -25908,11 +26170,23 @@ func (this *PersistentVolumeClaimStatus) String() string { mapStringForCapacity += fmt.Sprintf("%v: %v,", k, this.Capacity[ResourceName(k)]) } mapStringForCapacity += "}" + keysForAllocatedResources := make([]string, 0, len(this.AllocatedResources)) + for k := range this.AllocatedResources { + keysForAllocatedResources = append(keysForAllocatedResources, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForAllocatedResources) + mapStringForAllocatedResources := "ResourceList{" + for _, k := range keysForAllocatedResources { + mapStringForAllocatedResources += fmt.Sprintf("%v: %v,", k, this.AllocatedResources[ResourceName(k)]) + } + mapStringForAllocatedResources += "}" s := strings.Join([]string{`&PersistentVolumeClaimStatus{`, `Phase:` + fmt.Sprintf("%v", this.Phase) + `,`, `AccessModes:` + fmt.Sprintf("%v", this.AccessModes) + `,`, `Capacity:` + mapStringForCapacity + `,`, `Conditions:` + repeatedStringForConditions + `,`, + `AllocatedResources:` + mapStringForAllocatedResources + `,`, + `ResizeStatus:` + valueToStringGenerated(this.ResizeStatus) + `,`, `}`, }, "") return s @@ -26220,6 +26494,16 @@ func (this *PodLogOptions) String() string { }, "") return s } +func (this *PodOS) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodOS{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `}`, + }, "") + return s +} func (this *PodPortForwardOptions) String() string { if this == nil { return "nil" @@ -26389,6 +26673,7 @@ func (this *PodSpec) String() string { `TopologySpreadConstraints:` + repeatedStringForTopologySpreadConstraints + `,`, `EphemeralContainers:` + repeatedStringForEphemeralContainers + `,`, `SetHostnameAsFQDN:` + valueToStringGenerated(this.SetHostnameAsFQDN) + `,`, + `OS:` + strings.Replace(this.OS.String(), "PodOS", "PodOS", 1) + `,`, `}`, }, "") return s @@ -26552,7 +26837,7 @@ func (this *Probe) String() string { return "nil" } s := strings.Join([]string{`&Probe{`, - `Handler:` + strings.Replace(strings.Replace(this.Handler.String(), "Handler", "Handler", 1), `&`, ``, 1) + `,`, + `ProbeHandler:` + strings.Replace(strings.Replace(this.ProbeHandler.String(), "ProbeHandler", "ProbeHandler", 1), `&`, ``, 1) + `,`, `InitialDelaySeconds:` + fmt.Sprintf("%v", this.InitialDelaySeconds) + `,`, `TimeoutSeconds:` + fmt.Sprintf("%v", this.TimeoutSeconds) + `,`, `PeriodSeconds:` + fmt.Sprintf("%v", this.PeriodSeconds) + `,`, @@ -26563,6 +26848,19 @@ func (this *Probe) String() string { }, "") return s } +func (this *ProbeHandler) String() string { + if this == nil { + return 
"nil" + } + s := strings.Join([]string{`&ProbeHandler{`, + `Exec:` + strings.Replace(this.Exec.String(), "ExecAction", "ExecAction", 1) + `,`, + `HTTPGet:` + strings.Replace(this.HTTPGet.String(), "HTTPGetAction", "HTTPGetAction", 1) + `,`, + `TCPSocket:` + strings.Replace(this.TCPSocket.String(), "TCPSocketAction", "TCPSocketAction", 1) + `,`, + `GRPC:` + strings.Replace(this.GRPC.String(), "GRPCAction", "GRPCAction", 1) + `,`, + `}`, + }, "") + return s +} func (this *ProjectedVolumeSource) String() string { if this == nil { return "nil" @@ -27224,7 +27522,6 @@ func (this *ServiceSpec) String() string { `HealthCheckNodePort:` + fmt.Sprintf("%v", this.HealthCheckNodePort) + `,`, `PublishNotReadyAddresses:` + fmt.Sprintf("%v", this.PublishNotReadyAddresses) + `,`, `SessionAffinityConfig:` + strings.Replace(this.SessionAffinityConfig.String(), "SessionAffinityConfig", "SessionAffinityConfig", 1) + `,`, - `TopologyKeys:` + fmt.Sprintf("%v", this.TopologyKeys) + `,`, `IPFamilyPolicy:` + valueToStringGenerated(this.IPFamilyPolicy) + `,`, `ClusterIPs:` + fmt.Sprintf("%v", this.ClusterIPs) + `,`, `IPFamilies:` + fmt.Sprintf("%v", this.IPFamilies) + `,`, @@ -27519,6 +27816,7 @@ func (this *WindowsSecurityContextOptions) String() string { `GMSACredentialSpecName:` + valueToStringGenerated(this.GMSACredentialSpecName) + `,`, `GMSACredentialSpec:` + valueToStringGenerated(this.GMSACredentialSpec) + `,`, `RunAsUserName:` + valueToStringGenerated(this.RunAsUserName) + `,`, + `HostProcess:` + valueToStringGenerated(this.HostProcess) + `,`, `}`, }, "") return s @@ -36642,7 +36940,7 @@ func (m *EphemeralContainerCommon) Unmarshal(dAtA []byte) error { } return nil } -func (m *EphemeralContainers) Unmarshal(dAtA []byte) error { +func (m *EphemeralVolumeSource) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -36665,15 +36963,15 @@ func (m *EphemeralContainers) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: EphemeralContainers: wiretype end group for non-group") + return fmt.Errorf("proto: EphemeralVolumeSource: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: EphemeralContainers: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: EphemeralVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field VolumeClaimTemplate", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -36700,41 +36998,10 @@ func (m *EphemeralContainers) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field EphemeralContainers", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF + if m.VolumeClaimTemplate == nil { + m.VolumeClaimTemplate = 
&PersistentVolumeClaimTemplate{} } - m.EphemeralContainers = append(m.EphemeralContainers, EphemeralContainer{}) - if err := m.EphemeralContainers[len(m.EphemeralContainers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.VolumeClaimTemplate.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -36759,7 +37026,7 @@ func (m *EphemeralContainers) Unmarshal(dAtA []byte) error { } return nil } -func (m *EphemeralVolumeSource) Unmarshal(dAtA []byte) error { +func (m *Event) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -36782,15 +37049,15 @@ func (m *EphemeralVolumeSource) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: EphemeralVolumeSource: wiretype end group for non-group") + return fmt.Errorf("proto: Event: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: EphemeralVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: Event: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field VolumeClaimTemplate", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -36817,66 +37084,110 @@ func (m *EphemeralVolumeSource) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.VolumeClaimTemplate == nil { - m.VolumeClaimTemplate = &PersistentVolumeClaimTemplate{} - } - if err := m.VolumeClaimTemplate.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field InvolvedObject", wireType) } - if (skippy < 0) || (iNdEx+skippy) < 0 { + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) > l { + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { return io.ErrUnexpectedEOF } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Event) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + if err := m.InvolvedObject.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } - if iNdEx >= l { + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return 
ErrInvalidLengthGenerated + } + if postIndex > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break + m.Reason = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Event: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Event: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Source", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -36903,13 +37214,13 @@ func (m *Event) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Source.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 2: + case 6: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field InvolvedObject", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field FirstTimestamp", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -36936,15 +37247,15 @@ func (m *Event) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.InvolvedObject.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.FirstTimestamp.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 3: + case 7: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field LastTimestamp", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -36954,27 +37265,47 @@ func (m *Event) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Reason = string(dAtA[iNdEx:postIndex]) + if err := m.LastTimestamp.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 4: + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Count", wireType) + } + m.Count = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + 
b := dAtA[iNdEx] + iNdEx++ + m.Count |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 9: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -37002,11 +37333,11 @@ func (m *Event) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Message = string(dAtA[iNdEx:postIndex]) + m.Type = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 5: + case 10: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Source", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field EventTime", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -37033,163 +37364,13 @@ func (m *Event) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Source.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.EventTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 6: + case 11: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field FirstTimestamp", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.FirstTimestamp.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LastTimestamp", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.LastTimestamp.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 8: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Count", wireType) - } - m.Count = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Count |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 9: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Type = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 
10: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field EventTime", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.EventTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 11: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Series", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Series", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -38838,6 +39019,108 @@ func (m *GCEPersistentDiskVolumeSource) Unmarshal(dAtA []byte) error { } return nil } +func (m *GRPCAction) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GRPCAction: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GRPCAction: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType) + } + m.Port = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Port |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Service", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Service = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *GitRepoVolumeSource) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -39612,7 +39895,7 @@ func (m *HTTPHeader) Unmarshal(dAtA []byte) error { } return nil } -func (m *Handler) Unmarshal(dAtA []byte) error { +func (m *HostAlias) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -39635,17 +39918,17 @@ func (m *Handler) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := 
int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: Handler: wiretype end group for non-group") + return fmt.Errorf("proto: HostAlias: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: Handler: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: HostAlias: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Exec", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field IP", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -39655,69 +39938,29 @@ func (m *Handler) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if m.Exec == nil { - m.Exec = &ExecAction{} - } - if err := m.Exec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.IP = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field HTTPGet", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.HTTPGet == nil { - m.HTTPGet = &HTTPGetAction{} - } - if err := m.HTTPGet.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TCPSocket", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Hostnames", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -39727,27 +39970,23 @@ func (m *Handler) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if m.TCPSocket == nil { - m.TCPSocket = &TCPSocketAction{} - } - if err := m.TCPSocket.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Hostnames = append(m.Hostnames, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex default: iNdEx = preIndex @@ -39770,7 +40009,7 @@ func (m *Handler) Unmarshal(dAtA []byte) error { } return nil } -func (m *HostAlias) Unmarshal(dAtA []byte) error { +func (m *HostPathVolumeSource) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -39793,15 +40032,15 @@ func (m *HostAlias) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - 
return fmt.Errorf("proto: HostAlias: wiretype end group for non-group") + return fmt.Errorf("proto: HostPathVolumeSource: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: HostAlias: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: HostPathVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field IP", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -39829,11 +40068,11 @@ func (m *HostAlias) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.IP = string(dAtA[iNdEx:postIndex]) + m.Path = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Hostnames", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -39861,7 +40100,8 @@ func (m *HostAlias) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Hostnames = append(m.Hostnames, string(dAtA[iNdEx:postIndex])) + s := HostPathType(dAtA[iNdEx:postIndex]) + m.Type = &s iNdEx = postIndex default: iNdEx = preIndex @@ -39884,7 +40124,7 @@ func (m *HostAlias) Unmarshal(dAtA []byte) error { } return nil } -func (m *HostPathVolumeSource) Unmarshal(dAtA []byte) error { +func (m *ISCSIPersistentVolumeSource) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -39907,15 +40147,182 @@ func (m *HostPathVolumeSource) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: HostPathVolumeSource: wiretype end group for non-group") + return fmt.Errorf("proto: ISCSIPersistentVolumeSource: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: HostPathVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ISCSIPersistentVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field TargetPortal", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TargetPortal = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field IQN", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return 
ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.IQN = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Lun", wireType) + } + m.Lun = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Lun |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ISCSIInterface", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ISCSIInterface = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FSType = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.ReadOnly = bool(v != 0) + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Portals", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -39943,11 +40350,87 @@ func (m *HostPathVolumeSource) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Path = string(dAtA[iNdEx:postIndex]) + m.Portals = append(m.Portals, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex - case 2: + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DiscoveryCHAPAuth", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.DiscoveryCHAPAuth = bool(v != 0) + case 10: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field SecretRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if 
b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SecretRef == nil { + m.SecretRef = &SecretReference{} + } + if err := m.SecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 11: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field SessionCHAPAuth", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.SessionCHAPAuth = bool(v != 0) + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field InitiatorName", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -39975,8 +40458,8 @@ func (m *HostPathVolumeSource) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - s := HostPathType(dAtA[iNdEx:postIndex]) - m.Type = &s + s := string(dAtA[iNdEx:postIndex]) + m.InitiatorName = &s iNdEx = postIndex default: iNdEx = preIndex @@ -39999,7 +40482,7 @@ func (m *HostPathVolumeSource) Unmarshal(dAtA []byte) error { } return nil } -func (m *ISCSIPersistentVolumeSource) Unmarshal(dAtA []byte) error { +func (m *ISCSIVolumeSource) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -40022,10 +40505,10 @@ func (m *ISCSIPersistentVolumeSource) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ISCSIPersistentVolumeSource: wiretype end group for non-group") + return fmt.Errorf("proto: ISCSIVolumeSource: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ISCSIPersistentVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ISCSIVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -40277,7 +40760,7 @@ func (m *ISCSIPersistentVolumeSource) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.SecretRef == nil { - m.SecretRef = &SecretReference{} + m.SecretRef = &LocalObjectReference{} } if err := m.SecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -40357,7 +40840,7 @@ func (m *ISCSIPersistentVolumeSource) Unmarshal(dAtA []byte) error { } return nil } -func (m *ISCSIVolumeSource) Unmarshal(dAtA []byte) error { +func (m *KeyToPath) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -40380,15 +40863,15 @@ func (m *ISCSIVolumeSource) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ISCSIVolumeSource: wiretype end group for non-group") + return fmt.Errorf("proto: KeyToPath: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ISCSIVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: KeyToPath: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TargetPortal", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -40416,11 +40899,11 @@ func (m *ISCSIVolumeSource) 
Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.TargetPortal = string(dAtA[iNdEx:postIndex]) + m.Key = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field IQN", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -40448,32 +40931,13 @@ func (m *ISCSIVolumeSource) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.IQN = string(dAtA[iNdEx:postIndex]) + m.Path = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Lun", wireType) - } - m.Lun = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Lun |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ISCSIInterface", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Mode", wireType) } - var stringLen uint64 + var v int32 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -40483,81 +40947,67 @@ func (m *ISCSIVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated + m.Mode = &v + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err } - postIndex := iNdEx + intStringLen - if postIndex < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } - if postIndex > l { + if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.ISCSIInterface = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Lifecycle) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated } - if postIndex > l { + if iNdEx >= l { return io.ErrUnexpectedEOF } - m.FSType = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break } - m.ReadOnly = bool(v != 0) - case 7: + } + fieldNum 
:= int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Lifecycle: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Lifecycle: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Portals", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field PostStart", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -40567,47 +41017,31 @@ func (m *ISCSIVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Portals = append(m.Portals, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 8: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field DiscoveryCHAPAuth", wireType) + if m.PostStart == nil { + m.PostStart = &LifecycleHandler{} } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } + if err := m.PostStart.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } - m.DiscoveryCHAPAuth = bool(v != 0) - case 10: + iNdEx = postIndex + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SecretRef", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field PreStop", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -40634,66 +41068,13 @@ func (m *ISCSIVolumeSource) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.SecretRef == nil { - m.SecretRef = &LocalObjectReference{} + if m.PreStop == nil { + m.PreStop = &LifecycleHandler{} } - if err := m.SecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.PreStop.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 11: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field SessionCHAPAuth", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.SessionCHAPAuth = bool(v != 0) - case 12: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field InitiatorName", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := string(dAtA[iNdEx:postIndex]) - m.InitiatorName = &s - iNdEx = postIndex default: iNdEx = 
preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -40715,7 +41096,7 @@ func (m *ISCSIVolumeSource) Unmarshal(dAtA []byte) error { } return nil } -func (m *KeyToPath) Unmarshal(dAtA []byte) error { +func (m *LifecycleHandler) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -40738,49 +41119,17 @@ func (m *KeyToPath) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: KeyToPath: wiretype end group for non-group") + return fmt.Errorf("proto: LifecycleHandler: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: KeyToPath: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: LifecycleHandler: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Key = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Exec", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -40790,97 +41139,31 @@ func (m *KeyToPath) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Path = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Mode", wireType) - } - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int32(b&0x7F) << shift - if b < 0x80 { - break - } + if m.Exec == nil { + m.Exec = &ExecAction{} } - m.Mode = &v - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { + if err := m.Exec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Lifecycle) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - 
break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Lifecycle: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Lifecycle: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + iNdEx = postIndex + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PostStart", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field HTTPGet", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -40907,16 +41190,16 @@ func (m *Lifecycle) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.PostStart == nil { - m.PostStart = &Handler{} + if m.HTTPGet == nil { + m.HTTPGet = &HTTPGetAction{} } - if err := m.PostStart.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.HTTPGet.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 2: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PreStop", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field TCPSocket", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -40943,10 +41226,10 @@ func (m *Lifecycle) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.PreStop == nil { - m.PreStop = &Handler{} + if m.TCPSocket == nil { + m.TCPSocket = &TCPSocketAction{} } - if err := m.PreStop.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.TCPSocket.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -47641,6 +47924,42 @@ func (m *PersistentVolumeClaimSpec) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DataSourceRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.DataSourceRef == nil { + m.DataSourceRef = &TypedLocalObjectReference{} + } + if err := m.DataSourceRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -47913,10 +48232,172 @@ func (m *PersistentVolumeClaimStatus) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Conditions = append(m.Conditions, PersistentVolumeClaimCondition{}) - if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err + m.Conditions = append(m.Conditions, PersistentVolumeClaimCondition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AllocatedResources", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return 
ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AllocatedResources == nil { + m.AllocatedResources = make(ResourceList) + } + var mapkey ResourceName + mapvalue := &resource.Quantity{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = ResourceName(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &resource.Quantity{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } } + m.AllocatedResources[ResourceName(mapkey)] = *mapvalue + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResizeStatus", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := PersistentVolumeClaimResizeStatus(dAtA[iNdEx:postIndex]) + m.ResizeStatus = &s iNdEx = postIndex default: iNdEx = preIndex @@ -51712,6 +52193,88 @@ func (m *PodLogOptions) Unmarshal(dAtA []byte) error { } return nil } +func (m *PodOS) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= 
uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodOS: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodOS: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = OSName(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *PodPortForwardOptions) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -53701,32 +54264,68 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.EphemeralContainers = append(m.EphemeralContainers, EphemeralContainer{}) - if err := m.EphemeralContainers[len(m.EphemeralContainers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.EphemeralContainers = append(m.EphemeralContainers, EphemeralContainer{}) + if err := m.EphemeralContainers[len(m.EphemeralContainers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 35: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field SetHostnameAsFQDN", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.SetHostnameAsFQDN = &b + case 36: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field OS", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.OS == nil { + m.OS = &PodOS{} + } + if err := m.OS.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 35: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field SetHostnameAsFQDN", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - b := bool(v != 0) - m.SetHostnameAsFQDN = &b default: iNdEx = 
preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -55357,7 +55956,7 @@ func (m *Probe) Unmarshal(dAtA []byte) error { switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Handler", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ProbeHandler", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -55384,7 +55983,7 @@ func (m *Probe) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Handler.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.ProbeHandler.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -55524,6 +56123,200 @@ func (m *Probe) Unmarshal(dAtA []byte) error { } return nil } +func (m *ProbeHandler) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ProbeHandler: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ProbeHandler: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Exec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Exec == nil { + m.Exec = &ExecAction{} + } + if err := m.Exec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field HTTPGet", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.HTTPGet == nil { + m.HTTPGet = &HTTPGetAction{} + } + if err := m.HTTPGet.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TCPSocket", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.TCPSocket == nil { + m.TCPSocket = &TCPSocketAction{} + } + if err := 
m.TCPSocket.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field GRPC", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.GRPC == nil { + m.GRPC = &GRPCAction{} + } + if err := m.GRPC.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *ProjectedVolumeSource) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -63111,38 +63904,6 @@ func (m *ServiceSpec) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex - case 16: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TopologyKeys", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.TopologyKeys = append(m.TopologyKeys, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex case 17: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field IPFamilyPolicy", wireType) @@ -67320,6 +68081,27 @@ func (m *WindowsSecurityContextOptions) Unmarshal(dAtA []byte) error { s := string(dAtA[iNdEx:postIndex]) m.RunAsUserName = &s iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field HostProcess", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.HostProcess = &b default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) diff --git a/vendor/k8s.io/api/core/v1/generated.proto b/vendor/k8s.io/api/core/v1/generated.proto index 152ea29fd05b6..b5b44781f02b4 100644 --- a/vendor/k8s.io/api/core/v1/generated.proto +++ b/vendor/k8s.io/api/core/v1/generated.proto @@ -498,6 +498,7 @@ message ConfigMapEnvSource { } // Selects a key from a ConfigMap. +// +structType=atomic message ConfigMapKeySelector { // The ConfigMap to select from. optional LocalObjectReference localObjectReference = 1; @@ -521,6 +522,7 @@ message ConfigMapList { } // ConfigMapNodeConfigSource contains the information to reference a ConfigMap as a config source for the Node. 
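The generated Unmarshal methods above (PodOS, ProbeHandler, PodSpec, and the rest) all inline the same base-128 varint loop to decode wire tags, lengths, and integer fields. A minimal standalone sketch of that loop, with illustrative names, showing how the new PodSpec.OS tag (field 36, wire type 2) decodes:

package main

import (
	"errors"
	"fmt"
)

// decodeVarint mirrors the loop inlined throughout the generated code:
// each byte contributes its low 7 bits, least-significant group first,
// and the high bit signals that another byte follows.
func decodeVarint(dAtA []byte) (uint64, int, error) {
	var v uint64
	for shift, iNdEx := uint(0), 0; ; shift += 7 {
		if shift >= 64 {
			return 0, 0, errors.New("integer overflow") // ErrIntOverflowGenerated in the generated code
		}
		if iNdEx >= len(dAtA) {
			return 0, 0, errors.New("unexpected EOF") // io.ErrUnexpectedEOF in the generated code
		}
		b := dAtA[iNdEx]
		iNdEx++
		v |= uint64(b&0x7F) << shift
		if b < 0x80 {
			return v, iNdEx, nil
		}
	}
}

func main() {
	// Field 36, wire type 2 encodes its tag as (36<<3)|2 = 290,
	// which is the varint byte sequence 0xA2 0x02.
	wire, n, _ := decodeVarint([]byte{0xA2, 0x02})
	fmt.Println(wire>>3, wire&0x7, n) // 36 2 2
}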
+// This API is deprecated since 1.22: https://git.k8s.io/enhancements/keps/sig-node/281-dynamic-kubelet-configuration message ConfigMapNodeConfigSource { // Namespace is the metadata.namespace of the referenced ConfigMap. // This field is required in all cases. @@ -621,10 +623,10 @@ message Container { // Entrypoint array. Not executed within a shell. // The docker image's ENTRYPOINT is used if this is not provided. // Variable references $(VAR_NAME) are expanded using the container's environment. If a variable - // cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax - // can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, - // regardless of whether the variable exists or not. - // Cannot be updated. + // cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + // to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + // produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + // of whether the variable exists or not. Cannot be updated. // More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell // +optional repeated string command = 3; @@ -632,10 +634,10 @@ message Container { // Arguments to the entrypoint. // The docker image's CMD is used if this is not provided. // Variable references $(VAR_NAME) are expanded using the container's environment. If a variable - // cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax - // can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, - // regardless of whether the variable exists or not. - // Cannot be updated. + // cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + // to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + // produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + // of whether the variable exists or not. Cannot be updated. // More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell // +optional repeated string args = 4; @@ -754,8 +756,8 @@ message Container { // +optional optional string imagePullPolicy = 14; - // Security options the pod should run with. - // More info: https://kubernetes.io/docs/concepts/policy/security-context/ + // SecurityContext defines the security options the container should be run with. + // If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. // More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ // +optional optional SecurityContext securityContext = 15; @@ -786,6 +788,7 @@ message Container { message ContainerImage { // Names by which this image is known. // e.g. ["k8s.gcr.io/hyperkube:v1.0.7", "dockerhub.io/google_containers/hyperkube:v1.0.7"] + // +optional repeated string names = 1; // The size of the image in bytes. @@ -905,15 +908,11 @@ message ContainerStatus { // Specifies whether the container has passed its readiness probe. optional bool ready = 4; - // The number of times the container has been restarted, currently based on - // the number of dead containers that have not yet been removed. - // Note that this is calculated from dead containers. 
But those containers are subject to - // garbage collection. This value will get capped at 5 by GC. + // The number of times the container has been restarted. optional int32 restartCount = 5; // The image the container is running. - // More info: https://kubernetes.io/docs/concepts/containers/images - // TODO(dchen1107): Which image the container is running with? + // More info: https://kubernetes.io/docs/concepts/containers/images. optional string image = 6; // ImageID of the container's image. @@ -1010,6 +1009,7 @@ message EmptyDirVolumeSource { } // EndpointAddress is a tuple that describes single IP address. +// +structType=atomic message EndpointAddress { // The IP of this endpoint. // May not be loopback (127.0.0.0/8), link-local (169.254.0.0/16), @@ -1033,6 +1033,7 @@ message EndpointAddress { } // EndpointPort is a tuple that describes a single port. +// +structType=atomic message EndpointPort { // The name of this port. This must match the 'name' field in the // corresponding ServicePort. @@ -1056,8 +1057,6 @@ message EndpointPort { // RFC-6335 and http://www.iana.org/assignments/service-names). // Non-standard protocols should use prefixed names such as // mycompany.com/my-custom-protocol. - // This is a beta field that is guarded by the ServiceAppProtocol feature - // gate and enabled by default. // +optional optional string appProtocol = 4; } @@ -1150,11 +1149,12 @@ message EnvVar { optional string name = 1; // Variable references $(VAR_NAME) are expanded - // using the previous defined environment variables in the container and + // using the previously defined environment variables in the container and // any service environment variables. If a variable cannot be resolved, - // the reference in the input string will be unchanged. The $(VAR_NAME) - // syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped - // references will never be expanded, regardless of whether the variable + // the reference in the input string will be unchanged. Double $$ are reduced + // to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. + // "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + // Escaped references will never be expanded, regardless of whether the variable // exists or not. // Defaults to "". // +optional @@ -1186,15 +1186,16 @@ message EnvVarSource { optional SecretKeySelector secretKeyRef = 4; } -// An EphemeralContainer is a container that may be added temporarily to an existing pod for +// An EphemeralContainer is a temporary container that you may add to an existing Pod for // user-initiated activities such as debugging. Ephemeral containers have no resource or -// scheduling guarantees, and they will not be restarted when they exit or when a pod is -// removed or restarted. If an ephemeral container causes a pod to exceed its resource -// allocation, the pod may be evicted. -// Ephemeral containers may not be added by directly updating the pod spec. They must be added -// via the pod's ephemeralcontainers subresource, and they will appear in the pod spec -// once added. -// This is an alpha feature enabled by the EphemeralContainers feature flag. +// scheduling guarantees, and they will not be restarted when they exit or when a Pod is +// removed or restarted. The kubelet may evict a Pod if an ephemeral container causes the +// Pod to exceed its resource allocation. +// +// To add an ephemeral container, use the ephemeralcontainers subresource of an existing +// Pod. Ephemeral containers may not be removed or restarted. 
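A sketch of what adding a debug container through the ephemeralcontainers subresource can look like with client-go, assuming a client version matching this API; the pod, image, and container names are illustrative:

package main

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

func addDebugContainer(ctx context.Context, cs kubernetes.Interface, pod *corev1.Pod) error {
	pod.Spec.EphemeralContainers = append(pod.Spec.EphemeralContainers, corev1.EphemeralContainer{
		EphemeralContainerCommon: corev1.EphemeralContainerCommon{
			Name:  "debugger",
			Image: "busybox:1.35",
			Stdin: true,
			TTY:   true,
		},
		// Join the namespaces (IPC, PID, ...) of the "app" container.
		TargetContainerName: "app",
	})
	// Ephemeral containers cannot be set on create or via a normal spec
	// update; they must go through the dedicated subresource.
	_, err := cs.CoreV1().Pods(pod.Namespace).
		UpdateEphemeralContainers(ctx, pod.Name, pod, metav1.UpdateOptions{})
	return err
}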
+// +// This is a beta feature available on clusters that haven't disabled the EphemeralContainers feature gate. message EphemeralContainer { // Ephemeral containers have all of the fields of Container, plus additional fields // specific to ephemeral containers. Fields in common with Container are in the @@ -1204,8 +1205,10 @@ message EphemeralContainer { // If set, the name of the container from PodSpec that this ephemeral container targets. // The ephemeral container will be run in the namespaces (IPC, PID, etc) of this container. - // If not set then the ephemeral container is run in whatever namespaces are shared - // for the pod. Note that the container runtime must support this feature. + // If not set then the ephemeral container uses the namespaces configured in the Pod spec. + // + // The container runtime must implement support for this feature. If the runtime does not + // support namespace targeting then the result of setting this field is undefined. // +optional optional string targetContainerName = 2; } @@ -1226,10 +1229,10 @@ message EphemeralContainerCommon { // Entrypoint array. Not executed within a shell. // The docker image's ENTRYPOINT is used if this is not provided. // Variable references $(VAR_NAME) are expanded using the container's environment. If a variable - // cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax - // can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, - // regardless of whether the variable exists or not. - // Cannot be updated. + // cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + // to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + // produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + // of whether the variable exists or not. Cannot be updated. // More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell // +optional repeated string command = 3; @@ -1237,10 +1240,10 @@ message EphemeralContainerCommon { // Arguments to the entrypoint. // The docker image's CMD is used if this is not provided. // Variable references $(VAR_NAME) are expanded using the container's environment. If a variable - // cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax - // can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, - // regardless of whether the variable exists or not. - // Cannot be updated. + // cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + // to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + // produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + // of whether the variable exists or not. Cannot be updated. // More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell // +optional repeated string args = 4; @@ -1253,6 +1256,12 @@ message EphemeralContainerCommon { optional string workingDir = 5; // Ports are not allowed for ephemeral containers. 
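The revised $(VAR_NAME) wording above (double $$ reduced to a single $) applies uniformly to command, args, and env values. A small illustrative container spec, assuming the generated Go types in this package:

package main

import corev1 "k8s.io/api/core/v1"

// demoContainer exercises the documented expansion rules; the image and
// variable names are illustrative.
func demoContainer() corev1.Container {
	return corev1.Container{
		Name:    "demo",
		Image:   "busybox:1.35",
		Env:     []corev1.EnvVar{{Name: "GREETING", Value: "hello"}},
		Command: []string{"echo"},
		Args: []string{
			"$(GREETING)",  // expands to "hello"
			"$$(GREETING)", // escaped: the literal string "$(GREETING)"
			"$(MISSING)",   // unresolved: left unchanged
		},
	}
}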
+ // +optional + // +patchMergeKey=containerPort + // +patchStrategy=merge + // +listType=map + // +listMapKey=containerPort + // +listMapKey=protocol repeated ContainerPort ports = 6; // List of sources to populate environment variables in the container. @@ -1276,7 +1285,7 @@ message EphemeralContainerCommon { // +optional optional ResourceRequirements resources = 8; - // Pod volumes to mount into the container's filesystem. + // Pod volumes to mount into the container's filesystem. Subpath mounts are not allowed for ephemeral containers. // Cannot be updated. // +optional // +patchMergeKey=mountPath @@ -1333,7 +1342,8 @@ message EphemeralContainerCommon { // +optional optional string imagePullPolicy = 14; - // SecurityContext is not allowed for ephemeral containers. + // Optional: SecurityContext defines the security options the ephemeral container should be run with. + // If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. // +optional optional SecurityContext securityContext = 15; @@ -1359,19 +1369,6 @@ message EphemeralContainerCommon { optional bool tty = 18; } -// A list of ephemeral containers used with the Pod ephemeralcontainers subresource. -message EphemeralContainers { - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - - // A list of ephemeral containers associated with this pod. New ephemeral containers - // may be appended to this list, but existing ephemeral containers may not be removed - // or modified. - // +patchMergeKey=name - // +patchStrategy=merge - repeated EphemeralContainer ephemeralContainers = 2; -} - // Represents an ephemeral volume that is handled by a normal storage driver. message EphemeralVolumeSource { // Will be used to create a stand-alone PVC to provision the volume. @@ -1649,6 +1646,19 @@ message GCEPersistentDiskVolumeSource { optional bool readOnly = 4; } +message GRPCAction { + // Port number of the gRPC service. Number must be in the range 1 to 65535. + optional int32 port = 1; + + // Service is the name of the service to place in the gRPC HealthCheckRequest + // (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + // + // If this is not specified, the default behavior is defined by gRPC. + // +optional + // +default="" + optional string service = 2; +} + // Represents a volume that is populated with the contents of a git repository. // Git repo volumes do not support ownership management. // Git repo volumes support SELinux relabeling. @@ -1749,25 +1759,6 @@ message HTTPHeader { optional string value = 2; } -// Handler defines a specific action that should be taken -// TODO: pass structured data to these actions, and document that data here. -message Handler { - // One and only one of the following should be specified. - // Exec specifies the action to take. - // +optional - optional ExecAction exec = 1; - - // HTTPGet specifies the http request to perform. - // +optional - optional HTTPGetAction httpGet = 2; - - // TCPSocket specifies an action involving a TCP port. - // TCP hooks not yet supported - // TODO: implement a realistic TCP lifecycle hook - // +optional - optional TCPSocketAction tcpSocket = 3; -} - // HostAlias holds the mapping between IP and hostnames that will be injected as an entry in the // pod's hosts file. message HostAlias { @@ -1935,20 +1926,37 @@ message Lifecycle { // Other management of the container blocks until the hook completes. 
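The hunks that follow split the old Handler type in two: lifecycle hooks take a LifecycleHandler (defined below), while probes get their own ProbeHandler. A sketch of a lifecycle written against the renamed type, with illustrative endpoints:

package main

import (
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
)

// demoLifecycle uses the renamed LifecycleHandler type (formerly Handler).
func demoLifecycle() *corev1.Lifecycle {
	return &corev1.Lifecycle{
		PostStart: &corev1.LifecycleHandler{
			Exec: &corev1.ExecAction{Command: []string{"/bin/sh", "-c", "echo started"}},
		},
		PreStop: &corev1.LifecycleHandler{
			HTTPGet: &corev1.HTTPGetAction{Path: "/drain", Port: intstr.FromInt(8080)},
		},
		// TCPSocket is kept on LifecycleHandler only for backward
		// compatibility and fails at runtime, per the comment below.
	}
}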
// More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks // +optional - optional Handler postStart = 1; + optional LifecycleHandler postStart = 1; // PreStop is called immediately before a container is terminated due to an // API request or management event such as liveness/startup probe failure, // preemption, resource contention, etc. The handler is not called if the - // container crashes or exits. The reason for termination is passed to the - // handler. The Pod's termination grace period countdown begins before the - // PreStop hooked is executed. Regardless of the outcome of the handler, the + // container crashes or exits. The Pod's termination grace period countdown begins before the + // PreStop hook is executed. Regardless of the outcome of the handler, the // container will eventually terminate within the Pod's termination grace - // period. Other management of the container blocks until the hook completes + // period (unless delayed by finalizers). Other management of the container blocks until the hook completes // or until the termination grace period is reached. // More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks // +optional - optional Handler preStop = 2; + optional LifecycleHandler preStop = 2; +} + +// LifecycleHandler defines a specific action that should be taken in a lifecycle +// hook. One and only one of the fields, except TCPSocket must be specified. +message LifecycleHandler { + // Exec specifies the action to take. + // +optional + optional ExecAction exec = 1; + + // HTTPGet specifies the http request to perform. + // +optional + optional HTTPGetAction httpGet = 2; + + // Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + // for the backward compatibility. There are no validation of this field and + // lifecycle hooks will fail in runtime when tcp handler is specified. + // +optional + optional TCPSocketAction tcpSocket = 3; } // LimitRange sets resource usage limits for each kind of resource in a Namespace. @@ -2049,6 +2057,7 @@ message LoadBalancerStatus { // LocalObjectReference contains enough information to let you locate the // referenced object inside the same namespace. +// +structType=atomic message LocalObjectReference { // Name of the referent. // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names @@ -2066,7 +2075,7 @@ message LocalVolumeSource { // Filesystem type to mount. // It applies only when the Path is a block device. // Must be a filesystem type supported by the host operating system. - // Ex. "ext4", "xfs", "ntfs". The default value is to auto-select a fileystem if unspecified. + // Ex. "ext4", "xfs", "ntfs". The default value is to auto-select a filesystem if unspecified. // +optional optional string fsType = 2; } @@ -2240,6 +2249,7 @@ message NodeCondition { } // NodeConfigSource specifies a source of node configuration. Exactly one subfield (excluding metadata) must be non-nil. +// This API is deprecated since 1.22 message NodeConfigSource { // ConfigMap is a reference to a Node's ConfigMap optional ConfigMapNodeConfigSource configMap = 2; @@ -2330,6 +2340,7 @@ message NodeResources { // A node selector represents the union of the results of one or more label queries // over a set of nodes; that is, it represents the OR of the selectors represented // by the node selector terms. +// +structType=atomic message NodeSelector { // Required. A list of node selector terms. 
The terms are ORed. repeated NodeSelectorTerm nodeSelectorTerms = 1; @@ -2357,6 +2368,7 @@ message NodeSelectorRequirement { // A null or empty node selector term matches no objects. The requirements of // them are ANDed. // The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. +// +structType=atomic message NodeSelectorTerm { // A list of node selector requirements by node's labels. // +optional @@ -2393,8 +2405,9 @@ message NodeSpec { // +optional repeated Taint taints = 5; - // If specified, the source to get node configuration from - // The DynamicKubeletConfig feature gate must be enabled for the Kubelet to use this field + // Deprecated. If specified, the source of the node's configuration. + // The DynamicKubeletConfig feature gate must be enabled for the Kubelet to use this field. + // This field is deprecated as of 1.22: https://git.k8s.io/enhancements/keps/sig-node/281-dynamic-kubelet-configuration // +optional optional NodeConfigSource configSource = 6; @@ -2504,6 +2517,7 @@ message NodeSystemInfo { } // ObjectFieldSelector selects an APIVersioned field of an object. +// +structType=atomic message ObjectFieldSelector { // Version of the schema the FieldPath is written in terms of, defaults to "v1". // +optional @@ -2529,6 +2543,7 @@ message ObjectFieldSelector { // Instead of using this type, create a locally provided and used type that is well-focused on your reference. // For example, ServiceReferences for admission registration: https://github.com/kubernetes/api/blob/release-1.17/admissionregistration/v1/types.go#L533 . // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +structType=atomic message ObjectReference { // Kind of the referent. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds @@ -2663,6 +2678,9 @@ message PersistentVolumeClaimSpec { optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 4; // Resources represents the minimum resources the volume should have. + // If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements + // that are lower than previous value but must still be higher than capacity recorded in the + // status field of the claim. // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources // +optional optional ResourceRequirements resources = 2; @@ -2684,13 +2702,32 @@ message PersistentVolumeClaimSpec { // This field can be used to specify either: // * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) // * An existing PVC (PersistentVolumeClaim) - // * An existing custom resource that implements data population (Alpha) - // In order to use custom resource types that implement data population, - // the AnyVolumeDataSource feature gate must be enabled. // If the provisioner or an external controller can support the specified data source, // it will create a new volume based on the contents of the specified data source. + // If the AnyVolumeDataSource feature gate is enabled, this field will always have + // the same contents as the DataSourceRef field. // +optional optional TypedLocalObjectReference dataSource = 7; + + // Specifies the object from which to populate the volume with data, if a non-empty + // volume is desired. This may be any local object from a non-empty API group (non + // core object) or a PersistentVolumeClaim object. 
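A sketch of a claim populated from a snapshot via the new dataSourceRef field, assuming the AnyVolumeDataSource feature gate is enabled; the group, kind, and names are illustrative:

package main

import (
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func restoredClaim() corev1.PersistentVolumeClaim {
	apiGroup := "snapshot.storage.k8s.io"
	return corev1.PersistentVolumeClaim{
		ObjectMeta: metav1.ObjectMeta{Name: "restored"},
		Spec: corev1.PersistentVolumeClaimSpec{
			AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce},
			// DataSource is filled in with the same value automatically
			// for backwards compatibility, per the comment above.
			DataSourceRef: &corev1.TypedLocalObjectReference{
				APIGroup: &apiGroup,
				Kind:     "VolumeSnapshot",
				Name:     "nightly-snap",
			},
			Resources: corev1.ResourceRequirements{
				Requests: corev1.ResourceList{
					corev1.ResourceStorage: resource.MustParse("10Gi"),
				},
			},
		},
	}
}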
+ // When this field is specified, volume binding will only succeed if the type of + // the specified object matches some installed volume populator or dynamic + // provisioner. + // This field will replace the functionality of the DataSource field and as such + // if both fields are non-empty, they must have the same value. For backwards + // compatibility, both fields (DataSource and DataSourceRef) will be set to the same + // value automatically if one of them is empty and the other is non-empty. + // There are two important differences between DataSource and DataSourceRef: + // * While DataSource only allows two specific types of objects, DataSourceRef + // allows any non-core object, as well as PersistentVolumeClaim objects. + // * While DataSource ignores disallowed values (dropping them), DataSourceRef + // preserves all values, and generates an error if a disallowed value is + // specified. + // (Alpha) Using this field requires the AnyVolumeDataSource feature gate to be enabled. + // +optional + optional TypedLocalObjectReference dataSourceRef = 8; } // PersistentVolumeClaimStatus is the current status of a persistent volume claim. @@ -2714,6 +2751,26 @@ message PersistentVolumeClaimStatus { // +patchMergeKey=type // +patchStrategy=merge repeated PersistentVolumeClaimCondition conditions = 4; + + // The storage resource within AllocatedResources tracks the capacity allocated to a PVC. It may + // be larger than the actual capacity when a volume expansion operation is requested. + // For storage quota, the larger value from allocatedResources and PVC.spec.resources is used. + // If allocatedResources is not set, PVC.spec.resources alone is used for quota calculation. + // If a volume expansion capacity request is lowered, allocatedResources is only + // lowered if there are no expansion operations in progress and if the actual volume capacity + // is equal or lower than the requested capacity. + // This is an alpha field and requires enabling RecoverVolumeExpansionFailure feature. + // +featureGate=RecoverVolumeExpansionFailure + // +optional + map allocatedResources = 5; + + // ResizeStatus stores status of resize operation. + // ResizeStatus is not set by default but when expansion is complete resizeStatus is set to empty + // string by resize controller or kubelet. + // This is an alpha field and requires enabling RecoverVolumeExpansionFailure feature. + // +featureGate=RecoverVolumeExpansionFailure + // +optional + optional string resizeStatus = 6; } // PersistentVolumeClaimTemplate is used to produce @@ -3024,7 +3081,7 @@ message PodAffinityTerm { // and the ones listed in the namespaces field. // null selector and null or empty namespaces list means "this pod's namespace". // An empty selector ({}) matches all namespaces. - // This field is alpha-level and is only honored when PodAffinityNamespaceSelector feature is enabled. + // This field is beta-level and is only honored when PodAffinityNamespaceSelector feature is enabled. // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector namespaceSelector = 4; } @@ -3158,12 +3215,10 @@ message PodExecOptions { optional bool stdin = 1; // Redirect the standard output stream of the pod for this call. - // Defaults to true. // +optional optional bool stdout = 2; // Redirect the standard error stream of the pod for this call. - // Defaults to true. 
// +optional optional bool stderr = 3; @@ -3255,6 +3310,15 @@ message PodLogOptions { optional bool insecureSkipTLSVerifyBackend = 9; } +// PodOS defines the OS parameters of a pod. +message PodOS { + // Name is the name of the operating system. The currently supported values are linux and windows. + // Additional value may be defined in future and can be one of: + // https://github.com/opencontainers/runtime-spec/blob/master/config.md#platform-specific-configuration + // Clients should expect to handle additional values and treat unrecognized values in this field as os: null + optional string name = 1; +} + // PodPortForwardOptions is the query options to a Pod's port forward call // when using WebSockets. // The `port` query parameter must specify the port or @@ -3290,12 +3354,14 @@ message PodSecurityContext { // container. May also be set in SecurityContext. If set in // both SecurityContext and PodSecurityContext, the value specified in SecurityContext // takes precedence for that container. + // Note that this field cannot be set when spec.os.name is windows. // +optional optional SELinuxOptions seLinuxOptions = 1; // The Windows specific settings applied to all containers. // If unspecified, the options within a container's SecurityContext will be used. // If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + // Note that this field cannot be set when spec.os.name is linux. // +optional optional WindowsSecurityContextOptions windowsOptions = 8; @@ -3304,6 +3370,7 @@ message PodSecurityContext { // May also be set in SecurityContext. If set in both SecurityContext and // PodSecurityContext, the value specified in SecurityContext takes precedence // for that container. + // Note that this field cannot be set when spec.os.name is windows. // +optional optional int64 runAsUser = 2; @@ -3312,6 +3379,7 @@ message PodSecurityContext { // May also be set in SecurityContext. If set in both SecurityContext and // PodSecurityContext, the value specified in SecurityContext takes precedence // for that container. + // Note that this field cannot be set when spec.os.name is windows. // +optional optional int64 runAsGroup = 6; @@ -3327,6 +3395,7 @@ message PodSecurityContext { // A list of groups applied to the first process run in each container, in addition // to the container's primary GID. If unspecified, no groups will be added to // any container. + // Note that this field cannot be set when spec.os.name is windows. // +optional repeated int64 supplementalGroups = 4; @@ -3339,11 +3408,13 @@ message PodSecurityContext { // 3. The permission bits are OR'd with rw-rw---- // // If unset, the Kubelet will not modify the ownership and permissions of any volume. + // Note that this field cannot be set when spec.os.name is windows. // +optional optional int64 fsGroup = 5; // Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported // sysctls (by the container runtime) might fail to launch. + // Note that this field cannot be set when spec.os.name is windows. // +optional repeated Sysctl sysctls = 7; @@ -3353,10 +3424,12 @@ message PodSecurityContext { // It will have no effect on ephemeral volume types such as: secret, configmaps // and emptydir. // Valid values are "OnRootMismatch" and "Always". If not specified, "Always" is used. + // Note that this field cannot be set when spec.os.name is windows. // +optional optional string fsGroupChangePolicy = 9; // The seccomp options to use by the containers in this pod. 
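A sketch of opting a pod into the new PodOS field defined above (alpha, IdentifyPodOS feature gate assumed enabled), using the generated Linux/Windows constants from this package:

package main

import corev1 "k8s.io/api/core/v1"

// setLinuxOS marks the pod as linux; when os.name is windows instead,
// the fields listed in the PodSpec comment below must be left unset.
func setLinuxOS(pod *corev1.Pod) {
	pod.Spec.OS = &corev1.PodOS{Name: corev1.Linux}
}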
+ // Note that this field cannot be set when spec.os.name is windows. // +optional optional SeccompProfile seccompProfile = 10; } @@ -3407,7 +3480,7 @@ message PodSpec { // pod to perform user-initiated actions such as debugging. This list cannot be specified when // creating a pod, and it cannot be modified by updating the pod spec. In order to add an // ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource. - // This field is alpha-level and is only honored by servers that enable the EphemeralContainers feature. + // This field is beta-level and available on clusters that haven't disabled the EphemeralContainers feature gate. // +optional // +patchMergeKey=name // +patchStrategy=merge @@ -3450,6 +3523,7 @@ message PodSpec { // Selector which must match a node's labels for the pod to be scheduled on that node. // More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ // +optional + // +mapType=atomic map nodeSelector = 7; // ServiceAccountName is the name of the ServiceAccount to use to run this pod. @@ -3571,7 +3645,7 @@ message PodSpec { // If specified, all readiness gates will be evaluated for pod readiness. // A pod is ready when all its containers are ready AND // all conditions specified in the readiness gates have status equal to "True" - // More info: https://git.k8s.io/enhancements/keps/sig-network/0007-pod-ready%2B%2B.md + // More info: https://git.k8s.io/enhancements/keps/sig-network/580-pod-readiness-gates // +optional repeated PodReadinessGate readinessGates = 28; @@ -3579,7 +3653,7 @@ message PodSpec { // to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. // If unset or empty, the "legacy" RuntimeClass will be used, which is an implicit class with an // empty definition that uses the default runtime handler. - // More info: https://git.k8s.io/enhancements/keps/sig-node/runtime-class.md + // More info: https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class // This is a beta feature as of Kubernetes v1.14. // +optional optional string runtimeClassName = 29; @@ -3603,8 +3677,8 @@ message PodSpec { // The RuntimeClass admission controller will reject Pod create requests which have the overhead already // set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value // defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. - // More info: https://git.k8s.io/enhancements/keps/sig-node/20190226-pod-overhead.md - // This field is alpha-level as of Kubernetes v1.16, and is only honored by servers that enable the PodOverhead feature. + // More info: https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md + // This field is beta-level as of Kubernetes v1.18, and is only honored by servers that enable the PodOverhead feature. // +optional map overhead = 32; @@ -3626,6 +3700,37 @@ message PodSpec { // Default to false. // +optional optional bool setHostnameAsFQDN = 35; + + // Specifies the OS of the containers in the pod. + // Some pod and container fields are restricted if this is set. 
+ // + // If the OS field is set to linux, the following fields must be unset: + // -securityContext.windowsOptions + // + // If the OS field is set to windows, following fields must be unset: + // - spec.hostPID + // - spec.hostIPC + // - spec.securityContext.seLinuxOptions + // - spec.securityContext.seccompProfile + // - spec.securityContext.fsGroup + // - spec.securityContext.fsGroupChangePolicy + // - spec.securityContext.sysctls + // - spec.shareProcessNamespace + // - spec.securityContext.runAsUser + // - spec.securityContext.runAsGroup + // - spec.securityContext.supplementalGroups + // - spec.containers[*].securityContext.seLinuxOptions + // - spec.containers[*].securityContext.seccompProfile + // - spec.containers[*].securityContext.capabilities + // - spec.containers[*].securityContext.readOnlyRootFilesystem + // - spec.containers[*].securityContext.privileged + // - spec.containers[*].securityContext.allowPrivilegeEscalation + // - spec.containers[*].securityContext.procMount + // - spec.containers[*].securityContext.runAsUser + // - spec.containers[*].securityContext.runAsGroup + // +optional + // This is an alpha field and requires the IdentifyPodOS feature + optional PodOS os = 36; } // PodStatus represents information about the status of a pod. Status may trail the actual @@ -3720,7 +3825,7 @@ message PodStatus { optional string qosClass = 9; // Status for any ephemeral containers that have run in this pod. - // This field is alpha-level and is only populated by servers that enable the EphemeralContainers feature. + // This field is beta-level and available on clusters that haven't disabled the EphemeralContainers feature gate. // +optional repeated ContainerStatus ephemeralContainerStatuses = 13; } @@ -3857,7 +3962,7 @@ message PreferredSchedulingTerm { // alive or ready to receive traffic. message Probe { // The action taken to determine the health of a container - optional Handler handler = 1; + optional ProbeHandler handler = 1; // Number of seconds after the container has started before liveness probes are initiated. // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes @@ -3893,11 +3998,34 @@ message Probe { // value overrides the value provided by the pod spec. // Value must be non-negative integer. The value zero indicates stop immediately via // the kill signal (no opportunity to shut down). - // This is an alpha field and requires enabling ProbeTerminationGracePeriod feature gate. + // This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + // Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. // +optional optional int64 terminationGracePeriodSeconds = 7; } +// ProbeHandler defines a specific action that should be taken in a probe. +// One and only one of the fields must be specified. +message ProbeHandler { + // Exec specifies the action to take. + // +optional + optional ExecAction exec = 1; + + // HTTPGet specifies the http request to perform. + // +optional + optional HTTPGetAction httpGet = 2; + + // TCPSocket specifies an action involving a TCP port. + // +optional + optional TCPSocketAction tcpSocket = 3; + + // GRPC specifies an action involving a GRPC port. + // This is an alpha field and requires enabling GRPCContainerProbe feature gate. 
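A sketch of a readiness probe written against the new ProbeHandler and GRPCAction types (gRPC probes are alpha and require the GRPCContainerProbe gate); the port and service name are illustrative:

package main

import corev1 "k8s.io/api/core/v1"

func grpcReadiness() *corev1.Probe {
	svc := "readiness" // placed in the gRPC HealthCheckRequest
	return &corev1.Probe{
		ProbeHandler: corev1.ProbeHandler{
			GRPC: &corev1.GRPCAction{Port: 9090, Service: &svc},
		},
		InitialDelaySeconds: 5,
		PeriodSeconds:       10,
	}
}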
+ // +featureGate=GRPCContainerProbe + // +optional + optional GRPCAction grpc = 4; +} + // Represents a projected volume source message ProjectedVolumeSource { // list of volume projections @@ -4138,6 +4266,7 @@ message ReplicationControllerSpec { // controller, if empty defaulted to labels on Pod template. // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors // +optional + // +mapType=atomic map selector = 2; // Template is the object that describes the pod that will be created if @@ -4178,6 +4307,7 @@ message ReplicationControllerStatus { } // ResourceFieldSelector represents container resources (cpu, memory) and their output format +// +structType=atomic message ResourceFieldSelector { // Container name: required for volumes, optional for env vars // +optional @@ -4380,6 +4510,7 @@ message ScaleIOVolumeSource { // A scope selector represents the AND of the selectors represented // by the scoped-resource selector requirements. +// +structType=atomic message ScopeSelector { // A list of scope selector requirements by scope of the resources. // +optional @@ -4456,6 +4587,7 @@ message Secret { map stringData = 4; // Used to facilitate programmatic handling of secret data. + // More info: https://kubernetes.io/docs/concepts/configuration/secret/#secret-types // +optional optional string type = 3; } @@ -4475,6 +4607,7 @@ message SecretEnvSource { } // SecretKeySelector selects a key of a Secret. +// +structType=atomic message SecretKeySelector { // The name of the secret in the pod's namespace to select from. optional LocalObjectReference localObjectReference = 1; @@ -4525,6 +4658,7 @@ message SecretProjection { // SecretReference represents a Secret Reference. It has enough information to retrieve secret // in any namespace +// +structType=atomic message SecretReference { // Name is unique within a namespace to reference a secret resource. // +optional @@ -4577,12 +4711,14 @@ message SecretVolumeSource { message SecurityContext { // The capabilities to add/drop when running containers. // Defaults to the default set of capabilities granted by the container runtime. + // Note that this field cannot be set when spec.os.name is windows. // +optional optional Capabilities capabilities = 1; // Run container in privileged mode. // Processes in privileged containers are essentially equivalent to root on the host. // Defaults to false. + // Note that this field cannot be set when spec.os.name is windows. // +optional optional bool privileged = 2; @@ -4590,12 +4726,14 @@ message SecurityContext { // If unspecified, the container runtime will allocate a random SELinux context for each // container. May also be set in PodSecurityContext. If set in both SecurityContext and // PodSecurityContext, the value specified in SecurityContext takes precedence. + // Note that this field cannot be set when spec.os.name is windows. // +optional optional SELinuxOptions seLinuxOptions = 3; // The Windows specific settings applied to all containers. // If unspecified, the options from the PodSecurityContext will be used. // If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + // Note that this field cannot be set when spec.os.name is linux. // +optional optional WindowsSecurityContextOptions windowsOptions = 10; @@ -4603,6 +4741,7 @@ message SecurityContext { // Defaults to user specified in image metadata if unspecified. // May also be set in PodSecurityContext. 
If set in both SecurityContext and // PodSecurityContext, the value specified in SecurityContext takes precedence. + // Note that this field cannot be set when spec.os.name is windows. // +optional optional int64 runAsUser = 4; @@ -4610,6 +4749,7 @@ message SecurityContext { // Uses runtime default if unset. // May also be set in PodSecurityContext. If set in both SecurityContext and // PodSecurityContext, the value specified in SecurityContext takes precedence. + // Note that this field cannot be set when spec.os.name is windows. // +optional optional int64 runAsGroup = 8; @@ -4624,6 +4764,7 @@ message SecurityContext { // Whether this container has a read-only root filesystem. // Default is false. + // Note that this field cannot be set when spec.os.name is windows. // +optional optional bool readOnlyRootFilesystem = 6; @@ -4633,6 +4774,7 @@ message SecurityContext { // AllowPrivilegeEscalation is true always when the container is: // 1) run as Privileged // 2) has CAP_SYS_ADMIN + // Note that this field cannot be set when spec.os.name is windows. // +optional optional bool allowPrivilegeEscalation = 7; @@ -4640,12 +4782,14 @@ message SecurityContext { // The default is DefaultProcMount which uses the container runtime defaults for // readonly paths and masked paths. // This requires the ProcMountType feature flag to be enabled. + // Note that this field cannot be set when spec.os.name is windows. // +optional optional string procMount = 9; // The seccomp options to use by this container. If seccomp options are // provided at both the pod & container level, the container options // override the pod options. + // Note that this field cannot be set when spec.os.name is windows. // +optional optional SeccompProfile seccompProfile = 11; } @@ -4780,8 +4924,6 @@ message ServicePort { // RFC-6335 and http://www.iana.org/assignments/service-names). // Non-standard protocols should use prefixed names such as // mycompany.com/my-custom-protocol. - // This is a beta field that is guarded by the ServiceAppProtocol feature - // gate and enabled by default. // +optional optional string appProtocol = 6; @@ -4841,6 +4983,7 @@ message ServiceSpec { // Ignored if type is ExternalName. // More info: https://kubernetes.io/docs/concepts/services-networking/service/ // +optional + // +mapType=atomic map selector = 2; // clusterIP is the IP address of the service and is usually assigned @@ -4880,12 +5023,9 @@ message ServiceSpec { // clients must ensure that clusterIPs[0] and clusterIP have the same // value. // - // Unless the "IPv6DualStack" feature gate is enabled, this field is - // limited to one value, which must be the same as the clusterIP field. If - // the feature gate is enabled, this field may hold a maximum of two - // entries (dual-stack IPs, in either order). These IPs must correspond to - // the values of the ipFamilies field. Both clusterIPs and ipFamilies are - // governed by the ipFamilyPolicy field. + // This field may hold a maximum of two entries (dual-stack IPs, in either order). + // These IPs must correspond to the values of the ipFamilies field. Both + // clusterIPs and ipFamilies are governed by the ipFamilyPolicy field. // More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies // +listType=atomic // +optional @@ -4937,7 +5077,7 @@ message ServiceSpec { // If specified and supported by the platform, this will restrict traffic through the cloud-provider // load-balancer will be restricted to the specified client IPs. 
This field will be ignored if the // cloud-provider does not support the feature." - // More info: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/ + // More info: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/ // +optional repeated string loadBalancerSourceRanges = 9; @@ -4984,35 +5124,17 @@ message ServiceSpec { // +optional optional SessionAffinityConfig sessionAffinityConfig = 14; - // topologyKeys is a preference-order list of topology keys which - // implementations of services should use to preferentially sort endpoints - // when accessing this Service, it can not be used at the same time as - // externalTrafficPolicy=Local. - // Topology keys must be valid label keys and at most 16 keys may be specified. - // Endpoints are chosen based on the first topology key with available backends. - // If this field is specified and all entries have no backends that match - // the topology of the client, the service has no backends for that client - // and connections should fail. - // The special value "*" may be used to mean "any topology". This catch-all - // value, if used, only makes sense as the last value in the list. - // If this is not specified or empty, no topology constraints will be applied. - // This field is alpha-level and is only honored by servers that enable the ServiceTopology feature. - // This field is deprecated and will be removed in a future version. - // +optional - repeated string topologyKeys = 16; - // IPFamilies is a list of IP families (e.g. IPv4, IPv6) assigned to this - // service, and is gated by the "IPv6DualStack" feature gate. This field - // is usually assigned automatically based on cluster configuration and the - // ipFamilyPolicy field. If this field is specified manually, the requested - // family is available in the cluster, and ipFamilyPolicy allows it, it - // will be used; otherwise creation of the service will fail. This field - // is conditionally mutable: it allows for adding or removing a secondary - // IP family, but it does not allow changing the primary IP family of the - // Service. Valid values are "IPv4" and "IPv6". This field only applies - // to Services of types ClusterIP, NodePort, and LoadBalancer, and does - // apply to "headless" services. This field will be wiped when updating a - // Service to type ExternalName. + // service. This field is usually assigned automatically based on cluster + // configuration and the ipFamilyPolicy field. If this field is specified + // manually, the requested family is available in the cluster, + // and ipFamilyPolicy allows it, it will be used; otherwise creation of + // the service will fail. This field is conditionally mutable: it allows + // for adding or removing a secondary IP family, but it does not allow + // changing the primary IP family of the Service. Valid values are "IPv4" + // and "IPv6". This field only applies to Services of types ClusterIP, + // NodePort, and LoadBalancer, and does apply to "headless" services. + // This field will be wiped when updating a Service to type ExternalName. // // This field may hold a maximum of two entries (dual-stack families, in // either order). These families must correspond to the values of the @@ -5023,23 +5145,25 @@ message ServiceSpec { repeated string ipFamilies = 19; // IPFamilyPolicy represents the dual-stack-ness requested or required by - // this Service, and is gated by the "IPv6DualStack" feature gate. 
If - // there is no value provided, then this field will be set to SingleStack. - // Services can be "SingleStack" (a single IP family), "PreferDualStack" - // (two IP families on dual-stack configured clusters or a single IP family - // on single-stack clusters), or "RequireDualStack" (two IP families on - // dual-stack configured clusters, otherwise fail). The ipFamilies and - // clusterIPs fields depend on the value of this field. This field will be - // wiped when updating a service to type ExternalName. + // this Service. If there is no value provided, then this field will be set + // to SingleStack. Services can be "SingleStack" (a single IP family), + // "PreferDualStack" (two IP families on dual-stack configured clusters or + // a single IP family on single-stack clusters), or "RequireDualStack" + // (two IP families on dual-stack configured clusters, otherwise fail). The + // ipFamilies and clusterIPs fields depend on the value of this field. This + // field will be wiped when updating a service to type ExternalName. // +optional optional string ipFamilyPolicy = 17; // allocateLoadBalancerNodePorts defines if NodePorts will be automatically - // allocated for services with type LoadBalancer. Default is "true". It may be - // set to "false" if the cluster load-balancer does not rely on NodePorts. - // allocateLoadBalancerNodePorts may only be set for services with type LoadBalancer - // and will be cleared if the type is changed to any other type. - // This field is alpha-level and is only honored by servers that enable the ServiceLBNodePortControl feature. + // allocated for services with type LoadBalancer. Default is "true". It + // may be set to "false" if the cluster load-balancer does not rely on + // NodePorts. If the caller requests specific NodePorts (by specifying a + // value), those requests will be respected, regardless of this field. + // This field may only be set for services with type LoadBalancer and will + // be cleared if the type is changed to any other type. + // This field is beta-level and is only honored by servers that enable the ServiceLBNodePortControl feature. + // +featureGate=ServiceLBNodePortControl // +optional optional bool allocateLoadBalancerNodePorts = 20; @@ -5246,6 +5370,7 @@ message TopologySelectorLabelRequirement { // The requirements of them are ANDed. // It provides a subset of functionality as NodeSelectorTerm. // This is an alpha feature and may change in the future. +// +structType=atomic message TopologySelectorTerm { // A list of topology selector requirements by labels. // +optional @@ -5287,7 +5412,7 @@ message TopologySpreadConstraint { // but giving higher precedence to topologies that would help reduce the // skew. // A constraint is considered "Unsatisfiable" for an incoming pod - // if and only if every possible node assigment for that pod would violate + // if and only if every possible node assignment for that pod would violate // "MaxSkew" on some topology. // For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same // labelSelector spread as 3/1/1: @@ -5312,6 +5437,7 @@ message TopologySpreadConstraint { // TypedLocalObjectReference contains enough information to let you locate the // typed referenced object inside the same namespace. +// +structType=atomic message TypedLocalObjectReference { // APIGroup is the group for the resource being referenced. // If APIGroup is not specified, the specified Kind must be in the core API group. 
@@ -5570,9 +5696,6 @@ message VolumeSource { // A pod can use both types of ephemeral volumes and // persistent volumes at the same time. // - // This is a beta feature and only available when the GenericEphemeralVolume - // feature gate is enabled. - // // +optional optional EphemeralVolumeSource ephemeral = 29; } @@ -5625,5 +5748,15 @@ message WindowsSecurityContextOptions { // PodSecurityContext, the value specified in SecurityContext takes precedence. // +optional optional string runAsUserName = 3; + + // HostProcess determines if a container should be run as a 'Host Process' container. + // This field is alpha-level and will only be honored by components that enable the + // WindowsHostProcessContainers feature flag. Setting this field without the feature + // flag will result in errors when validating the Pod. All of a Pod's containers must + // have the same effective HostProcess value (it is not allowed to have a mix of HostProcess + // containers and non-HostProcess containers). In addition, if HostProcess is true + // then HostNetwork must also be set to true. + // +optional + optional bool hostProcess = 4; } diff --git a/vendor/k8s.io/api/core/v1/register.go b/vendor/k8s.io/api/core/v1/register.go index 8da1ddeade1fc..1aac0cb41e0de 100644 --- a/vendor/k8s.io/api/core/v1/register.go +++ b/vendor/k8s.io/api/core/v1/register.go @@ -88,7 +88,6 @@ func addKnownTypes(scheme *runtime.Scheme) error { &RangeAllocation{}, &ConfigMap{}, &ConfigMapList{}, - &EphemeralContainers{}, ) // Add common types diff --git a/vendor/k8s.io/api/core/v1/types.go b/vendor/k8s.io/api/core/v1/types.go index 3eadb45674c98..dcf83eccd0094 100644 --- a/vendor/k8s.io/api/core/v1/types.go +++ b/vendor/k8s.io/api/core/v1/types.go @@ -30,8 +30,6 @@ const ( NamespaceAll string = "" // NamespaceNodeLease is the namespace where we place node lease objects (used for node heartbeats) NamespaceNodeLease string = "kube-node-lease" - // TopologyKeyAny is the service topology key that matches any node - TopologyKeyAny string = "*" ) // Volume represents a named volume in a pod that may be accessed by any container in the pod. @@ -181,9 +179,6 @@ type VolumeSource struct { // A pod can use both types of ephemeral volumes and // persistent volumes at the same time. // - // This is a beta feature and only available when the GenericEphemeralVolume - // feature gate is enabled. - // // +optional Ephemeral *EphemeralVolumeSource `json:"ephemeral,omitempty" protobuf:"bytes,29,opt,name=ephemeral"` } @@ -376,6 +371,7 @@ type VolumeNodeAffinity struct { } // PersistentVolumeReclaimPolicy describes a policy for end-of-life maintenance of persistent volumes. +// +enum type PersistentVolumeReclaimPolicy string const ( @@ -391,6 +387,7 @@ const ( ) // PersistentVolumeMode describes how a volume is intended to be consumed, either Block or Filesystem. +// +enum type PersistentVolumeMode string const ( @@ -477,6 +474,9 @@ type PersistentVolumeClaimSpec struct { // +optional Selector *metav1.LabelSelector `json:"selector,omitempty" protobuf:"bytes,4,opt,name=selector"` // Resources represents the minimum resources the volume should have. + // If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements + // that are lower than previous value but must still be higher than capacity recorded in the + // status field of the claim. 
// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources // +optional Resources ResourceRequirements `json:"resources,omitempty" protobuf:"bytes,2,opt,name=resources"` @@ -494,13 +494,31 @@ type PersistentVolumeClaimSpec struct { // This field can be used to specify either: // * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) // * An existing PVC (PersistentVolumeClaim) - // * An existing custom resource that implements data population (Alpha) - // In order to use custom resource types that implement data population, - // the AnyVolumeDataSource feature gate must be enabled. // If the provisioner or an external controller can support the specified data source, // it will create a new volume based on the contents of the specified data source. + // If the AnyVolumeDataSource feature gate is enabled, this field will always have + // the same contents as the DataSourceRef field. // +optional DataSource *TypedLocalObjectReference `json:"dataSource,omitempty" protobuf:"bytes,7,opt,name=dataSource"` + // Specifies the object from which to populate the volume with data, if a non-empty + // volume is desired. This may be any local object from a non-empty API group (non + // core object) or a PersistentVolumeClaim object. + // When this field is specified, volume binding will only succeed if the type of + // the specified object matches some installed volume populator or dynamic + // provisioner. + // This field will replace the functionality of the DataSource field and as such + // if both fields are non-empty, they must have the same value. For backwards + // compatibility, both fields (DataSource and DataSourceRef) will be set to the same + // value automatically if one of them is empty and the other is non-empty. + // There are two important differences between DataSource and DataSourceRef: + // * While DataSource only allows two specific types of objects, DataSourceRef + // allows any non-core object, as well as PersistentVolumeClaim objects. + // * While DataSource ignores disallowed values (dropping them), DataSourceRef + // preserves all values, and generates an error if a disallowed value is + // specified. + // (Alpha) Using this field requires the AnyVolumeDataSource feature gate to be enabled. + // +optional + DataSourceRef *TypedLocalObjectReference `json:"dataSourceRef,omitempty" protobuf:"bytes,8,opt,name=dataSourceRef"` } // PersistentVolumeClaimConditionType is a valid value of PersistentVolumeClaimCondition.Type @@ -513,6 +531,26 @@ const ( PersistentVolumeClaimFileSystemResizePending PersistentVolumeClaimConditionType = "FileSystemResizePending" ) +// +enum +type PersistentVolumeClaimResizeStatus string + +const ( + // When expansion is complete, the empty string is set by resize controller or kubelet. + PersistentVolumeClaimNoExpansionInProgress PersistentVolumeClaimResizeStatus = "" + // State set when resize controller starts expanding the volume in control-plane + PersistentVolumeClaimControllerExpansionInProgress PersistentVolumeClaimResizeStatus = "ControllerExpansionInProgress" + // State set when expansion has failed in resize controller with a terminal error. + // Transient errors such as timeout should not set this status and should leave ResizeStatus + // unmodified, so as resize controller can resume the volume expansion. 
+ PersistentVolumeClaimControllerExpansionFailed PersistentVolumeClaimResizeStatus = "ControllerExpansionFailed" + // State set when resize controller has finished expanding the volume but further expansion is needed on the node. + PersistentVolumeClaimNodeExpansionPending PersistentVolumeClaimResizeStatus = "NodeExpansionPending" + // State set when kubelet starts expanding the volume. + PersistentVolumeClaimNodeExpansionInProgress PersistentVolumeClaimResizeStatus = "NodeExpansionInProgress" + // State set when expansion has failed in kubelet with a terminal error. Transient errors don't set NodeExpansionFailed. + PersistentVolumeClaimNodeExpansionFailed PersistentVolumeClaimResizeStatus = "NodeExpansionFailed" +) + // PersistentVolumeClaimCondition contails details about state of pvc type PersistentVolumeClaimCondition struct { Type PersistentVolumeClaimConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=PersistentVolumeClaimConditionType"` @@ -551,8 +589,27 @@ type PersistentVolumeClaimStatus struct { // +patchMergeKey=type // +patchStrategy=merge Conditions []PersistentVolumeClaimCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,4,rep,name=conditions"` -} - + // The storage resource within AllocatedResources tracks the capacity allocated to a PVC. It may + // be larger than the actual capacity when a volume expansion operation is requested. + // For storage quota, the larger value from allocatedResources and PVC.spec.resources is used. + // If allocatedResources is not set, PVC.spec.resources alone is used for quota calculation. + // If a volume expansion capacity request is lowered, allocatedResources is only + // lowered if there are no expansion operations in progress and if the actual volume capacity + // is equal or lower than the requested capacity. + // This is an alpha field and requires enabling RecoverVolumeExpansionFailure feature. + // +featureGate=RecoverVolumeExpansionFailure + // +optional + AllocatedResources ResourceList `json:"allocatedResources,omitempty" protobuf:"bytes,5,rep,name=allocatedResources,casttype=ResourceList,castkey=ResourceName"` + // ResizeStatus stores status of resize operation. + // ResizeStatus is not set by default but when expansion is complete resizeStatus is set to empty + // string by resize controller or kubelet. + // This is an alpha field and requires enabling RecoverVolumeExpansionFailure feature. + // +featureGate=RecoverVolumeExpansionFailure + // +optional + ResizeStatus *PersistentVolumeClaimResizeStatus `json:"resizeStatus,omitempty" protobuf:"bytes,6,opt,name=resizeStatus,casttype=PersistentVolumeClaimResizeStatus"` +} + +// +enum type PersistentVolumeAccessMode string const ( @@ -562,8 +619,12 @@ const ( ReadOnlyMany PersistentVolumeAccessMode = "ReadOnlyMany" // can be mounted in read/write mode to many hosts ReadWriteMany PersistentVolumeAccessMode = "ReadWriteMany" + // can be mounted in read/write mode to exactly 1 pod + // cannot be used in combination with other access modes + ReadWriteOncePod PersistentVolumeAccessMode = "ReadWriteOncePod" ) +// +enum type PersistentVolumePhase string const ( @@ -582,6 +643,7 @@ const ( VolumeFailed PersistentVolumePhase = "Failed" ) +// +enum type PersistentVolumeClaimPhase string const ( @@ -595,6 +657,7 @@ const ( ClaimLost PersistentVolumeClaimPhase = "Lost" ) +// +enum type HostPathType string const ( @@ -861,6 +924,7 @@ type CephFSVolumeSource struct { // SecretReference represents a Secret Reference. 
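A minimal sketch of how the new DataSourceRef field and the ReadWriteOncePod access mode described above might be combined in a claim; the snapshot name, API group, and size below are hypothetical:

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	apiGroup := "snapshot.storage.k8s.io" // external snapshot API group (assumption)
	pvc := v1.PersistentVolumeClaim{
		ObjectMeta: metav1.ObjectMeta{Name: "restored-claim"},
		Spec: v1.PersistentVolumeClaimSpec{
			// ReadWriteOncePod: the volume is usable by a single pod only.
			AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadWriteOncePod},
			// DataSourceRef mirrors DataSource, but preserves values that
			// DataSource would drop and admits arbitrary non-core populators.
			DataSourceRef: &v1.TypedLocalObjectReference{
				APIGroup: &apiGroup,
				Kind:     "VolumeSnapshot",
				Name:     "snapshot-1",
			},
			Resources: v1.ResourceRequirements{
				Requests: v1.ResourceList{v1.ResourceStorage: resource.MustParse("10Gi")},
			},
		},
	}
	fmt.Println(pvc.Spec.DataSourceRef.Kind, pvc.Spec.AccessModes)
}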
It has enough information to retrieve secret // in any namespace +// +structType=atomic type SecretReference struct { // Name is unique within a namespace to reference a secret resource. // +optional @@ -922,6 +986,7 @@ const ( ) // Protocol defines network protocols supported for things like container ports. +// +enum type Protocol string const ( @@ -1350,7 +1415,10 @@ type PhotonPersistentDiskVolumeSource struct { FSType string `json:"fsType,omitempty" protobuf:"bytes,2,opt,name=fsType"` } +// +enum type AzureDataDiskCachingMode string + +// +enum type AzureDataDiskKind string const ( @@ -1677,7 +1745,7 @@ type LocalVolumeSource struct { // Filesystem type to mount. // It applies only when the Path is a block device. // Must be a filesystem type supported by the host operating system. - // Ex. "ext4", "xfs", "ntfs". The default value is to auto-select a fileystem if unspecified. + // Ex. "ext4", "xfs", "ntfs". The default value is to auto-select a filesystem if unspecified. // +optional FSType *string `json:"fsType,omitempty" protobuf:"bytes,2,opt,name=fsType"` } @@ -1875,6 +1943,7 @@ type VolumeMount struct { } // MountPropagationMode describes mount propagation. +// +enum type MountPropagationMode string const ( @@ -1915,11 +1984,12 @@ type EnvVar struct { // Optional: no more than one of the following may be specified. // Variable references $(VAR_NAME) are expanded - // using the previous defined environment variables in the container and + // using the previously defined environment variables in the container and // any service environment variables. If a variable cannot be resolved, - // the reference in the input string will be unchanged. The $(VAR_NAME) - // syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped - // references will never be expanded, regardless of whether the variable + // the reference in the input string will be unchanged. Double $$ are reduced + // to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. + // "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + // Escaped references will never be expanded, regardless of whether the variable // exists or not. // Defaults to "". // +optional @@ -1948,6 +2018,7 @@ type EnvVarSource struct { } // ObjectFieldSelector selects an APIVersioned field of an object. +// +structType=atomic type ObjectFieldSelector struct { // Version of the schema the FieldPath is written in terms of, defaults to "v1". // +optional @@ -1957,6 +2028,7 @@ type ObjectFieldSelector struct { } // ResourceFieldSelector represents container resources (cpu, memory) and their output format +// +structType=atomic type ResourceFieldSelector struct { // Container name: required for volumes, optional for env vars // +optional @@ -1969,6 +2041,7 @@ type ResourceFieldSelector struct { } // Selects a key from a ConfigMap. +// +structType=atomic type ConfigMapKeySelector struct { // The ConfigMap to select from. LocalObjectReference `json:",inline" protobuf:"bytes,1,opt,name=localObjectReference"` @@ -1980,6 +2053,7 @@ type ConfigMapKeySelector struct { } // SecretKeySelector selects a key of a Secret. +// +structType=atomic type SecretKeySelector struct { // The name of the secret in the pod's namespace to select from. 
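To make the "$$" escaping rule and the SecretKeySelector above concrete, here is a small hypothetical sketch (the secret name and key are made up):

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

func main() {
	env := []v1.EnvVar{
		{
			// "$$(USER)" is reduced to the literal "$(USER)" and is never
			// expanded, per the escaping rules documented above.
			Name:  "GREETING",
			Value: "hello $$(USER)",
		},
		{
			// Pull the value from a Secret key via SecretKeySelector.
			Name: "DB_PASSWORD",
			ValueFrom: &v1.EnvVarSource{
				SecretKeyRef: &v1.SecretKeySelector{
					LocalObjectReference: v1.LocalObjectReference{Name: "db-credentials"},
					Key:                  "password",
				},
			},
		},
	}
	fmt.Println(env[0].Value, env[1].ValueFrom.SecretKeyRef.Key)
}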
LocalObjectReference `json:",inline" protobuf:"bytes,1,opt,name=localObjectReference"` @@ -2060,6 +2134,7 @@ type HTTPGetAction struct { } // URIScheme identifies the scheme used for connection to a host for Get actions +// +enum type URIScheme string const ( @@ -2080,6 +2155,19 @@ type TCPSocketAction struct { Host string `json:"host,omitempty" protobuf:"bytes,2,opt,name=host"` } +type GRPCAction struct { + // Port number of the gRPC service. Number must be in the range 1 to 65535. + Port int32 `json:"port" protobuf:"bytes,1,opt,name=port"` + + // Service is the name of the service to place in the gRPC HealthCheckRequest + // (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + // + // If this is not specified, the default behavior is defined by gRPC. + // +optional + // +default="" + Service *string `json:"service" protobuf:"bytes,2,opt,name=service"` +} + // ExecAction describes a "run in container" action. type ExecAction struct { // Command is the command line to execute inside the container, the working directory for the @@ -2095,7 +2183,7 @@ type ExecAction struct { // alive or ready to receive traffic. type Probe struct { // The action taken to determine the health of a container - Handler `json:",inline" protobuf:"bytes,1,opt,name=handler"` + ProbeHandler `json:",inline" protobuf:"bytes,1,opt,name=handler"` // Number of seconds after the container has started before liveness probes are initiated. // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes // +optional @@ -2125,12 +2213,14 @@ type Probe struct { // value overrides the value provided by the pod spec. // Value must be non-negative integer. The value zero indicates stop immediately via // the kill signal (no opportunity to shut down). - // This is an alpha field and requires enabling ProbeTerminationGracePeriod feature gate. + // This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + // Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. // +optional TerminationGracePeriodSeconds *int64 `json:"terminationGracePeriodSeconds,omitempty" protobuf:"varint,7,opt,name=terminationGracePeriodSeconds"` } // PullPolicy describes a policy for if/when to pull a container image +// +enum type PullPolicy string const ( @@ -2143,6 +2233,7 @@ const ( ) // PreemptionPolicy describes a policy for if/when to preempt a pod. +// +enum type PreemptionPolicy string const ( @@ -2153,6 +2244,7 @@ const ( ) // TerminationMessagePolicy describes how termination messages are retrieved from a container. +// +enum type TerminationMessagePolicy string const ( @@ -2212,20 +2304,20 @@ type Container struct { // Entrypoint array. Not executed within a shell. // The docker image's ENTRYPOINT is used if this is not provided. // Variable references $(VAR_NAME) are expanded using the container's environment. If a variable - // cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax - // can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, - // regardless of whether the variable exists or not. - // Cannot be updated. + // cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + // to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + // produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + // of whether the variable exists or not. Cannot be updated. 
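A minimal sketch of the new GRPCAction in a readiness probe, assuming the GRPCContainerProbe feature gate is enabled; the port and service name are placeholders:

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

func main() {
	service := "grpc.health.v1.Health" // name placed in the HealthCheckRequest (assumption)
	readiness := v1.Probe{
		ProbeHandler: v1.ProbeHandler{
			GRPC: &v1.GRPCAction{Port: 9090, Service: &service},
		},
		InitialDelaySeconds: 5,
		PeriodSeconds:       10,
	}
	fmt.Println(readiness.GRPC.Port)
}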
// More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell // +optional Command []string `json:"command,omitempty" protobuf:"bytes,3,rep,name=command"` // Arguments to the entrypoint. // The docker image's CMD is used if this is not provided. // Variable references $(VAR_NAME) are expanded using the container's environment. If a variable - // cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax - // can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, - // regardless of whether the variable exists or not. - // Cannot be updated. + // cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + // to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + // produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + // of whether the variable exists or not. Cannot be updated. // More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell // +optional Args []string `json:"args,omitempty" protobuf:"bytes,4,rep,name=args"` @@ -2329,8 +2421,8 @@ type Container struct { // More info: https://kubernetes.io/docs/concepts/containers/images#updating-images // +optional ImagePullPolicy PullPolicy `json:"imagePullPolicy,omitempty" protobuf:"bytes,14,opt,name=imagePullPolicy,casttype=PullPolicy"` - // Security options the pod should run with. - // More info: https://kubernetes.io/docs/concepts/policy/security-context/ + // SecurityContext defines the security options the container should be run with. + // If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. // More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ // +optional SecurityContext *SecurityContext `json:"securityContext,omitempty" protobuf:"bytes,15,opt,name=securityContext"` @@ -2358,10 +2450,9 @@ type Container struct { TTY bool `json:"tty,omitempty" protobuf:"varint,18,opt,name=tty"` } -// Handler defines a specific action that should be taken -// TODO: pass structured data to these actions, and document that data here. -type Handler struct { - // One and only one of the following should be specified. +// ProbeHandler defines a specific action that should be taken in a probe. +// One and only one of the fields must be specified. +type ProbeHandler struct { // Exec specifies the action to take. // +optional Exec *ExecAction `json:"exec,omitempty" protobuf:"bytes,1,opt,name=exec"` @@ -2369,8 +2460,28 @@ type Handler struct { // +optional HTTPGet *HTTPGetAction `json:"httpGet,omitempty" protobuf:"bytes,2,opt,name=httpGet"` // TCPSocket specifies an action involving a TCP port. - // TCP hooks not yet supported - // TODO: implement a realistic TCP lifecycle hook + // +optional + TCPSocket *TCPSocketAction `json:"tcpSocket,omitempty" protobuf:"bytes,3,opt,name=tcpSocket"` + + // GRPC specifies an action involving a GRPC port. + // This is an alpha field and requires enabling GRPCContainerProbe feature gate. + // +featureGate=GRPCContainerProbe + // +optional + GRPC *GRPCAction `json:"grpc,omitempty" protobuf:"bytes,4,opt,name=grpc"` +} + +// LifecycleHandler defines a specific action that should be taken in a lifecycle +// hook. One and only one of the fields, except TCPSocket must be specified. 
+type LifecycleHandler struct {
+	// Exec specifies the action to take.
+	// +optional
+	Exec *ExecAction `json:"exec,omitempty" protobuf:"bytes,1,opt,name=exec"`
+	// HTTPGet specifies the http request to perform.
+	// +optional
+	HTTPGet *HTTPGetAction `json:"httpGet,omitempty" protobuf:"bytes,2,opt,name=httpGet"`
+	// Deprecated. TCPSocket is NOT supported as a LifecycleHandler and is kept
+	// for backward compatibility. There is no validation of this field and
+	// lifecycle hooks will fail at runtime when a tcp handler is specified.
 	// +optional
 	TCPSocket *TCPSocketAction `json:"tcpSocket,omitempty" protobuf:"bytes,3,opt,name=tcpSocket"`
 }
@@ -2384,19 +2495,18 @@ type Lifecycle struct {
 	// Other management of the container blocks until the hook completes.
 	// More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks
 	// +optional
-	PostStart *Handler `json:"postStart,omitempty" protobuf:"bytes,1,opt,name=postStart"`
+	PostStart *LifecycleHandler `json:"postStart,omitempty" protobuf:"bytes,1,opt,name=postStart"`
 	// PreStop is called immediately before a container is terminated due to an
 	// API request or management event such as liveness/startup probe failure,
 	// preemption, resource contention, etc. The handler is not called if the
-	// container crashes or exits. The reason for termination is passed to the
-	// handler. The Pod's termination grace period countdown begins before the
-	// PreStop hooked is executed. Regardless of the outcome of the handler, the
+	// container crashes or exits. The Pod's termination grace period countdown begins before the
+	// PreStop hook is executed. Regardless of the outcome of the handler, the
 	// container will eventually terminate within the Pod's termination grace
-	// period. Other management of the container blocks until the hook completes
+	// period (unless delayed by finalizers). Other management of the container blocks until the hook completes
 	// or until the termination grace period is reached.
 	// More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks
 	// +optional
-	PreStop *Handler `json:"preStop,omitempty" protobuf:"bytes,2,opt,name=preStop"`
+	PreStop *LifecycleHandler `json:"preStop,omitempty" protobuf:"bytes,2,opt,name=preStop"`
 }
 
 type ConditionStatus string
@@ -2480,14 +2590,10 @@ type ContainerStatus struct {
 	LastTerminationState ContainerState `json:"lastState,omitempty" protobuf:"bytes,3,opt,name=lastState"`
 	// Specifies whether the container has passed its readiness probe.
 	Ready bool `json:"ready" protobuf:"varint,4,opt,name=ready"`
-	// The number of times the container has been restarted, currently based on
-	// the number of dead containers that have not yet been removed.
-	// Note that this is calculated from dead containers. But those containers are subject to
-	// garbage collection. This value will get capped at 5 by GC.
+	// The number of times the container has been restarted.
 	RestartCount int32 `json:"restartCount" protobuf:"varint,5,opt,name=restartCount"`
 	// The image the container is running.
-	// More info: https://kubernetes.io/docs/concepts/containers/images
-	// TODO(dchen1107): Which image the container is running with?
+	// More info: https://kubernetes.io/docs/concepts/containers/images.
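A short sketch of the renamed LifecycleHandler in use; since TCPSocket is deprecated for lifecycle hooks, the example uses Exec (the command is arbitrary):

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

func main() {
	lifecycle := v1.Lifecycle{
		PreStop: &v1.LifecycleHandler{
			// Drain connections before SIGTERM; note the termination grace
			// period countdown has already started when this runs.
			Exec: &v1.ExecAction{Command: []string{"/bin/sh", "-c", "sleep 5"}},
		},
	}
	fmt.Println(lifecycle.PreStop.Exec.Command)
}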
ImageID string `json:"imageID" protobuf:"bytes,7,opt,name=imageID"` @@ -2503,6 +2609,7 @@ type ContainerStatus struct { } // PodPhase is a label for the condition of a pod at the current time. +// +enum type PodPhase string // These are the valid statuses of pods. @@ -2522,13 +2629,14 @@ const ( PodFailed PodPhase = "Failed" // PodUnknown means that for some reason the state of the pod could not be obtained, typically due // to an error in communicating with the host of the pod. + // Deprecated: It isn't being set since 2015 (74da3b14b0c0f658b3bb8d2def5094686d0e9095) PodUnknown PodPhase = "Unknown" ) // PodConditionType is a valid value for PodCondition.Type type PodConditionType string -// These are valid conditions of pod. +// These are built-in conditions of pod. An application may use a custom condition not listed here. const ( // ContainersReady indicates whether all containers in the pod are ready. ContainersReady PodConditionType = "ContainersReady" @@ -2575,6 +2683,7 @@ type PodCondition struct { // Only one of the following restart policies may be specified. // If none of the following policies is specified, the default one // is RestartPolicyAlways. +// +enum type RestartPolicy string const ( @@ -2584,6 +2693,7 @@ const ( ) // DNSPolicy defines how a pod's DNS will be configured. +// +enum type DNSPolicy string const ( @@ -2616,6 +2726,7 @@ const ( // A node selector represents the union of the results of one or more label queries // over a set of nodes; that is, it represents the OR of the selectors represented // by the node selector terms. +// +structType=atomic type NodeSelector struct { //Required. A list of node selector terms. The terms are ORed. NodeSelectorTerms []NodeSelectorTerm `json:"nodeSelectorTerms" protobuf:"bytes,1,rep,name=nodeSelectorTerms"` @@ -2624,6 +2735,7 @@ type NodeSelector struct { // A null or empty node selector term matches no objects. The requirements of // them are ANDed. // The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. +// +structType=atomic type NodeSelectorTerm struct { // A list of node selector requirements by node's labels. // +optional @@ -2652,6 +2764,7 @@ type NodeSelectorRequirement struct { // A node selector operator is the set of operators that can be used in // a node selector requirement. +// +enum type NodeSelectorOperator string const ( @@ -2668,7 +2781,10 @@ const ( // The requirements of them are ANDed. // It provides a subset of functionality as NodeSelectorTerm. // This is an alpha feature and may change in the future. +// +structType=atomic type TopologySelectorTerm struct { + // Usage: Fields of type []TopologySelectorTerm must be listType=atomic. + // A list of topology selector requirements by labels. // +optional MatchLabelExpressions []TopologySelectorLabelRequirement `json:"matchLabelExpressions,omitempty" protobuf:"bytes,1,rep,name=matchLabelExpressions"` @@ -2803,7 +2919,7 @@ type PodAffinityTerm struct { // and the ones listed in the namespaces field. // null selector and null or empty namespaces list means "this pod's namespace". // An empty selector ({}) matches all namespaces. - // This field is alpha-level and is only honored when PodAffinityNamespaceSelector feature is enabled. + // This field is beta-level and is only honored when PodAffinityNamespaceSelector feature is enabled. 
// +optional NamespaceSelector *metav1.LabelSelector `json:"namespaceSelector,omitempty" protobuf:"bytes,4,opt,name=namespaceSelector"` } @@ -2866,6 +2982,7 @@ type Taint struct { TimeAdded *metav1.Time `json:"timeAdded,omitempty" protobuf:"bytes,4,opt,name=timeAdded"` } +// +enum type TaintEffect string const ( @@ -2919,6 +3036,7 @@ type Toleration struct { } // A toleration operator is the set of operators that can be used in a toleration. +// +enum type TolerationOperator string const ( @@ -2967,7 +3085,7 @@ type PodSpec struct { // pod to perform user-initiated actions such as debugging. This list cannot be specified when // creating a pod, and it cannot be modified by updating the pod spec. In order to add an // ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource. - // This field is alpha-level and is only honored by servers that enable the EphemeralContainers feature. + // This field is beta-level and available on clusters that haven't disabled the EphemeralContainers feature gate. // +optional // +patchMergeKey=name // +patchStrategy=merge @@ -3005,6 +3123,7 @@ type PodSpec struct { // Selector which must match a node's labels for the pod to be scheduled on that node. // More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ // +optional + // +mapType=atomic NodeSelector map[string]string `json:"nodeSelector,omitempty" protobuf:"bytes,7,rep,name=nodeSelector"` // ServiceAccountName is the name of the ServiceAccount to use to run this pod. @@ -3108,14 +3227,14 @@ type PodSpec struct { // If specified, all readiness gates will be evaluated for pod readiness. // A pod is ready when all its containers are ready AND // all conditions specified in the readiness gates have status equal to "True" - // More info: https://git.k8s.io/enhancements/keps/sig-network/0007-pod-ready%2B%2B.md + // More info: https://git.k8s.io/enhancements/keps/sig-network/580-pod-readiness-gates // +optional ReadinessGates []PodReadinessGate `json:"readinessGates,omitempty" protobuf:"bytes,28,opt,name=readinessGates"` // RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used // to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. // If unset or empty, the "legacy" RuntimeClass will be used, which is an implicit class with an // empty definition that uses the default runtime handler. - // More info: https://git.k8s.io/enhancements/keps/sig-node/runtime-class.md + // More info: https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class // This is a beta feature as of Kubernetes v1.14. // +optional RuntimeClassName *string `json:"runtimeClassName,omitempty" protobuf:"bytes,29,opt,name=runtimeClassName"` @@ -3136,8 +3255,8 @@ type PodSpec struct { // The RuntimeClass admission controller will reject Pod create requests which have the overhead already // set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value // defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. - // More info: https://git.k8s.io/enhancements/keps/sig-node/20190226-pod-overhead.md - // This field is alpha-level as of Kubernetes v1.16, and is only honored by servers that enable the PodOverhead feature. + // More info: https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md + // This field is beta-level as of Kubernetes v1.18, and is only honored by servers that enable the PodOverhead feature. 
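A brief sketch of a pod readiness gate built on a custom condition type, as the pod condition and readinessGates comments above permit; the condition type here is hypothetical:

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

func main() {
	spec := v1.PodSpec{
		Containers: []v1.Container{{Name: "app", Image: "registry.example/app:v1"}},
		ReadinessGates: []v1.PodReadinessGate{
			// The pod only becomes Ready once an external controller sets
			// this condition to "True" in pod.status.conditions.
			{ConditionType: v1.PodConditionType("example.com/load-balancer-ready")},
		},
	}
	fmt.Println(spec.ReadinessGates[0].ConditionType)
}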
 	// +optional
 	Overhead ResourceList `json:"overhead,omitempty" protobuf:"bytes,32,opt,name=overhead"`
 	// TopologySpreadConstraints describes how a group of pods ought to spread across topology
@@ -3157,8 +3276,57 @@ type PodSpec struct {
 	// Default to false.
 	// +optional
 	SetHostnameAsFQDN *bool `json:"setHostnameAsFQDN,omitempty" protobuf:"varint,35,opt,name=setHostnameAsFQDN"`
+	// Specifies the OS of the containers in the pod.
+	// Some pod and container fields are restricted if this is set.
+	//
+	// If the OS field is set to linux, the following fields must be unset:
+	// - securityContext.windowsOptions
+	//
+	// If the OS field is set to windows, the following fields must be unset:
+	// - spec.hostPID
+	// - spec.hostIPC
+	// - spec.securityContext.seLinuxOptions
+	// - spec.securityContext.seccompProfile
+	// - spec.securityContext.fsGroup
+	// - spec.securityContext.fsGroupChangePolicy
+	// - spec.securityContext.sysctls
+	// - spec.shareProcessNamespace
+	// - spec.securityContext.runAsUser
+	// - spec.securityContext.runAsGroup
+	// - spec.securityContext.supplementalGroups
+	// - spec.containers[*].securityContext.seLinuxOptions
+	// - spec.containers[*].securityContext.seccompProfile
+	// - spec.containers[*].securityContext.capabilities
+	// - spec.containers[*].securityContext.readOnlyRootFilesystem
+	// - spec.containers[*].securityContext.privileged
+	// - spec.containers[*].securityContext.allowPrivilegeEscalation
+	// - spec.containers[*].securityContext.procMount
+	// - spec.containers[*].securityContext.runAsUser
+	// - spec.containers[*].securityContext.runAsGroup
+	// +optional
+	// This is an alpha field and requires the IdentifyPodOS feature.
+	OS *PodOS `json:"os,omitempty" protobuf:"bytes,36,opt,name=os"`
+}
+
+// OSName is the set of OSes that can be used in OS.
+type OSName string
+
+// These are valid values for OSName
+const (
+	Linux   OSName = "linux"
+	Windows OSName = "windows"
+)
+
+// PodOS defines the OS parameters of a pod.
+type PodOS struct {
+	// Name is the name of the operating system. The currently supported values are linux and windows.
+	// Additional values may be defined in the future and can be one of:
+	// https://github.com/opencontainers/runtime-spec/blob/master/config.md#platform-specific-configuration
+	// Clients should expect to handle additional values and treat unrecognized values in this field as os: null.
+	Name OSName `json:"name" protobuf:"bytes,1,opt,name=name"`
 }
 
+// +enum
 type UnsatisfiableConstraintAction string
 
 const (
@@ -3203,7 +3371,7 @@ type TopologySpreadConstraint struct {
 	// but giving higher precedence to topologies that would help reduce the
 	// skew.
 	// A constraint is considered "Unsatisfiable" for an incoming pod
-	// if and only if every possible node assigment for that pod would violate
+	// if and only if every possible node assignment for that pod would violate
 	// "MaxSkew" on some topology.
 	// For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same
 	// labelSelector spread as 3/1/1:
@@ -3241,6 +3409,7 @@ type HostAlias struct {
 
 // PodFSGroupChangePolicy holds policies that will be used for applying fsGroup to a volume
 // when volume is mounted.
+// +enum
 type PodFSGroupChangePolicy string
 
 const (
@@ -3264,11 +3433,13 @@ type PodSecurityContext struct {
 	// container. May also be set in SecurityContext. If set in
 	// both SecurityContext and PodSecurityContext, the value specified in SecurityContext
 	// takes precedence for that container.
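A minimal sketch of the new alpha OS field above (IdentifyPodOS feature gate); the container details are placeholders:

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

func main() {
	spec := v1.PodSpec{
		// With os.name set, the apiserver rejects the restricted fields listed above.
		OS:         &v1.PodOS{Name: v1.Linux},
		Containers: []v1.Container{{Name: "app", Image: "registry.example/app:v1"}},
	}
	fmt.Println(spec.OS.Name)
}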
+ // Note that this field cannot be set when spec.os.name is windows. // +optional SELinuxOptions *SELinuxOptions `json:"seLinuxOptions,omitempty" protobuf:"bytes,1,opt,name=seLinuxOptions"` // The Windows specific settings applied to all containers. // If unspecified, the options within a container's SecurityContext will be used. // If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + // Note that this field cannot be set when spec.os.name is linux. // +optional WindowsOptions *WindowsSecurityContextOptions `json:"windowsOptions,omitempty" protobuf:"bytes,8,opt,name=windowsOptions"` // The UID to run the entrypoint of the container process. @@ -3276,6 +3447,7 @@ type PodSecurityContext struct { // May also be set in SecurityContext. If set in both SecurityContext and // PodSecurityContext, the value specified in SecurityContext takes precedence // for that container. + // Note that this field cannot be set when spec.os.name is windows. // +optional RunAsUser *int64 `json:"runAsUser,omitempty" protobuf:"varint,2,opt,name=runAsUser"` // The GID to run the entrypoint of the container process. @@ -3283,6 +3455,7 @@ type PodSecurityContext struct { // May also be set in SecurityContext. If set in both SecurityContext and // PodSecurityContext, the value specified in SecurityContext takes precedence // for that container. + // Note that this field cannot be set when spec.os.name is windows. // +optional RunAsGroup *int64 `json:"runAsGroup,omitempty" protobuf:"varint,6,opt,name=runAsGroup"` // Indicates that the container must run as a non-root user. @@ -3296,6 +3469,7 @@ type PodSecurityContext struct { // A list of groups applied to the first process run in each container, in addition // to the container's primary GID. If unspecified, no groups will be added to // any container. + // Note that this field cannot be set when spec.os.name is windows. // +optional SupplementalGroups []int64 `json:"supplementalGroups,omitempty" protobuf:"varint,4,rep,name=supplementalGroups"` // A special supplemental group that applies to all containers in a pod. @@ -3307,10 +3481,12 @@ type PodSecurityContext struct { // 3. The permission bits are OR'd with rw-rw---- // // If unset, the Kubelet will not modify the ownership and permissions of any volume. + // Note that this field cannot be set when spec.os.name is windows. // +optional FSGroup *int64 `json:"fsGroup,omitempty" protobuf:"varint,5,opt,name=fsGroup"` // Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported // sysctls (by the container runtime) might fail to launch. + // Note that this field cannot be set when spec.os.name is windows. // +optional Sysctls []Sysctl `json:"sysctls,omitempty" protobuf:"bytes,7,rep,name=sysctls"` // fsGroupChangePolicy defines behavior of changing ownership and permission of the volume @@ -3319,9 +3495,11 @@ type PodSecurityContext struct { // It will have no effect on ephemeral volume types such as: secret, configmaps // and emptydir. // Valid values are "OnRootMismatch" and "Always". If not specified, "Always" is used. + // Note that this field cannot be set when spec.os.name is windows. // +optional FSGroupChangePolicy *PodFSGroupChangePolicy `json:"fsGroupChangePolicy,omitempty" protobuf:"bytes,9,opt,name=fsGroupChangePolicy"` // The seccomp options to use by the containers in this pod. + // Note that this field cannot be set when spec.os.name is windows. 
// +optional SeccompProfile *SeccompProfile `json:"seccompProfile,omitempty" protobuf:"bytes,10,opt,name=seccompProfile"` } @@ -3347,6 +3525,7 @@ type SeccompProfile struct { } // SeccompProfileType defines the supported seccomp profile types. +// +enum type SeccompProfileType string const ( @@ -3355,12 +3534,12 @@ const ( // SeccompProfileTypeRuntimeDefault represents the default container runtime seccomp profile. SeccompProfileTypeRuntimeDefault SeccompProfileType = "RuntimeDefault" // SeccompProfileTypeLocalhost indicates a profile defined in a file on the node should be used. - // The file's location is based off the kubelet's deprecated flag --seccomp-profile-root. - // Once the flag support is removed the location will be /seccomp. + // The file's location relative to /seccomp. SeccompProfileTypeLocalhost SeccompProfileType = "Localhost" ) // PodQOSClass defines the supported qos classes of Pods. +// +enum type PodQOSClass string const ( @@ -3423,20 +3602,20 @@ type EphemeralContainerCommon struct { // Entrypoint array. Not executed within a shell. // The docker image's ENTRYPOINT is used if this is not provided. // Variable references $(VAR_NAME) are expanded using the container's environment. If a variable - // cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax - // can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, - // regardless of whether the variable exists or not. - // Cannot be updated. + // cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + // to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + // produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + // of whether the variable exists or not. Cannot be updated. // More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell // +optional Command []string `json:"command,omitempty" protobuf:"bytes,3,rep,name=command"` // Arguments to the entrypoint. // The docker image's CMD is used if this is not provided. // Variable references $(VAR_NAME) are expanded using the container's environment. If a variable - // cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax - // can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, - // regardless of whether the variable exists or not. - // Cannot be updated. + // cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + // to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + // produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + // of whether the variable exists or not. Cannot be updated. // More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell // +optional Args []string `json:"args,omitempty" protobuf:"bytes,4,rep,name=args"` @@ -3447,7 +3626,13 @@ type EphemeralContainerCommon struct { // +optional WorkingDir string `json:"workingDir,omitempty" protobuf:"bytes,5,opt,name=workingDir"` // Ports are not allowed for ephemeral containers. 
- Ports []ContainerPort `json:"ports,omitempty" protobuf:"bytes,6,rep,name=ports"` + // +optional + // +patchMergeKey=containerPort + // +patchStrategy=merge + // +listType=map + // +listMapKey=containerPort + // +listMapKey=protocol + Ports []ContainerPort `json:"ports,omitempty" patchStrategy:"merge" patchMergeKey:"containerPort" protobuf:"bytes,6,rep,name=ports"` // List of sources to populate environment variables in the container. // The keys defined within a source must be a C_IDENTIFIER. All invalid keys // will be reported as an event when the container is starting. When a key exists in multiple @@ -3466,7 +3651,7 @@ type EphemeralContainerCommon struct { // already allocated to the pod. // +optional Resources ResourceRequirements `json:"resources,omitempty" protobuf:"bytes,8,opt,name=resources"` - // Pod volumes to mount into the container's filesystem. + // Pod volumes to mount into the container's filesystem. Subpath mounts are not allowed for ephemeral containers. // Cannot be updated. // +optional // +patchMergeKey=mountPath @@ -3514,7 +3699,8 @@ type EphemeralContainerCommon struct { // More info: https://kubernetes.io/docs/concepts/containers/images#updating-images // +optional ImagePullPolicy PullPolicy `json:"imagePullPolicy,omitempty" protobuf:"bytes,14,opt,name=imagePullPolicy,casttype=PullPolicy"` - // SecurityContext is not allowed for ephemeral containers. + // Optional: SecurityContext defines the security options the ephemeral container should be run with. + // If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. // +optional SecurityContext *SecurityContext `json:"securityContext,omitempty" protobuf:"bytes,15,opt,name=securityContext"` @@ -3545,15 +3731,16 @@ type EphemeralContainerCommon struct { // these two types. var _ = Container(EphemeralContainerCommon{}) -// An EphemeralContainer is a container that may be added temporarily to an existing pod for +// An EphemeralContainer is a temporary container that you may add to an existing Pod for // user-initiated activities such as debugging. Ephemeral containers have no resource or -// scheduling guarantees, and they will not be restarted when they exit or when a pod is -// removed or restarted. If an ephemeral container causes a pod to exceed its resource -// allocation, the pod may be evicted. -// Ephemeral containers may not be added by directly updating the pod spec. They must be added -// via the pod's ephemeralcontainers subresource, and they will appear in the pod spec -// once added. -// This is an alpha feature enabled by the EphemeralContainers feature flag. +// scheduling guarantees, and they will not be restarted when they exit or when a Pod is +// removed or restarted. The kubelet may evict a Pod if an ephemeral container causes the +// Pod to exceed its resource allocation. +// +// To add an ephemeral container, use the ephemeralcontainers subresource of an existing +// Pod. Ephemeral containers may not be removed or restarted. +// +// This is a beta feature available on clusters that haven't disabled the EphemeralContainers feature gate. type EphemeralContainer struct { // Ephemeral containers have all of the fields of Container, plus additional fields // specific to ephemeral containers. Fields in common with Container are in the @@ -3563,8 +3750,10 @@ type EphemeralContainer struct { // If set, the name of the container from PodSpec that this ephemeral container targets. // The ephemeral container will be run in the namespaces (IPC, PID, etc) of this container. 
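A sketch of adding a debug container through the pod's ephemeralcontainers subresource, which is the only way to add one; it assumes a client-go release matching these vendored types, and the pod, namespace, image, and container names are hypothetical:

package main

import (
	"context"
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	client := kubernetes.NewForConfigOrDie(cfg)
	ctx := context.Background()

	pod, err := client.CoreV1().Pods("default").Get(ctx, "my-pod", metav1.GetOptions{})
	if err != nil {
		panic(err)
	}
	pod.Spec.EphemeralContainers = append(pod.Spec.EphemeralContainers, v1.EphemeralContainer{
		EphemeralContainerCommon: v1.EphemeralContainerCommon{
			Name:  "debugger",
			Image: "busybox:1.35",
			Stdin: true,
			TTY:   true,
		},
		// Join the namespaces of the existing "app" container (assumed to exist).
		TargetContainerName: "app",
	})
	// Ephemeral containers may only be appended, never removed or modified.
	if _, err := client.CoreV1().Pods("default").UpdateEphemeralContainers(ctx, pod.Name, pod, metav1.UpdateOptions{}); err != nil {
		panic(err)
	}
	fmt.Println("debug container requested")
}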
- // If not set then the ephemeral container is run in whatever namespaces are shared - // for the pod. Note that the container runtime must support this feature. + // If not set then the ephemeral container uses the namespaces configured in the Pod spec. + // + // The container runtime must implement support for this feature. If the runtime does not + // support namespace targeting then the result of setting this field is undefined. // +optional TargetContainerName string `json:"targetContainerName,omitempty" protobuf:"bytes,2,opt,name=targetContainerName"` } @@ -3654,7 +3843,7 @@ type PodStatus struct { // +optional QOSClass PodQOSClass `json:"qosClass,omitempty" protobuf:"bytes,9,rep,name=qosClass"` // Status for any ephemeral containers that have run in this pod. - // This field is alpha-level and is only populated by servers that enable the EphemeralContainers feature. + // This field is beta-level and available on clusters that haven't disabled the EphemeralContainers feature gate. // +optional EphemeralContainerStatuses []ContainerStatus `json:"ephemeralContainerStatuses,omitempty" protobuf:"bytes,13,rep,name=ephemeralContainerStatuses"` } @@ -3678,8 +3867,7 @@ type PodStatusResult struct { } // +genclient -// +genclient:method=GetEphemeralContainers,verb=get,subresource=ephemeralcontainers,result=EphemeralContainers -// +genclient:method=UpdateEphemeralContainers,verb=update,subresource=ephemeralcontainers,input=EphemeralContainers,result=EphemeralContainers +// +genclient:method=UpdateEphemeralContainers,verb=update,subresource=ephemeralcontainers // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // Pod is a collection of containers that can run on a host. This resource is created @@ -3785,6 +3973,7 @@ type ReplicationControllerSpec struct { // controller, if empty defaulted to labels on Pod template. // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors // +optional + // +mapType=atomic Selector map[string]string `json:"selector,omitempty" protobuf:"bytes,2,rep,name=selector"` // TemplateRef is a reference to an object that describes the pod that will be created if @@ -3902,6 +4091,7 @@ type ReplicationControllerList struct { } // Session Affinity Type string +// +enum type ServiceAffinity string const ( @@ -3931,6 +4121,7 @@ type ClientIPConfig struct { } // Service Type string describes ingress methods for a service +// +enum type ServiceType string const ( @@ -3955,6 +4146,7 @@ const ( // ServiceInternalTrafficPolicyType describes the type of traffic routing for // internal traffic +// +enum type ServiceInternalTrafficPolicyType string const ( @@ -3967,6 +4159,7 @@ const ( ) // Service External Traffic Policy Type string +// +enum type ServiceExternalTrafficPolicyType string const ( @@ -4026,13 +4219,9 @@ type LoadBalancerIngress struct { Ports []PortStatus `json:"ports,omitempty" protobuf:"bytes,4,rep,name=ports"` } -const ( - // MaxServiceTopologyKeys is the largest number of topology keys allowed on a service - MaxServiceTopologyKeys = 16 -) - // IPFamily represents the IP Family (IPv4 or IPv6). This type is used // to express the family of an IP expressed by a type (e.g. service.spec.ipFamilies). +// +enum type IPFamily string const ( @@ -4043,6 +4232,7 @@ const ( ) // IPFamilyPolicyType represents the dual-stack-ness requested or required by a Service +// +enum type IPFamilyPolicyType string const ( @@ -4083,6 +4273,7 @@ type ServiceSpec struct { // Ignored if type is ExternalName. 
// More info: https://kubernetes.io/docs/concepts/services-networking/service/ // +optional + // +mapType=atomic Selector map[string]string `json:"selector,omitempty" protobuf:"bytes,2,rep,name=selector"` // clusterIP is the IP address of the service and is usually assigned @@ -4122,12 +4313,9 @@ type ServiceSpec struct { // clients must ensure that clusterIPs[0] and clusterIP have the same // value. // - // Unless the "IPv6DualStack" feature gate is enabled, this field is - // limited to one value, which must be the same as the clusterIP field. If - // the feature gate is enabled, this field may hold a maximum of two - // entries (dual-stack IPs, in either order). These IPs must correspond to - // the values of the ipFamilies field. Both clusterIPs and ipFamilies are - // governed by the ipFamilyPolicy field. + // This field may hold a maximum of two entries (dual-stack IPs, in either order). + // These IPs must correspond to the values of the ipFamilies field. Both + // clusterIPs and ipFamilies are governed by the ipFamilyPolicy field. // More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies // +listType=atomic // +optional @@ -4179,7 +4367,7 @@ type ServiceSpec struct { // If specified and supported by the platform, this will restrict traffic through the cloud-provider // load-balancer will be restricted to the specified client IPs. This field will be ignored if the // cloud-provider does not support the feature." - // More info: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/ + // More info: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/ // +optional LoadBalancerSourceRanges []string `json:"loadBalancerSourceRanges,omitempty" protobuf:"bytes,9,opt,name=loadBalancerSourceRanges"` @@ -4226,38 +4414,23 @@ type ServiceSpec struct { // +optional SessionAffinityConfig *SessionAffinityConfig `json:"sessionAffinityConfig,omitempty" protobuf:"bytes,14,opt,name=sessionAffinityConfig"` - // topologyKeys is a preference-order list of topology keys which - // implementations of services should use to preferentially sort endpoints - // when accessing this Service, it can not be used at the same time as - // externalTrafficPolicy=Local. - // Topology keys must be valid label keys and at most 16 keys may be specified. - // Endpoints are chosen based on the first topology key with available backends. - // If this field is specified and all entries have no backends that match - // the topology of the client, the service has no backends for that client - // and connections should fail. - // The special value "*" may be used to mean "any topology". This catch-all - // value, if used, only makes sense as the last value in the list. - // If this is not specified or empty, no topology constraints will be applied. - // This field is alpha-level and is only honored by servers that enable the ServiceTopology feature. - // This field is deprecated and will be removed in a future version. - // +optional - TopologyKeys []string `json:"topologyKeys,omitempty" protobuf:"bytes,16,opt,name=topologyKeys"` + // TopologyKeys is tombstoned to show why 16 is reserved protobuf tag. + //TopologyKeys []string `json:"topologyKeys,omitempty" protobuf:"bytes,16,opt,name=topologyKeys"` // IPFamily is tombstoned to show why 15 is a reserved protobuf tag. 
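A minimal sketch of a dual-stack ServiceSpec using the ipFamilies and ipFamilyPolicy fields described above; the selector and ports are placeholders:

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
)

func main() {
	policy := v1.IPFamilyPolicyPreferDualStack
	spec := v1.ServiceSpec{
		Selector: map[string]string{"app": "web"},
		// Primary family first; clusterIPs are allocated to match ipFamilies.
		IPFamilies:     []v1.IPFamily{v1.IPv4Protocol, v1.IPv6Protocol},
		IPFamilyPolicy: &policy,
		Ports:          []v1.ServicePort{{Port: 80, TargetPort: intstr.FromInt(8080)}},
	}
	fmt.Println(spec.IPFamilies, *spec.IPFamilyPolicy)
}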
// IPFamily *IPFamily `json:"ipFamily,omitempty" protobuf:"bytes,15,opt,name=ipFamily,Configcasttype=IPFamily"` // IPFamilies is a list of IP families (e.g. IPv4, IPv6) assigned to this - // service, and is gated by the "IPv6DualStack" feature gate. This field - // is usually assigned automatically based on cluster configuration and the - // ipFamilyPolicy field. If this field is specified manually, the requested - // family is available in the cluster, and ipFamilyPolicy allows it, it - // will be used; otherwise creation of the service will fail. This field - // is conditionally mutable: it allows for adding or removing a secondary - // IP family, but it does not allow changing the primary IP family of the - // Service. Valid values are "IPv4" and "IPv6". This field only applies - // to Services of types ClusterIP, NodePort, and LoadBalancer, and does - // apply to "headless" services. This field will be wiped when updating a - // Service to type ExternalName. + // service. This field is usually assigned automatically based on cluster + // configuration and the ipFamilyPolicy field. If this field is specified + // manually, the requested family is available in the cluster, + // and ipFamilyPolicy allows it, it will be used; otherwise creation of + // the service will fail. This field is conditionally mutable: it allows + // for adding or removing a secondary IP family, but it does not allow + // changing the primary IP family of the Service. Valid values are "IPv4" + // and "IPv6". This field only applies to Services of types ClusterIP, + // NodePort, and LoadBalancer, and does apply to "headless" services. + // This field will be wiped when updating a Service to type ExternalName. // // This field may hold a maximum of two entries (dual-stack families, in // either order). These families must correspond to the values of the @@ -4268,23 +4441,25 @@ type ServiceSpec struct { IPFamilies []IPFamily `json:"ipFamilies,omitempty" protobuf:"bytes,19,opt,name=ipFamilies,casttype=IPFamily"` // IPFamilyPolicy represents the dual-stack-ness requested or required by - // this Service, and is gated by the "IPv6DualStack" feature gate. If - // there is no value provided, then this field will be set to SingleStack. - // Services can be "SingleStack" (a single IP family), "PreferDualStack" - // (two IP families on dual-stack configured clusters or a single IP family - // on single-stack clusters), or "RequireDualStack" (two IP families on - // dual-stack configured clusters, otherwise fail). The ipFamilies and - // clusterIPs fields depend on the value of this field. This field will be - // wiped when updating a service to type ExternalName. + // this Service. If there is no value provided, then this field will be set + // to SingleStack. Services can be "SingleStack" (a single IP family), + // "PreferDualStack" (two IP families on dual-stack configured clusters or + // a single IP family on single-stack clusters), or "RequireDualStack" + // (two IP families on dual-stack configured clusters, otherwise fail). The + // ipFamilies and clusterIPs fields depend on the value of this field. This + // field will be wiped when updating a service to type ExternalName. // +optional IPFamilyPolicy *IPFamilyPolicyType `json:"ipFamilyPolicy,omitempty" protobuf:"bytes,17,opt,name=ipFamilyPolicy,casttype=IPFamilyPolicyType"` // allocateLoadBalancerNodePorts defines if NodePorts will be automatically - // allocated for services with type LoadBalancer. Default is "true". 
It may be - // set to "false" if the cluster load-balancer does not rely on NodePorts. - // allocateLoadBalancerNodePorts may only be set for services with type LoadBalancer - // and will be cleared if the type is changed to any other type. - // This field is alpha-level and is only honored by servers that enable the ServiceLBNodePortControl feature. + // allocated for services with type LoadBalancer. Default is "true". It + // may be set to "false" if the cluster load-balancer does not rely on + // NodePorts. If the caller requests specific NodePorts (by specifying a + // value), those requests will be respected, regardless of this field. + // This field may only be set for services with type LoadBalancer and will + // be cleared if the type is changed to any other type. + // This field is beta-level and is only honored by servers that enable the ServiceLBNodePortControl feature. + // +featureGate=ServiceLBNodePortControl // +optional AllocateLoadBalancerNodePorts *bool `json:"allocateLoadBalancerNodePorts,omitempty" protobuf:"bytes,20,opt,name=allocateLoadBalancerNodePorts"` @@ -4335,8 +4510,6 @@ type ServicePort struct { // RFC-6335 and http://www.iana.org/assignments/service-names). // Non-standard protocols should use prefixed names such as // mycompany.com/my-custom-protocol. - // This is a beta field that is guarded by the ServiceAppProtocol feature - // gate and enabled by default. // +optional AppProtocol *string `json:"appProtocol,omitempty" protobuf:"bytes,6,opt,name=appProtocol"` @@ -4523,6 +4696,7 @@ type EndpointSubset struct { } // EndpointAddress is a tuple that describes single IP address. +// +structType=atomic type EndpointAddress struct { // The IP of this endpoint. // May not be loopback (127.0.0.0/8), link-local (169.254.0.0/16), @@ -4543,6 +4717,7 @@ type EndpointAddress struct { } // EndpointPort is a tuple that describes a single port. +// +structType=atomic type EndpointPort struct { // The name of this port. This must match the 'name' field in the // corresponding ServicePort. @@ -4566,8 +4741,6 @@ type EndpointPort struct { // RFC-6335 and http://www.iana.org/assignments/service-names). // Non-standard protocols should use prefixed names such as // mycompany.com/my-custom-protocol. - // This is a beta field that is guarded by the ServiceAppProtocol feature - // gate and enabled by default. // +optional AppProtocol *string `json:"appProtocol,omitempty" protobuf:"bytes,4,opt,name=appProtocol"` } @@ -4609,8 +4782,10 @@ type NodeSpec struct { // If specified, the node's taints. // +optional Taints []Taint `json:"taints,omitempty" protobuf:"bytes,5,opt,name=taints"` - // If specified, the source to get node configuration from - // The DynamicKubeletConfig feature gate must be enabled for the Kubelet to use this field + + // Deprecated. If specified, the source of the node's configuration. + // The DynamicKubeletConfig feature gate must be enabled for the Kubelet to use this field. + // This field is deprecated as of 1.22: https://git.k8s.io/enhancements/keps/sig-node/281-dynamic-kubelet-configuration // +optional ConfigSource *NodeConfigSource `json:"configSource,omitempty" protobuf:"bytes,6,opt,name=configSource"` @@ -4621,6 +4796,7 @@ type NodeSpec struct { } // NodeConfigSource specifies a source of node configuration. Exactly one subfield (excluding metadata) must be non-nil. +// This API is deprecated since 1.22 type NodeConfigSource struct { // For historical context, regarding the below kind, apiVersion, and configMapRef deprecation tags: // 1. 
kind/apiVersion were used by the kubelet to persist this struct to disk (they had no protobuf tags)
@@ -4638,6 +4814,7 @@ type NodeConfigSource struct {
 }
 
 // ConfigMapNodeConfigSource contains the information to reference a ConfigMap as a config source for the Node.
+// This API is deprecated since 1.22: https://git.k8s.io/enhancements/keps/sig-node/281-dynamic-kubelet-configuration
 type ConfigMapNodeConfigSource struct {
 	// Namespace is the metadata.namespace of the referenced ConfigMap.
 	// This field is required in all cases.
@@ -4856,12 +5033,14 @@ type PodSignature struct {
 type ContainerImage struct {
 	// Names by which this image is known.
 	// e.g. ["k8s.gcr.io/hyperkube:v1.0.7", "dockerhub.io/google_containers/hyperkube:v1.0.7"]
+	// +optional
 	Names []string `json:"names" protobuf:"bytes,1,rep,name=names"`
 	// The size of the image in bytes.
 	// +optional
 	SizeBytes int64 `json:"sizeBytes,omitempty" protobuf:"varint,2,opt,name=sizeBytes"`
 }
 
+// +enum
 type NodePhase string
 
 // These are the valid phases of node.
@@ -4876,8 +5055,8 @@ const (
 
 type NodeConditionType string
 
-// These are valid conditions of node. Currently, we don't have enough information to decide
-// node condition. In the future, we will add more. The proposed set of conditions are:
+// These are valid but not exhaustive conditions of node. A cloud provider may set a condition not listed here.
+// The built-in set of conditions is:
 // NodeReachable, NodeLive, NodeReady, NodeSchedulable, NodeRunnable.
 const (
 	// NodeReady means kubelet is healthy and ready to accept pods.
@@ -4914,13 +5093,46 @@ type NodeCondition struct {
 
 type NodeAddressType string
 
-// These are valid address type of node.
+// These are the built-in address types of node. A cloud provider may set a type not listed here.
 const (
-	NodeHostName    NodeAddressType = "Hostname"
-	NodeExternalIP  NodeAddressType = "ExternalIP"
-	NodeInternalIP  NodeAddressType = "InternalIP"
-	NodeExternalDNS NodeAddressType = "ExternalDNS"
+	// NodeHostName identifies a name of the node. Although every node can be assumed
+	// to have a NodeAddress of this type, its exact syntax and semantics are not
+	// defined, and are not consistent between different clusters.
+	NodeHostName NodeAddressType = "Hostname"
+
+	// NodeInternalIP identifies an IP address which is assigned to one of the node's
+	// network interfaces. Every node should have at least one address of this type.
+	//
+	// An internal IP is normally expected to be reachable from every other node, but
+	// may not be visible to hosts outside the cluster. By default it is assumed that
+	// kube-apiserver can reach node internal IPs, though it is possible to configure
+	// clusters where this is not the case.
+	//
+	// NodeInternalIP is the default type of node IP, and does not necessarily imply
+	// that the IP is ONLY reachable internally. If a node has multiple internal IPs,
+	// no specific semantics are assigned to the additional IPs.
+	NodeInternalIP NodeAddressType = "InternalIP"
+
+	// NodeExternalIP identifies an IP address which is, in some way, intended to be
+	// more usable from outside the cluster than an internal IP, though no specific
+	// semantics are defined. It may be a globally routable IP, though it is not
+	// required to be.
+ // + // External IPs may be assigned directly to an interface on the node, like a + // NodeInternalIP, or alternatively, packets sent to the external IP may be NAT'ed + // to an internal node IP rather than being delivered directly (making the IP less + // efficient for node-to-node traffic than a NodeInternalIP). + NodeExternalIP NodeAddressType = "ExternalIP" + + // NodeInternalDNS identifies a DNS name which resolves to an IP address which has + // the characteristics of a NodeInternalIP. The IP it resolves to may or may not + // be a listed NodeInternalIP address. NodeInternalDNS NodeAddressType = "InternalDNS" + + // NodeExternalDNS identifies a DNS name which resolves to an IP address which has + // the characteristics of a NodeExternalIP. The IP it resolves to may or may not + // be a listed NodeExternalIP address. + NodeExternalDNS NodeAddressType = "ExternalDNS" ) // NodeAddress contains information for the node's address. @@ -5034,6 +5246,7 @@ type NamespaceStatus struct { Conditions []NamespaceCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,2,rep,name=conditions"` } +// +enum type NamespacePhase string // These are the valid phases of a namespace. @@ -5052,7 +5265,7 @@ const ( type NamespaceConditionType string -// These are valid conditions of a namespace. +// These are built-in conditions of a namespace. const ( // NamespaceDeletionDiscoveryFailure contains information about namespace deleter errors during resource discovery. NamespaceDeletionDiscoveryFailure NamespaceConditionType = "NamespaceDeletionDiscoveryFailure" @@ -5135,22 +5348,6 @@ type Binding struct { Target ObjectReference `json:"target" protobuf:"bytes,2,opt,name=target"` } -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// A list of ephemeral containers used with the Pod ephemeralcontainers subresource. -type EphemeralContainers struct { - metav1.TypeMeta `json:",inline"` - // +optional - metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // A list of ephemeral containers associated with this pod. New ephemeral containers - // may be appended to this list, but existing ephemeral containers may not be removed - // or modified. - // +patchMergeKey=name - // +patchStrategy=merge - EphemeralContainers []EphemeralContainer `json:"ephemeralContainers" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,2,rep,name=ephemeralContainers"` -} - // Preconditions must be fulfilled before an operation (update, delete, etc.) is carried out. // +k8s:openapi-gen=false type Preconditions struct { @@ -5265,12 +5462,10 @@ type PodExecOptions struct { Stdin bool `json:"stdin,omitempty" protobuf:"varint,1,opt,name=stdin"` // Redirect the standard output stream of the pod for this call. - // Defaults to true. // +optional Stdout bool `json:"stdout,omitempty" protobuf:"varint,2,opt,name=stdout"` // Redirect the standard error stream of the pod for this call. - // Defaults to true. // +optional Stderr bool `json:"stderr,omitempty" protobuf:"varint,3,opt,name=stderr"` @@ -5362,6 +5557,7 @@ type ServiceProxyOptions struct { // Instead of using this type, create a locally provided and used type that is well-focused on your reference. // For example, ServiceReferences for admission registration: https://github.com/kubernetes/api/blob/release-1.17/admissionregistration/v1/types.go#L533 . 
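A small helper sketch that applies the address-type semantics documented above, preferring a NodeInternalIP and falling back to a NodeExternalIP; the sample node data is made up:

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

// nodeIP returns the node's first InternalIP, else its first ExternalIP.
func nodeIP(node *v1.Node) (string, bool) {
	for _, want := range []v1.NodeAddressType{v1.NodeInternalIP, v1.NodeExternalIP} {
		for _, addr := range node.Status.Addresses {
			if addr.Type == want {
				return addr.Address, true
			}
		}
	}
	return "", false
}

func main() {
	node := v1.Node{Status: v1.NodeStatus{Addresses: []v1.NodeAddress{
		{Type: v1.NodeHostName, Address: "worker-1"},
		{Type: v1.NodeInternalIP, Address: "10.0.0.5"},
	}}}
	if ip, ok := nodeIP(&node); ok {
		fmt.Println(ip)
	}
}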
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +structType=atomic type ObjectReference struct { // Kind of the referent. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds @@ -5401,6 +5597,7 @@ type ObjectReference struct { // LocalObjectReference contains enough information to let you locate the // referenced object inside the same namespace. +// +structType=atomic type LocalObjectReference struct { // Name of the referent. // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names @@ -5411,6 +5608,7 @@ type LocalObjectReference struct { // TypedLocalObjectReference contains enough information to let you locate the // typed referenced object inside the same namespace. +// +structType=atomic type TypedLocalObjectReference struct { // APIGroup is the group for the resource being referenced. // If APIGroup is not specified, the specified Kind must be in the core API group. @@ -5555,7 +5753,8 @@ type EventList struct { // List holds a list of objects, which may not be known by the server. type List metav1.List -// LimitType is a type of object that is limited +// LimitType is a type of object that is limited. It can be Pod, Container, PersistentVolumeClaim or +// a fully qualified resource name. type LimitType string const ( @@ -5672,6 +5871,7 @@ const ( ) // A ResourceQuotaScope defines a filter that must match each object tracked by a quota +// +enum type ResourceQuotaScope string const ( @@ -5686,7 +5886,7 @@ const ( // Match all pod objects that have priority class mentioned ResourceQuotaScopePriorityClass ResourceQuotaScope = "PriorityClass" // Match all pod objects that have cross-namespace pod (anti)affinity mentioned. - // This is an alpha feature enabled by the PodAffinityNamespaceSelector feature flag. + // This is a beta feature enabled by the PodAffinityNamespaceSelector feature flag. ResourceQuotaScopeCrossNamespacePodAffinity ResourceQuotaScope = "CrossNamespacePodAffinity" ) @@ -5709,6 +5909,7 @@ type ResourceQuotaSpec struct { // A scope selector represents the AND of the selectors represented // by the scoped-resource selector requirements. +// +structType=atomic type ScopeSelector struct { // A list of scope selector requirements by scope of the resources. // +optional @@ -5733,6 +5934,7 @@ type ScopedResourceSelectorRequirement struct { // A scope selector operator is the set of operators that can be used in // a scope selector requirement. +// +enum type ScopeSelectorOperator string const ( @@ -5825,6 +6027,7 @@ type Secret struct { StringData map[string]string `json:"stringData,omitempty" protobuf:"bytes,4,rep,name=stringData"` // Used to facilitate programmatic handling of secret data. + // More info: https://kubernetes.io/docs/concepts/configuration/secret/#secret-types // +optional Type SecretType `json:"type,omitempty" protobuf:"bytes,3,opt,name=type,casttype=SecretType"` } @@ -5906,7 +6109,7 @@ const ( // TODO: Consider supporting different formats, specifying CA/destinationCA. SecretTypeTLS SecretType = "kubernetes.io/tls" - // TLSCertKey is the key for tls certificates in a TLS secert. + // TLSCertKey is the key for tls certificates in a TLS secret. TLSCertKey = "tls.crt" // TLSPrivateKeyKey is the key for the private key field in a TLS secret. TLSPrivateKeyKey = "tls.key" @@ -6101,34 +6304,40 @@ type DownwardAPIProjection struct { type SecurityContext struct { // The capabilities to add/drop when running containers. 
// Defaults to the default set of capabilities granted by the container runtime. + // Note that this field cannot be set when spec.os.name is windows. // +optional Capabilities *Capabilities `json:"capabilities,omitempty" protobuf:"bytes,1,opt,name=capabilities"` // Run container in privileged mode. // Processes in privileged containers are essentially equivalent to root on the host. // Defaults to false. + // Note that this field cannot be set when spec.os.name is windows. // +optional Privileged *bool `json:"privileged,omitempty" protobuf:"varint,2,opt,name=privileged"` // The SELinux context to be applied to the container. // If unspecified, the container runtime will allocate a random SELinux context for each // container. May also be set in PodSecurityContext. If set in both SecurityContext and // PodSecurityContext, the value specified in SecurityContext takes precedence. + // Note that this field cannot be set when spec.os.name is windows. // +optional SELinuxOptions *SELinuxOptions `json:"seLinuxOptions,omitempty" protobuf:"bytes,3,opt,name=seLinuxOptions"` // The Windows specific settings applied to all containers. // If unspecified, the options from the PodSecurityContext will be used. // If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + // Note that this field cannot be set when spec.os.name is linux. // +optional WindowsOptions *WindowsSecurityContextOptions `json:"windowsOptions,omitempty" protobuf:"bytes,10,opt,name=windowsOptions"` // The UID to run the entrypoint of the container process. // Defaults to user specified in image metadata if unspecified. // May also be set in PodSecurityContext. If set in both SecurityContext and // PodSecurityContext, the value specified in SecurityContext takes precedence. + // Note that this field cannot be set when spec.os.name is windows. // +optional RunAsUser *int64 `json:"runAsUser,omitempty" protobuf:"varint,4,opt,name=runAsUser"` // The GID to run the entrypoint of the container process. // Uses runtime default if unset. // May also be set in PodSecurityContext. If set in both SecurityContext and // PodSecurityContext, the value specified in SecurityContext takes precedence. + // Note that this field cannot be set when spec.os.name is windows. // +optional RunAsGroup *int64 `json:"runAsGroup,omitempty" protobuf:"varint,8,opt,name=runAsGroup"` // Indicates that the container must run as a non-root user. @@ -6141,6 +6350,7 @@ type SecurityContext struct { RunAsNonRoot *bool `json:"runAsNonRoot,omitempty" protobuf:"varint,5,opt,name=runAsNonRoot"` // Whether this container has a read-only root filesystem. // Default is false. + // Note that this field cannot be set when spec.os.name is windows. // +optional ReadOnlyRootFilesystem *bool `json:"readOnlyRootFilesystem,omitempty" protobuf:"varint,6,opt,name=readOnlyRootFilesystem"` // AllowPrivilegeEscalation controls whether a process can gain more @@ -6149,21 +6359,25 @@ type SecurityContext struct { // AllowPrivilegeEscalation is true always when the container is: // 1) run as Privileged // 2) has CAP_SYS_ADMIN + // Note that this field cannot be set when spec.os.name is windows. // +optional AllowPrivilegeEscalation *bool `json:"allowPrivilegeEscalation,omitempty" protobuf:"varint,7,opt,name=allowPrivilegeEscalation"` // procMount denotes the type of proc mount to use for the containers. // The default is DefaultProcMount which uses the container runtime defaults for // readonly paths and masked paths. 
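Most of the container-level fields above shadow a pod-level counterpart, with the container value taking precedence. A small sketch of a restrictive container SecurityContext built from the vendored k8s.io/api/core/v1 types; the specific values are illustrative:

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

func main() {
	runAsUser := int64(1000)
	nonRoot := true
	readOnly := true
	// Each of these container-level fields overrides its PodSecurityContext
	// counterpart, and per the comments above none may be set when
	// spec.os.name is windows.
	sc := &corev1.SecurityContext{
		RunAsUser:              &runAsUser,
		RunAsNonRoot:           &nonRoot,
		ReadOnlyRootFilesystem: &readOnly,
		Capabilities:           &corev1.Capabilities{Drop: []corev1.Capability{"ALL"}},
	}
	fmt.Printf("%+v\n", sc)
}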
// This requires the ProcMountType feature flag to be enabled. + // Note that this field cannot be set when spec.os.name is windows. // +optional ProcMount *ProcMountType `json:"procMount,omitempty" protobuf:"bytes,9,opt,name=procMount"` // The seccomp options to use by this container. If seccomp options are // provided at both the pod & container level, the container options // override the pod options. + // Note that this field cannot be set when spec.os.name is windows. // +optional SeccompProfile *SeccompProfile `json:"seccompProfile,omitempty" protobuf:"bytes,11,opt,name=seccompProfile"` } +// +enum type ProcMountType string const ( @@ -6212,6 +6426,16 @@ type WindowsSecurityContextOptions struct { // PodSecurityContext, the value specified in SecurityContext takes precedence. // +optional RunAsUserName *string `json:"runAsUserName,omitempty" protobuf:"bytes,3,opt,name=runAsUserName"` + + // HostProcess determines if a container should be run as a 'Host Process' container. + // This field is alpha-level and will only be honored by components that enable the + // WindowsHostProcessContainers feature flag. Setting this field without the feature + // flag will result in errors when validating the Pod. All of a Pod's containers must + // have the same effective HostProcess value (it is not allowed to have a mix of HostProcess + // containers and non-HostProcess containers). In addition, if HostProcess is true + // then HostNetwork must also be set to true. + // +optional + HostProcess *bool `json:"hostProcess,omitempty" protobuf:"bytes,4,opt,name=hostProcess"` } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object diff --git a/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go index 0892b9b5ecbb6..0a60e7008ab93 100644 --- a/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go @@ -291,7 +291,7 @@ func (ConfigMapList) SwaggerDoc() map[string]string { } var map_ConfigMapNodeConfigSource = map[string]string{ - "": "ConfigMapNodeConfigSource contains the information to reference a ConfigMap as a config source for the Node.", + "": "ConfigMapNodeConfigSource contains the information to reference a ConfigMap as a config source for the Node. This API is deprecated since 1.22: https://git.k8s.io/enhancements/keps/sig-node/281-dynamic-kubelet-configuration", "namespace": "Namespace is the metadata.namespace of the referenced ConfigMap. This field is required in all cases.", "name": "Name is the metadata.name of the referenced ConfigMap. This field is required in all cases.", "uid": "UID is the metadata.UID of the referenced ConfigMap. This field is forbidden in Node.Spec, and required in Node.Status.", @@ -328,8 +328,8 @@ var map_Container = map[string]string{ "": "A single application container that you want to run within a pod.", "name": "Name of the container specified as a DNS_LABEL. Each container in a pod must have a unique name (DNS_LABEL). Cannot be updated.", "image": "Docker image name. More info: https://kubernetes.io/docs/concepts/containers/images This field is optional to allow higher level config management to default or override container images in workload controllers like Deployments and StatefulSets.", - "command": "Entrypoint array. Not executed within a shell. The docker image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. 
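For the new HostProcess field, a hypothetical pod fragment showing the combination its comment requires, HostProcess together with HostNetwork; the container name and image are invented for illustration:

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

func main() {
	hostProcess := true
	spec := corev1.PodSpec{
		// All containers must agree on the effective HostProcess value, and
		// HostNetwork must be true when HostProcess is true.
		HostNetwork: true,
		SecurityContext: &corev1.PodSecurityContext{
			WindowsOptions: &corev1.WindowsSecurityContextOptions{
				HostProcess: &hostProcess,
			},
		},
		Containers: []corev1.Container{{
			Name:  "host-agent",                    // illustrative name
			Image: "example.com/host-agent:latest", // illustrative image
		}},
	}
	fmt.Println(spec.HostNetwork, *spec.SecurityContext.WindowsOptions.HostProcess)
}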
If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell", - "args": "Arguments to the entrypoint. The docker image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell", + "command": "Entrypoint array. Not executed within a shell. The docker image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell", + "args": "Arguments to the entrypoint. The docker image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell", "workingDir": "Container's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. Cannot be updated.", "ports": "List of ports to expose from the container. Exposing a port here gives the system additional information about the network connections a container uses, but is primarily informational. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default \"0.0.0.0\" address inside a container will be accessible from the network. Cannot be updated.", "envFrom": "List of sources to populate environment variables in the container. The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. 
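The $$-escaping rule in the command/args documentation is easiest to see on a concrete container. A sketch using the vendored types; the env var, image, and script are illustrative:

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

func main() {
	c := corev1.Container{
		Name:    "demo",
		Image:   "busybox",
		Env:     []corev1.EnvVar{{Name: "GREETING", Value: "hello"}},
		Command: []string{"sh", "-c"},
		// At runtime "$(GREETING)" expands to "hello", while "$$(GREETING)"
		// is reduced to the literal "$(GREETING)" and never expanded.
		Args: []string{"echo $(GREETING) $$(GREETING)"},
	}
	fmt.Println(c.Args[0])
}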
Cannot be updated.", @@ -344,7 +344,7 @@ var map_Container = map[string]string{ "terminationMessagePath": "Optional: Path at which the file to which the container's termination message will be written is mounted into the container's filesystem. Message written is intended to be brief final status, such as an assertion failure message. Will be truncated by the node if greater than 4096 bytes. The total message length across all containers will be limited to 12kb. Defaults to /dev/termination-log. Cannot be updated.", "terminationMessagePolicy": "Indicate how the termination message should be populated. File will use the contents of terminationMessagePath to populate the container status message on both success and failure. FallbackToLogsOnError will use the last chunk of container log output if the termination message file is empty and the container exited with an error. The log output is limited to 2048 bytes or 80 lines, whichever is smaller. Defaults to File. Cannot be updated.", "imagePullPolicy": "Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images", - "securityContext": "Security options the pod should run with. More info: https://kubernetes.io/docs/concepts/policy/security-context/ More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/", + "securityContext": "SecurityContext defines the security options the container should be run with. If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/", "stdin": "Whether this container should allocate a buffer for stdin in the container runtime. If this is not set, reads from stdin in the container will always result in EOF. Default is false.", "stdinOnce": "Whether the container runtime should close the stdin channel after it has been opened by a single attach. When stdin is true the stdin stream will remain open across multiple attach sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the first client attaches to stdin, and then remains open and accepts data until the client disconnects, at which time stdin is closed and remains closed until the container is restarted. If this flag is false, a container processes that reads from stdin will never receive an EOF. Default is false", "tty": "Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. Default is false.", @@ -428,8 +428,8 @@ var map_ContainerStatus = map[string]string{ "state": "Details about the container's current condition.", "lastState": "Details about the container's last termination condition.", "ready": "Specifies whether the container has passed its readiness probe.", - "restartCount": "The number of times the container has been restarted, currently based on the number of dead containers that have not yet been removed. Note that this is calculated from dead containers. But those containers are subject to garbage collection. This value will get capped at 5 by GC.", - "image": "The image the container is running. More info: https://kubernetes.io/docs/concepts/containers/images", + "restartCount": "The number of times the container has been restarted.", + "image": "The image the container is running. 
More info: https://kubernetes.io/docs/concepts/containers/images.", "imageID": "ImageID of the container's image.", "containerID": "Container's ID in the format 'docker://<container_id>'.", "started": "Specifies whether the container has passed its startup probe. Initialized as false, becomes true after startupProbe is considered successful. Resets to false when the container is restarted, or if kubelet loses state temporarily. Is always true when no startupProbe is defined.", @@ -506,7 +506,7 @@ var map_EndpointPort = map[string]string{ "name": "The name of this port. This must match the 'name' field in the corresponding ServicePort. Must be a DNS_LABEL. Optional only if one port is defined.", "port": "The port number of the endpoint.", "protocol": "The IP protocol for this port. Must be UDP, TCP, or SCTP. Default is TCP.", - "appProtocol": "The application protocol for this port. This field follows standard Kubernetes label syntax. Un-prefixed names are reserved for IANA standard service names (as per RFC-6335 and http://www.iana.org/assignments/service-names). Non-standard protocols should use prefixed names such as mycompany.com/my-custom-protocol. This is a beta field that is guarded by the ServiceAppProtocol feature gate and enabled by default.", + "appProtocol": "The application protocol for this port. This field follows standard Kubernetes label syntax. Un-prefixed names are reserved for IANA standard service names (as per RFC-6335 and http://www.iana.org/assignments/service-names). Non-standard protocols should use prefixed names such as mycompany.com/my-custom-protocol.", } func (EndpointPort) SwaggerDoc() map[string]string { @@ -558,7 +558,7 @@ func (EnvFromSource) SwaggerDoc() map[string]string { var map_EnvVar = map[string]string{ "": "EnvVar represents an environment variable present in a Container.", "name": "Name of the environment variable. Must be a C_IDENTIFIER.", - "value": "Variable references $(VAR_NAME) are expanded using the previous defined environment variables in the container and any service environment variables. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Defaults to \"\".", + "value": "Variable references $(VAR_NAME) are expanded using the previously defined environment variables in the container and any service environment variables. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Defaults to \"\".", "valueFrom": "Source for the environment variable's value. Cannot be used if value is not empty.", } @@ -579,8 +579,8 @@ func (EnvVarSource) SwaggerDoc() map[string]string { } var map_EphemeralContainer = map[string]string{ - "": "An EphemeralContainer is a container that may be added temporarily to an existing pod for user-initiated activities such as debugging. Ephemeral containers have no resource or scheduling guarantees, and they will not be restarted when they exit or when a pod is removed or restarted. If an ephemeral container causes a pod to exceed its resource allocation, the pod may be evicted. Ephemeral containers may not be added by directly updating the pod spec.
They must be added via the pod's ephemeralcontainers subresource, and they will appear in the pod spec once added. This is an alpha feature enabled by the EphemeralContainers feature flag.", - "targetContainerName": "If set, the name of the container from PodSpec that this ephemeral container targets. The ephemeral container will be run in the namespaces (IPC, PID, etc) of this container. If not set then the ephemeral container is run in whatever namespaces are shared for the pod. Note that the container runtime must support this feature.", + "": "An EphemeralContainer is a temporary container that you may add to an existing Pod for user-initiated activities such as debugging. Ephemeral containers have no resource or scheduling guarantees, and they will not be restarted when they exit or when a Pod is removed or restarted. The kubelet may evict a Pod if an ephemeral container causes the Pod to exceed its resource allocation.\n\nTo add an ephemeral container, use the ephemeralcontainers subresource of an existing Pod. Ephemeral containers may not be removed or restarted.\n\nThis is a beta feature available on clusters that haven't disabled the EphemeralContainers feature gate.", + "targetContainerName": "If set, the name of the container from PodSpec that this ephemeral container targets. The ephemeral container will be run in the namespaces (IPC, PID, etc) of this container. If not set then the ephemeral container uses the namespaces configured in the Pod spec.\n\nThe container runtime must implement support for this feature. If the runtime does not support namespace targeting then the result of setting this field is undefined.", } func (EphemeralContainer) SwaggerDoc() map[string]string { @@ -591,14 +591,14 @@ var map_EphemeralContainerCommon = map[string]string{ "": "EphemeralContainerCommon is a copy of all fields in Container to be inlined in EphemeralContainer. This separate type allows easy conversion from EphemeralContainer to Container and allows separate documentation for the fields of EphemeralContainer. When a new field is added to Container it must be added here as well.", "name": "Name of the ephemeral container specified as a DNS_LABEL. This name must be unique among all containers, init containers and ephemeral containers.", "image": "Docker image name. More info: https://kubernetes.io/docs/concepts/containers/images", - "command": "Entrypoint array. Not executed within a shell. The docker image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell", - "args": "Arguments to the entrypoint. The docker image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. 
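Putting the EphemeralContainer fields documented here together, a hypothetical debug container targeting an "app" container; per the doc text it can only be applied through the pod's ephemeralcontainers subresource, never a plain pod update:

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

func main() {
	ec := corev1.EphemeralContainer{
		EphemeralContainerCommon: corev1.EphemeralContainerCommon{
			Name:    "debugger", // must be unique among all containers in the pod
			Image:   "busybox",
			Command: []string{"sh"},
			Stdin:   true,
			TTY:     true,
		},
		// Share the namespaces (IPC, PID, etc.) of the targeted container.
		TargetContainerName: "app",
	}
	fmt.Println(ec.Name, "targets", ec.TargetContainerName)
}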
More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell", + "command": "Entrypoint array. Not executed within a shell. The docker image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell", + "args": "Arguments to the entrypoint. The docker image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell", "workingDir": "Container's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. Cannot be updated.", "ports": "Ports are not allowed for ephemeral containers.", "envFrom": "List of sources to populate environment variables in the container. The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated.", "env": "List of environment variables to set in the container. Cannot be updated.", "resources": "Resources are not allowed for ephemeral containers. Ephemeral containers use spare resources already allocated to the pod.", - "volumeMounts": "Pod volumes to mount into the container's filesystem. Cannot be updated.", + "volumeMounts": "Pod volumes to mount into the container's filesystem. Subpath mounts are not allowed for ephemeral containers. Cannot be updated.", "volumeDevices": "volumeDevices is the list of block devices to be used by the container.", "livenessProbe": "Probes are not allowed for ephemeral containers.", "readinessProbe": "Probes are not allowed for ephemeral containers.", @@ -607,7 +607,7 @@ var map_EphemeralContainerCommon = map[string]string{ "terminationMessagePath": "Optional: Path at which the file to which the container's termination message will be written is mounted into the container's filesystem. Message written is intended to be brief final status, such as an assertion failure message. Will be truncated by the node if greater than 4096 bytes. The total message length across all containers will be limited to 12kb. Defaults to /dev/termination-log. Cannot be updated.", "terminationMessagePolicy": "Indicate how the termination message should be populated. 
File will use the contents of terminationMessagePath to populate the container status message on both success and failure. FallbackToLogsOnError will use the last chunk of container log output if the termination message file is empty and the container exited with an error. The log output is limited to 2048 bytes or 80 lines, whichever is smaller. Defaults to File. Cannot be updated.", "imagePullPolicy": "Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images", - "securityContext": "SecurityContext is not allowed for ephemeral containers.", + "securityContext": "Optional: SecurityContext defines the security options the ephemeral container should be run with. If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext.", "stdin": "Whether this container should allocate a buffer for stdin in the container runtime. If this is not set, reads from stdin in the container will always result in EOF. Default is false.", "stdinOnce": "Whether the container runtime should close the stdin channel after it has been opened by a single attach. When stdin is true the stdin stream will remain open across multiple attach sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the first client attaches to stdin, and then remains open and accepts data until the client disconnects, at which time stdin is closed and remains closed until the container is restarted. If this flag is false, a container process that reads from stdin will never receive an EOF. Default is false", "tty": "Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. Default is false.", @@ -617,15 +617,6 @@ func (EphemeralContainerCommon) SwaggerDoc() map[string]string { return map_EphemeralContainerCommon } -var map_EphemeralContainers = map[string]string{ - "": "A list of ephemeral containers used with the Pod ephemeralcontainers subresource.", - "ephemeralContainers": "A list of ephemeral containers associated with this pod. New ephemeral containers may be appended to this list, but existing ephemeral containers may not be removed or modified.", -} - -func (EphemeralContainers) SwaggerDoc() map[string]string { - return map_EphemeralContainers -} - var map_EphemeralVolumeSource = map[string]string{ "": "Represents an ephemeral volume that is handled by a normal storage driver.", "volumeClaimTemplate": "Will be used to create a stand-alone PVC to provision the volume. The pod in which this EphemeralVolumeSource is embedded will be the owner of the PVC, i.e. the PVC will be deleted together with the pod. The name of the PVC will be `<pod name>-<volume name>` where `<volume name>` is the name from the `PodSpec.Volumes` array entry. Pod validation will reject the pod if the concatenated name is not valid for a PVC (for example, too long).\n\nAn existing PVC with that name that is not owned by the pod will *not* be used for the pod to avoid using an unrelated volume by mistake. Starting the pod is then blocked until the unrelated PVC is removed. If such a pre-created PVC is meant to be used by the pod, the PVC has to be updated with an owner reference to the pod once the pod exists.
Normally this should not be necessary, but it may be useful when manually reconstructing a broken cluster.\n\nThis field is read-only and no changes will be made by Kubernetes to the PVC after it has been created.\n\nRequired, must not be nil.", @@ -758,6 +749,15 @@ func (GCEPersistentDiskVolumeSource) SwaggerDoc() map[string]string { return map_GCEPersistentDiskVolumeSource } +var map_GRPCAction = map[string]string{ + "port": "Port number of the gRPC service. Number must be in the range 1 to 65535.", + "service": "Service is the name of the service to place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md).\n\nIf this is not specified, the default behavior is defined by gRPC.", +} + +func (GRPCAction) SwaggerDoc() map[string]string { + return map_GRPCAction +} + var map_GitRepoVolumeSource = map[string]string{ "": "Represents a volume that is populated with the contents of a git repository. Git repo volumes do not support ownership management. Git repo volumes support SELinux relabeling.\n\nDEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir into the Pod's container.", "repository": "Repository URL", @@ -815,17 +815,6 @@ func (HTTPHeader) SwaggerDoc() map[string]string { return map_HTTPHeader } -var map_Handler = map[string]string{ - "": "Handler defines a specific action that should be taken", - "exec": "One and only one of the following should be specified. Exec specifies the action to take.", - "httpGet": "HTTPGet specifies the http request to perform.", - "tcpSocket": "TCPSocket specifies an action involving a TCP port. TCP hooks not yet supported", -} - -func (Handler) SwaggerDoc() map[string]string { - return map_Handler -} - var map_HostAlias = map[string]string{ "": "HostAlias holds the mapping between IP and hostnames that will be injected as an entry in the pod's hosts file.", "ip": "IP address of the host file entry.", @@ -898,13 +887,24 @@ func (KeyToPath) SwaggerDoc() map[string]string { var map_Lifecycle = map[string]string{ "": "Lifecycle describes actions that the management system should take in response to container lifecycle events. For the PostStart and PreStop lifecycle handlers, management of the container blocks until the action is complete, unless the container process fails, in which case the handler is aborted.", "postStart": "PostStart is called immediately after a container is created. If the handler fails, the container is terminated and restarted according to its restart policy. Other management of the container blocks until the hook completes. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks", - "preStop": "PreStop is called immediately before a container is terminated due to an API request or management event such as liveness/startup probe failure, preemption, resource contention, etc. The handler is not called if the container crashes or exits. The reason for termination is passed to the handler. The Pod's termination grace period countdown begins before the PreStop hooked is executed. Regardless of the outcome of the handler, the container will eventually terminate within the Pod's termination grace period. Other management of the container blocks until the hook completes or until the termination grace period is reached. 
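map_GRPCAction above documents the gRPC probe action; a minimal sketch wiring it into a liveness probe, assuming the ProbeHandler wrapper introduced in the same Kubernetes release (the port and service name are illustrative):

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

func main() {
	svc := "example.Health" // name placed in the gRPC HealthCheckRequest
	probe := &corev1.Probe{
		ProbeHandler: corev1.ProbeHandler{
			GRPC: &corev1.GRPCAction{
				Port:    9090, // must be in the range 1 to 65535
				Service: &svc,
			},
		},
		PeriodSeconds: 10,
	}
	fmt.Printf("%+v\n", probe.GRPC)
}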
More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks", + "preStop": "PreStop is called immediately before a container is terminated due to an API request or management event such as liveness/startup probe failure, preemption, resource contention, etc. The handler is not called if the container crashes or exits. The Pod's termination grace period countdown begins before the PreStop hook is executed. Regardless of the outcome of the handler, the container will eventually terminate within the Pod's termination grace period (unless delayed by finalizers). Other management of the container blocks until the hook completes or until the termination grace period is reached. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks", } func (Lifecycle) SwaggerDoc() map[string]string { return map_Lifecycle } +var map_LifecycleHandler = map[string]string{ + "": "LifecycleHandler defines a specific action that should be taken in a lifecycle hook. One and only one of the fields, except TCPSocket, must be specified.", + "exec": "Exec specifies the action to take.", + "httpGet": "HTTPGet specifies the http request to perform.", + "tcpSocket": "Deprecated. TCPSocket is NOT supported as a LifecycleHandler and is kept for backward compatibility. There is no validation of this field and lifecycle hooks will fail at runtime when a tcp handler is specified.", +} + +func (LifecycleHandler) SwaggerDoc() map[string]string { + return map_LifecycleHandler +} + var map_LimitRange = map[string]string{ "": "LimitRange sets resource usage limits for each kind of resource in a Namespace.", "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", @@ -980,7 +980,7 @@ func (LocalObjectReference) SwaggerDoc() map[string]string { var map_LocalVolumeSource = map[string]string{ "": "Local represents directly-attached storage with node affinity (Beta feature)", "path": "The full path to the volume on the node. It can be either a directory or block device (disk, partition, ...).", - "fsType": "Filesystem type to mount. It applies only when the Path is a block device. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". The default value is to auto-select a fileystem if unspecified.", + "fsType": "Filesystem type to mount. It applies only when the Path is a block device. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". The default value is to auto-select a filesystem if unspecified.", } func (LocalVolumeSource) SwaggerDoc() map[string]string { @@ -1094,7 +1094,7 @@ func (NodeCondition) SwaggerDoc() map[string]string { } var map_NodeConfigSource = map[string]string{ - "": "NodeConfigSource specifies a source of node configuration. Exactly one subfield (excluding metadata) must be non-nil.", + "": "NodeConfigSource specifies a source of node configuration. Exactly one subfield (excluding metadata) must be non-nil. This API is deprecated since 1.22", "configMap": "ConfigMap is a reference to a Node's ConfigMap", } @@ -1188,7 +1188,7 @@ var map_NodeSpec = map[string]string{ "providerID": "ID of the node assigned by the cloud provider in the format: <ProviderName>://<ProviderSpecificNodeID>", "unschedulable": "Unschedulable controls node schedulability of new pods. By default, node is schedulable.
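And the renamed LifecycleHandler in use: a sketch of a PreStop hook built from the vendored types (the drain command is illustrative). Exactly one action may be set, and per the doc text TCPSocket is deprecated here:

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

func main() {
	lc := &corev1.Lifecycle{
		PreStop: &corev1.LifecycleHandler{
			// Give in-flight requests a moment to drain before SIGTERM handling.
			Exec: &corev1.ExecAction{
				Command: []string{"/bin/sh", "-c", "sleep 5"},
			},
		},
	}
	fmt.Println(lc.PreStop.Exec.Command)
}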
More info: https://kubernetes.io/docs/concepts/nodes/node/#manual-node-administration", "taints": "If specified, the node's taints.", - "configSource": "If specified, the source to get node configuration from The DynamicKubeletConfig feature gate must be enabled for the Kubelet to use this field", + "configSource": "Deprecated. If specified, the source of the node's configuration. The DynamicKubeletConfig feature gate must be enabled for the Kubelet to use this field. This field is deprecated as of 1.22: https://git.k8s.io/enhancements/keps/sig-node/281-dynamic-kubelet-configuration", "externalID": "Deprecated. Not all kubelets will set this field. Remove field after 1.13. see: https://issues.k8s.io/61966", } @@ -1306,11 +1306,12 @@ var map_PersistentVolumeClaimSpec = map[string]string{ "": "PersistentVolumeClaimSpec describes the common attributes of storage devices and allows a Source for provider-specific attributes", "accessModes": "AccessModes contains the desired access modes the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1", "selector": "A label query over volumes to consider for binding.", - "resources": "Resources represents the minimum resources the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources", + "resources": "Resources represents the minimum resources the volume should have. If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements that are lower than previous value but must still be higher than capacity recorded in the status field of the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources", "volumeName": "VolumeName is the binding reference to the PersistentVolume backing this claim.", "storageClassName": "Name of the StorageClass required by the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1", "volumeMode": "volumeMode defines what type of volume is required by the claim. Value of Filesystem is implied when not included in claim spec.", - "dataSource": "This field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) * An existing PVC (PersistentVolumeClaim) * An existing custom resource that implements data population (Alpha) In order to use custom resource types that implement data population, the AnyVolumeDataSource feature gate must be enabled. If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source.", + "dataSource": "This field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) * An existing PVC (PersistentVolumeClaim) If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source. If the AnyVolumeDataSource feature gate is enabled, this field will always have the same contents as the DataSourceRef field.", + "dataSourceRef": "Specifies the object from which to populate the volume with data, if a non-empty volume is desired. This may be any local object from a non-empty API group (non core object) or a PersistentVolumeClaim object. When this field is specified, volume binding will only succeed if the type of the specified object matches some installed volume populator or dynamic provisioner. 
This field will replace the functionality of the DataSource field and as such if both fields are non-empty, they must have the same value. For backwards compatibility, both fields (DataSource and DataSourceRef) will be set to the same value automatically if one of them is empty and the other is non-empty. There are two important differences between DataSource and DataSourceRef: * While DataSource only allows two specific types of objects, DataSourceRef\n allows any non-core object, as well as PersistentVolumeClaim objects.\n* While DataSource ignores disallowed values (dropping them), DataSourceRef\n preserves all values, and generates an error if a disallowed value is\n specified.\n(Alpha) Using this field requires the AnyVolumeDataSource feature gate to be enabled.", } func (PersistentVolumeClaimSpec) SwaggerDoc() map[string]string { @@ -1318,11 +1319,13 @@ func (PersistentVolumeClaimSpec) SwaggerDoc() map[string]string { } var map_PersistentVolumeClaimStatus = map[string]string{ - "": "PersistentVolumeClaimStatus is the current status of a persistent volume claim.", - "phase": "Phase represents the current phase of PersistentVolumeClaim.", - "accessModes": "AccessModes contains the actual access modes the volume backing the PVC has. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1", - "capacity": "Represents the actual resources of the underlying volume.", - "conditions": "Current Condition of persistent volume claim. If underlying persistent volume is being resized then the Condition will be set to 'ResizeStarted'.", + "": "PersistentVolumeClaimStatus is the current status of a persistent volume claim.", + "phase": "Phase represents the current phase of PersistentVolumeClaim.", + "accessModes": "AccessModes contains the actual access modes the volume backing the PVC has. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1", + "capacity": "Represents the actual resources of the underlying volume.", + "conditions": "Current Condition of persistent volume claim. If underlying persistent volume is being resized then the Condition will be set to 'ResizeStarted'.", + "allocatedResources": "The storage resource within AllocatedResources tracks the capacity allocated to a PVC. It may be larger than the actual capacity when a volume expansion operation is requested. For storage quota, the larger value from allocatedResources and PVC.spec.resources is used. If allocatedResources is not set, PVC.spec.resources alone is used for quota calculation. If a volume expansion capacity request is lowered, allocatedResources is only lowered if there are no expansion operations in progress and if the actual volume capacity is equal or lower than the requested capacity. This is an alpha field and requires enabling RecoverVolumeExpansionFailure feature.", + "resizeStatus": "ResizeStatus stores status of resize operation. ResizeStatus is not set by default but when expansion is complete resizeStatus is set to empty string by resize controller or kubelet. This is an alpha field and requires enabling RecoverVolumeExpansionFailure feature.", } func (PersistentVolumeClaimStatus) SwaggerDoc() map[string]string { @@ -1452,7 +1455,7 @@ var map_PodAffinityTerm = map[string]string{ "labelSelector": "A label query over a set of resources, in this case pods.", "namespaces": "namespaces specifies a static list of namespace names that the term applies to. 
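A sketch of the DataSourceRef flow described above, cloning a claim from a snapshot; the group, kind, and names are illustrative, and per the doc text this path requires the AnyVolumeDataSource feature gate:

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	snapGroup := "snapshot.storage.k8s.io" // illustrative populator API group
	spec := corev1.PersistentVolumeClaimSpec{
		AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce},
		Resources: corev1.ResourceRequirements{
			Requests: corev1.ResourceList{corev1.ResourceStorage: resource.MustParse("1Gi")},
		},
		// DataSource is filled in automatically with the same value when the
		// gate is enabled; the two fields must never disagree.
		DataSourceRef: &corev1.TypedLocalObjectReference{
			APIGroup: &snapGroup,
			Kind:     "VolumeSnapshot",
			Name:     "my-snap",
		},
	}
	fmt.Printf("%+v\n", spec.DataSourceRef)
}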
The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means \"this pod's namespace\"", "topologyKey": "This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed.", - "namespaceSelector": "A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means \"this pod's namespace\". An empty selector ({}) matches all namespaces. This field is alpha-level and is only honored when PodAffinityNamespaceSelector feature is enabled.", + "namespaceSelector": "A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means \"this pod's namespace\". An empty selector ({}) matches all namespaces. This field is beta-level and is only honored when PodAffinityNamespaceSelector feature is enabled.", } func (PodAffinityTerm) SwaggerDoc() map[string]string { @@ -1519,8 +1522,8 @@ func (PodDNSConfigOption) SwaggerDoc() map[string]string { var map_PodExecOptions = map[string]string{ "": "PodExecOptions is the query options to a Pod's remote exec call.", "stdin": "Redirect the standard input stream of the pod for this call. Defaults to false.", - "stdout": "Redirect the standard output stream of the pod for this call. Defaults to true.", - "stderr": "Redirect the standard error stream of the pod for this call. Defaults to true.", + "stdout": "Redirect the standard output stream of the pod for this call.", + "stderr": "Redirect the standard error stream of the pod for this call.", "tty": "TTY if true indicates that a tty will be allocated for the exec call. Defaults to false.", "container": "Container in which to execute the command. Defaults to only container if there is only one container in the pod.", "command": "Command is the remote command to execute. argv array. Not executed within a shell.", @@ -1566,6 +1569,15 @@ func (PodLogOptions) SwaggerDoc() map[string]string { return map_PodLogOptions } +var map_PodOS = map[string]string{ + "": "PodOS defines the OS parameters of a pod.", + "name": "Name is the name of the operating system. The currently supported values are linux and windows. Additional values may be defined in the future and can be one of: https://github.com/opencontainers/runtime-spec/blob/master/config.md#platform-specific-configuration Clients should expect to handle additional values and treat unrecognized values in this field as os: null", +} + +func (PodOS) SwaggerDoc() map[string]string { + return map_PodOS +} + var map_PodPortForwardOptions = map[string]string{ "": "PodPortForwardOptions is the query options to a Pod's port forward call when using WebSockets. The `port` query parameter must specify the port or ports (comma separated) to forward over. Port forwarding over SPDY does not use these options.
It requires the port to be passed in the `port` header as part of request.", "ports": "List of ports to forward Required when using WebSockets", @@ -1595,16 +1607,16 @@ func (PodReadinessGate) SwaggerDoc() map[string]string { var map_PodSecurityContext = map[string]string{ "": "PodSecurityContext holds pod-level security attributes and common container settings. Some fields are also present in container.securityContext. Field values of container.securityContext take precedence over field values of PodSecurityContext.", - "seLinuxOptions": "The SELinux context to be applied to all containers. If unspecified, the container runtime will allocate a random SELinux context for each container. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container.", - "windowsOptions": "The Windows specific settings applied to all containers. If unspecified, the options within a container's SecurityContext will be used. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.", - "runAsUser": "The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container.", - "runAsGroup": "The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container.", + "seLinuxOptions": "The SELinux context to be applied to all containers. If unspecified, the container runtime will allocate a random SELinux context for each container. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. Note that this field cannot be set when spec.os.name is windows.", + "windowsOptions": "The Windows specific settings applied to all containers. If unspecified, the options within a container's SecurityContext will be used. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is linux.", + "runAsUser": "The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. Note that this field cannot be set when spec.os.name is windows.", + "runAsGroup": "The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. Note that this field cannot be set when spec.os.name is windows.", "runAsNonRoot": "Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in SecurityContext. 
If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.", - "supplementalGroups": "A list of groups applied to the first process run in each container, in addition to the container's primary GID. If unspecified, no groups will be added to any container.", + "supplementalGroups": "A list of groups applied to the first process run in each container, in addition to the container's primary GID. If unspecified, no groups will be added to any container. Note that this field cannot be set when spec.os.name is windows.", "fsGroup": "A special supplemental group that applies to all containers in a pod. Some volume types allow the Kubelet to change the ownership of that volume to be owned by the pod:\n\n1. The owning GID will be the FSGroup 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) 3. The permission bits are OR'd with rw-rw----", - "sysctls": "Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch.", - "fsGroupChangePolicy": "fsGroupChangePolicy defines behavior of changing ownership and permission of the volume before being exposed inside Pod. This field will only apply to volume types which support fsGroup based ownership(and permissions). It will have no effect on ephemeral volume types such as: secret, configmaps and emptydir. Valid values are \"OnRootMismatch\" and \"Always\". If not specified, \"Always\" is used.", - "seccompProfile": "The seccomp options to use by the containers in this pod.", + "sysctls": "Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch. Note that this field cannot be set when spec.os.name is windows.", + "fsGroupChangePolicy": "fsGroupChangePolicy defines behavior of changing ownership and permission of the volume before being exposed inside Pod. This field will only apply to volume types which support fsGroup based ownership(and permissions). It will have no effect on ephemeral volume types such as: secret, configmaps and emptydir. Valid values are \"OnRootMismatch\" and \"Always\". If not specified, \"Always\" is used. Note that this field cannot be set when spec.os.name is windows.", + "seccompProfile": "The seccomp options to use by the containers in this pod. Note that this field cannot be set when spec.os.name is windows.", } func (PodSecurityContext) SwaggerDoc() map[string]string { @@ -1625,7 +1637,7 @@ var map_PodSpec = map[string]string{ "volumes": "List of volumes that can be mounted by containers belonging to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes", "initContainers": "List of initialization containers belonging to the pod. Init containers are executed in order prior to containers being started. If any init container fails, the pod is considered to have failed and is handled according to its restartPolicy. The name for an init container or normal container must be unique among all containers. Init containers may not have Lifecycle actions, Readiness probes, Liveness probes, or Startup probes. The resourceRequirements of an init container are taken into account during scheduling by finding the highest request/limit for each resource type, and then using the max of that value or the sum of the normal containers. Limits are applied to init containers in a similar fashion. Init containers cannot currently be added or removed. Cannot be updated.
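The pod-level fields documented above compose roughly like this; a sketch of a linux-only PodSecurityContext from the vendored types (the group ID and policy choice are illustrative):

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

func main() {
	fsGroup := int64(2000)
	policy := corev1.FSGroupChangeOnRootMismatch
	psc := &corev1.PodSecurityContext{
		FSGroup:             &fsGroup,
		FSGroupChangePolicy: &policy, // skip the recursive chown when ownership already matches
		SeccompProfile: &corev1.SeccompProfile{
			Type: corev1.SeccompProfileTypeRuntimeDefault,
		},
	}
	fmt.Printf("%+v\n", psc)
}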
More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/", "containers": "List of containers belonging to the pod. Containers cannot currently be added or removed. There must be at least one container in a Pod. Cannot be updated.", - "ephemeralContainers": "List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing pod to perform user-initiated actions such as debugging. This list cannot be specified when creating a pod, and it cannot be modified by updating the pod spec. In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource. This field is alpha-level and is only honored by servers that enable the EphemeralContainers feature.", + "ephemeralContainers": "List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing pod to perform user-initiated actions such as debugging. This list cannot be specified when creating a pod, and it cannot be modified by updating the pod spec. In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource. This field is beta-level and available on clusters that haven't disabled the EphemeralContainers feature gate.", "restartPolicy": "Restart policy for all containers within the pod. One of Always, OnFailure, Never. Default to Always. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy", "terminationGracePeriodSeconds": "Optional duration in seconds the pod needs to terminate gracefully. May be decreased in delete request. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). If this value is nil, the default grace period will be used instead. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. Defaults to 30 seconds.", "activeDeadlineSeconds": "Optional duration in seconds the pod may be active on the node relative to StartTime before the system will actively try to mark it failed and kill associated containers. Value must be a positive integer.", @@ -1650,13 +1662,14 @@ var map_PodSpec = map[string]string{ "priorityClassName": "If specified, indicates the pod's priority. \"system-node-critical\" and \"system-cluster-critical\" are two special keywords which indicate the highest priorities with the former being the highest priority. Any other name must be defined by creating a PriorityClass object with that name. If not specified, the pod priority will be default or zero if there is no default.", "priority": "The priority value. Various system components use this field to find the priority of the pod. When Priority Admission Controller is enabled, it prevents users from setting this field. The admission controller populates this field from PriorityClassName. The higher the value, the higher the priority.", "dnsConfig": "Specifies the DNS parameters of a pod. Parameters specified here will be merged to the generated DNS configuration based on DNSPolicy.", - "readinessGates": "If specified, all readiness gates will be evaluated for pod readiness. 
A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to \"True\" More info: https://git.k8s.io/enhancements/keps/sig-network/0007-pod-ready%2B%2B.md", - "runtimeClassName": "RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. If unset or empty, the \"legacy\" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. More info: https://git.k8s.io/enhancements/keps/sig-node/runtime-class.md This is a beta feature as of Kubernetes v1.14.", + "readinessGates": "If specified, all readiness gates will be evaluated for pod readiness. A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to \"True\" More info: https://git.k8s.io/enhancements/keps/sig-network/580-pod-readiness-gates", + "runtimeClassName": "RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. If unset or empty, the \"legacy\" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. More info: https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class This is a beta feature as of Kubernetes v1.14.", "enableServiceLinks": "EnableServiceLinks indicates whether information about services should be injected into pod's environment variables, matching the syntax of Docker links. Optional: Defaults to true.", "preemptionPolicy": "PreemptionPolicy is the Policy for preempting pods with lower priority. One of Never, PreemptLowerPriority. Defaults to PreemptLowerPriority if unset. This field is beta-level, gated by the NonPreemptingPriority feature-gate.", - "overhead": "Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. More info: https://git.k8s.io/enhancements/keps/sig-node/20190226-pod-overhead.md This field is alpha-level as of Kubernetes v1.16, and is only honored by servers that enable the PodOverhead feature.", + "overhead": "Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. 
More info: https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md This field is beta-level as of Kubernetes v1.18, and is only honored by servers that enable the PodOverhead feature.", "topologySpreadConstraints": "TopologySpreadConstraints describes how a group of pods ought to spread across topology domains. Scheduler will schedule pods in a way which abides by the constraints. All topologySpreadConstraints are ANDed.", "setHostnameAsFQDN": "If true the pod's hostname will be configured as the pod's FQDN, rather than the leaf name (the default). In Linux containers, this means setting the FQDN in the hostname field of the kernel (the nodename field of struct utsname). In Windows containers, this means setting the registry value of hostname for the registry key HKEY_LOCAL_MACHINE\\SYSTEM\\CurrentControlSet\\Services\\Tcpip\\Parameters to FQDN. If a pod does not have FQDN, this has no effect. Default to false.", + "os": "Specifies the OS of the containers in the pod. Some pod and container fields are restricted if this is set.\n\nIf the OS field is set to linux, the following fields must be unset: - securityContext.windowsOptions\n\nIf the OS field is set to windows, the following fields must be unset: - spec.hostPID - spec.hostIPC - spec.securityContext.seLinuxOptions - spec.securityContext.seccompProfile - spec.securityContext.fsGroup - spec.securityContext.fsGroupChangePolicy - spec.securityContext.sysctls - spec.shareProcessNamespace - spec.securityContext.runAsUser - spec.securityContext.runAsGroup - spec.securityContext.supplementalGroups - spec.containers[*].securityContext.seLinuxOptions - spec.containers[*].securityContext.seccompProfile - spec.containers[*].securityContext.capabilities - spec.containers[*].securityContext.readOnlyRootFilesystem - spec.containers[*].securityContext.privileged - spec.containers[*].securityContext.allowPrivilegeEscalation - spec.containers[*].securityContext.procMount - spec.containers[*].securityContext.runAsUser - spec.containers[*].securityContext.runAsGroup This is an alpha field and requires the IdentifyPodOS feature.", } func (PodSpec) SwaggerDoc() map[string]string { @@ -1677,7 +1690,7 @@ var map_PodStatus = map[string]string{ "initContainerStatuses": "The list has one entry per init container in the manifest. The most recent successful init container will have ready = true, the most recently started container will have startTime set. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-and-container-status", "containerStatuses": "The list has one entry per container in the manifest. Each entry is currently the output of `docker inspect`. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-and-container-status", "qosClass": "The Quality of Service (QOS) classification assigned to the pod based on resource requirements. See PodQOSClass type for available QOS classes. More info: https://git.k8s.io/community/contributors/design-proposals/node/resource-qos.md", - "ephemeralContainerStatuses": "Status for any ephemeral containers that have run in this pod. This field is alpha-level and is only populated by servers that enable the EphemeralContainers feature.", + "ephemeralContainerStatuses": "Status for any ephemeral containers that have run in this pod. 
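The new spec.os field above is what drives all of the "cannot be set when spec.os.name is windows" notes in this file. A minimal sketch of declaring it, assuming the PodOS Name field and the Linux/Windows OSName constants that this revision appears to introduce:

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

func main() {
	// Declare the pod linux-only; per the doc above, windows-only fields
	// such as securityContext.windowsOptions must then stay unset.
	spec := corev1.PodSpec{
		OS: &corev1.PodOS{Name: corev1.Linux}, // corev1.Windows flips the restricted set
		Containers: []corev1.Container{{
			Name:  "app",
			Image: "registry.example/app:latest", // hypothetical image
		}},
	}
	fmt.Println(spec.OS.Name)
}
```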
This field is beta-level and available on clusters that haven't disabled the EphemeralContainers feature gate.", } func (PodStatus) SwaggerDoc() map[string]string { @@ -1783,13 +1796,25 @@ var map_Probe = map[string]string{ "periodSeconds": "How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1.", "successThreshold": "Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1.", "failureThreshold": "Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1.", - "terminationGracePeriodSeconds": "Optional duration in seconds the pod needs to terminate gracefully upon probe failure. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this value overrides the value provided by the pod spec. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). This is an alpha field and requires enabling ProbeTerminationGracePeriod feature gate.", + "terminationGracePeriodSeconds": "Optional duration in seconds the pod needs to terminate gracefully upon probe failure. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this value overrides the value provided by the pod spec. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset.", } func (Probe) SwaggerDoc() map[string]string { return map_Probe } +var map_ProbeHandler = map[string]string{ + "": "ProbeHandler defines a specific action that should be taken in a probe. One and only one of the fields must be specified.", + "exec": "Exec specifies the action to take.", + "httpGet": "HTTPGet specifies the http request to perform.", + "tcpSocket": "TCPSocket specifies an action involving a TCP port.", + "grpc": "GRPC specifies an action involving a GRPC port. This is an alpha field and requires enabling GRPCContainerProbe feature gate.", +} + +func (ProbeHandler) SwaggerDoc() map[string]string { + return map_ProbeHandler +} + var map_ProjectedVolumeSource = map[string]string{ "": "Represents a projected volume source", "sources": "list of volume projections", @@ -2064,7 +2089,7 @@ var map_Secret = map[string]string{ "immutable": "Immutable, if set to true, ensures that data stored in the Secret cannot be updated (only object metadata can be modified). If not set to true, the field can be modified at any time. Defaulted to nil.", "data": "Data contains the secret data. Each key must consist of alphanumeric characters, '-', '_' or '.'. The serialized form of the secret data is a base64 encoded string, representing the arbitrary (possibly non-string) data value here. 
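The ProbeHandler map above documents the type that replaces the old shared Handler for probes (the deepcopy changes further down show Probe now embedding it). A minimal sketch of the new gRPC probe shape; GRPCAction's int32 Port field is assumed here alongside the *string Service visible in the generated deepcopy below:

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

func main() {
	svc := "grpc.health.v1.Health" // optional service name; Service is *string
	probe := corev1.Probe{
		// ProbeHandler is embedded in Probe, replacing the old Handler type.
		ProbeHandler: corev1.ProbeHandler{
			GRPC: &corev1.GRPCAction{Port: 9090, Service: &svc},
		},
		PeriodSeconds:    10,
		FailureThreshold: 3,
	}
	fmt.Println(probe.GRPC.Port) // promoted field access via the embedded handler
}
```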
Described in https://tools.ietf.org/html/rfc4648#section-4", "stringData": "stringData allows specifying non-binary secret data in string form. It is provided as a write-only input field for convenience. All keys and values are merged into the data field on write, overwriting any existing values. The stringData field is never output when reading from the API.", - "type": "Used to facilitate programmatic handling of secret data.", + "type": "Used to facilitate programmatic handling of secret data. More info: https://kubernetes.io/docs/concepts/configuration/secret/#secret-types", } func (Secret) SwaggerDoc() map[string]string { @@ -2134,17 +2159,17 @@ func (SecretVolumeSource) SwaggerDoc() map[string]string { var map_SecurityContext = map[string]string{ "": "SecurityContext holds security configuration that will be applied to a container. Some fields are present in both SecurityContext and PodSecurityContext. When both are set, the values in SecurityContext take precedence.", - "capabilities": "The capabilities to add/drop when running containers. Defaults to the default set of capabilities granted by the container runtime.", - "privileged": "Run container in privileged mode. Processes in privileged containers are essentially equivalent to root on the host. Defaults to false.", - "seLinuxOptions": "The SELinux context to be applied to the container. If unspecified, the container runtime will allocate a random SELinux context for each container. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.", - "windowsOptions": "The Windows specific settings applied to all containers. If unspecified, the options from the PodSecurityContext will be used. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.", - "runAsUser": "The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.", - "runAsGroup": "The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.", + "capabilities": "The capabilities to add/drop when running containers. Defaults to the default set of capabilities granted by the container runtime. Note that this field cannot be set when spec.os.name is windows.", + "privileged": "Run container in privileged mode. Processes in privileged containers are essentially equivalent to root on the host. Defaults to false. Note that this field cannot be set when spec.os.name is windows.", + "seLinuxOptions": "The SELinux context to be applied to the container. If unspecified, the container runtime will allocate a random SELinux context for each container. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is windows.", + "windowsOptions": "The Windows specific settings applied to all containers. If unspecified, the options from the PodSecurityContext will be used. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. 
Note that this field cannot be set when spec.os.name is linux.", + "runAsUser": "The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is windows.", + "runAsGroup": "The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is windows.", "runAsNonRoot": "Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.", - "readOnlyRootFilesystem": "Whether this container has a read-only root filesystem. Default is false.", - "allowPrivilegeEscalation": "AllowPrivilegeEscalation controls whether a process can gain more privileges than its parent process. This bool directly controls if the no_new_privs flag will be set on the container process. AllowPrivilegeEscalation is true always when the container is: 1) run as Privileged 2) has CAP_SYS_ADMIN", - "procMount": "procMount denotes the type of proc mount to use for the containers. The default is DefaultProcMount which uses the container runtime defaults for readonly paths and masked paths. This requires the ProcMountType feature flag to be enabled.", - "seccompProfile": "The seccomp options to use by this container. If seccomp options are provided at both the pod & container level, the container options override the pod options.", + "readOnlyRootFilesystem": "Whether this container has a read-only root filesystem. Default is false. Note that this field cannot be set when spec.os.name is windows.", + "allowPrivilegeEscalation": "AllowPrivilegeEscalation controls whether a process can gain more privileges than its parent process. This bool directly controls if the no_new_privs flag will be set on the container process. AllowPrivilegeEscalation is true always when the container is: 1) run as Privileged 2) has CAP_SYS_ADMIN Note that this field cannot be set when spec.os.name is windows.", + "procMount": "procMount denotes the type of proc mount to use for the containers. The default is DefaultProcMount which uses the container runtime defaults for readonly paths and masked paths. This requires the ProcMountType feature flag to be enabled. Note that this field cannot be set when spec.os.name is windows.", + "seccompProfile": "The seccomp options to use by this container. If seccomp options are provided at both the pod & container level, the container options override the pod options. Note that this field cannot be set when spec.os.name is windows.", } func (SecurityContext) SwaggerDoc() map[string]string { @@ -2218,7 +2243,7 @@ var map_ServicePort = map[string]string{ "": "ServicePort contains information on service's port.", "name": "The name of this port within the service. This must be a DNS_LABEL. All ports within a ServiceSpec must have unique names. 
When considering the endpoints for a Service, this must match the 'name' field in the EndpointPort. Optional if only one ServicePort is defined on this service.", "protocol": "The IP protocol for this port. Supports \"TCP\", \"UDP\", and \"SCTP\". Default is TCP.", - "appProtocol": "The application protocol for this port. This field follows standard Kubernetes label syntax. Un-prefixed names are reserved for IANA standard service names (as per RFC-6335 and http://www.iana.org/assignments/service-names). Non-standard protocols should use prefixed names such as mycompany.com/my-custom-protocol. This is a beta field that is guarded by the ServiceAppProtocol feature gate and enabled by default.", + "appProtocol": "The application protocol for this port. This field follows standard Kubernetes label syntax. Un-prefixed names are reserved for IANA standard service names (as per RFC-6335 and http://www.iana.org/assignments/service-names). Non-standard protocols should use prefixed names such as mycompany.com/my-custom-protocol.", "port": "The port that will be exposed by this service.", "targetPort": "Number or name of the port to access on the pods targeted by the service. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. If this is a string, it will be looked up as a named port in the target Pod's container ports. If this is not specified, the value of the 'port' field is used (an identity map). This field is ignored for services with clusterIP=None, and should be omitted or set equal to the 'port' field. More info: https://kubernetes.io/docs/concepts/services-networking/service/#defining-a-service", "nodePort": "The port on each node on which this service is exposed when type is NodePort or LoadBalancer. Usually assigned by the system. If a value is specified, in-range, and not in use it will be used, otherwise the operation will fail. If not specified, a port will be allocated if this Service requires one. If this field is specified when creating a Service which does not need it, creation will fail. This field will be wiped when updating a Service to no longer need it (e.g. changing type from NodePort to ClusterIP). More info: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport", @@ -2242,21 +2267,20 @@ var map_ServiceSpec = map[string]string{ "ports": "The list of ports that are exposed by this service. More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies", "selector": "Route service traffic to pods with label keys and values matching this selector. If empty or not present, the service is assumed to have an external process managing its endpoints, which Kubernetes will not modify. Only applies to types ClusterIP, NodePort, and LoadBalancer. Ignored if type is ExternalName. More info: https://kubernetes.io/docs/concepts/services-networking/service/", "clusterIP": "clusterIP is the IP address of the service and is usually assigned randomly. If an address is specified manually, is in-range (as per system configuration), and is not in use, it will be allocated to the service; otherwise creation of the service will fail. This field may not be changed through updates unless the type field is also being changed to ExternalName (which requires this field to be blank) or the type field is being changed from ExternalName (in which case this field may optionally be specified, as described above). Valid values are \"None\", empty string (\"\"), or a valid IP address. 
Setting this to \"None\" makes a \"headless service\" (no virtual IP), which is useful when direct endpoint connections are preferred and proxying is not required. Only applies to types ClusterIP, NodePort, and LoadBalancer. If this field is specified when creating a Service of type ExternalName, creation will fail. This field will be wiped when updating a Service to type ExternalName. More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies", - "clusterIPs": "ClusterIPs is a list of IP addresses assigned to this service, and are usually assigned randomly. If an address is specified manually, is in-range (as per system configuration), and is not in use, it will be allocated to the service; otherwise creation of the service will fail. This field may not be changed through updates unless the type field is also being changed to ExternalName (which requires this field to be empty) or the type field is being changed from ExternalName (in which case this field may optionally be specified, as describe above). Valid values are \"None\", empty string (\"\"), or a valid IP address. Setting this to \"None\" makes a \"headless service\" (no virtual IP), which is useful when direct endpoint connections are preferred and proxying is not required. Only applies to types ClusterIP, NodePort, and LoadBalancer. If this field is specified when creating a Service of type ExternalName, creation will fail. This field will be wiped when updating a Service to type ExternalName. If this field is not specified, it will be initialized from the clusterIP field. If this field is specified, clients must ensure that clusterIPs[0] and clusterIP have the same value.\n\nUnless the \"IPv6DualStack\" feature gate is enabled, this field is limited to one value, which must be the same as the clusterIP field. If the feature gate is enabled, this field may hold a maximum of two entries (dual-stack IPs, in either order). These IPs must correspond to the values of the ipFamilies field. Both clusterIPs and ipFamilies are governed by the ipFamilyPolicy field. More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies", + "clusterIPs": "ClusterIPs is a list of IP addresses assigned to this service, and are usually assigned randomly. If an address is specified manually, is in-range (as per system configuration), and is not in use, it will be allocated to the service; otherwise creation of the service will fail. This field may not be changed through updates unless the type field is also being changed to ExternalName (which requires this field to be empty) or the type field is being changed from ExternalName (in which case this field may optionally be specified, as describe above). Valid values are \"None\", empty string (\"\"), or a valid IP address. Setting this to \"None\" makes a \"headless service\" (no virtual IP), which is useful when direct endpoint connections are preferred and proxying is not required. Only applies to types ClusterIP, NodePort, and LoadBalancer. If this field is specified when creating a Service of type ExternalName, creation will fail. This field will be wiped when updating a Service to type ExternalName. If this field is not specified, it will be initialized from the clusterIP field. If this field is specified, clients must ensure that clusterIPs[0] and clusterIP have the same value.\n\nThis field may hold a maximum of two entries (dual-stack IPs, in either order). 
These IPs must correspond to the values of the ipFamilies field. Both clusterIPs and ipFamilies are governed by the ipFamilyPolicy field. More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies", "type": "type determines how the Service is exposed. Defaults to ClusterIP. Valid options are ExternalName, ClusterIP, NodePort, and LoadBalancer. \"ClusterIP\" allocates a cluster-internal IP address for load-balancing to endpoints. Endpoints are determined by the selector or if that is not specified, by manual construction of an Endpoints object or EndpointSlice objects. If clusterIP is \"None\", no virtual IP is allocated and the endpoints are published as a set of endpoints rather than a virtual IP. \"NodePort\" builds on ClusterIP and allocates a port on every node which routes to the same endpoints as the clusterIP. \"LoadBalancer\" builds on NodePort and creates an external load-balancer (if supported in the current cloud) which routes to the same endpoints as the clusterIP. \"ExternalName\" aliases this service to the specified externalName. Several other fields do not apply to ExternalName services. More info: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types", "externalIPs": "externalIPs is a list of IP addresses for which nodes in the cluster will also accept traffic for this service. These IPs are not managed by Kubernetes. The user is responsible for ensuring that traffic arrives at a node with this IP. A common example is external load-balancers that are not part of the Kubernetes system.", "sessionAffinity": "Supports \"ClientIP\" and \"None\". Used to maintain session affinity. Enable client IP based session affinity. Must be ClientIP or None. Defaults to None. More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies", "loadBalancerIP": "Only applies to Service Type: LoadBalancer. LoadBalancer will get created with the IP specified in this field. This feature depends on whether the underlying cloud-provider supports specifying the loadBalancerIP when a load balancer is created. This field will be ignored if the cloud-provider does not support the feature.", - "loadBalancerSourceRanges": "If specified and supported by the platform, this will restrict traffic through the cloud-provider load-balancer will be restricted to the specified client IPs. This field will be ignored if the cloud-provider does not support the feature.\" More info: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/", + "loadBalancerSourceRanges": "If specified and supported by the platform, traffic through the cloud-provider load-balancer will be restricted to the specified client IPs. This field will be ignored if the cloud-provider does not support the feature. More info: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/", "externalName": "externalName is the external reference that discovery mechanisms will return as an alias for this service (e.g. a DNS CNAME record). No proxying will be involved. Must be a lowercase RFC-1123 hostname (https://tools.ietf.org/html/rfc1123) and requires `type` to be \"ExternalName\".", "externalTrafficPolicy": "externalTrafficPolicy denotes if this Service desires to route external traffic to node-local or cluster-wide endpoints. 
\"Local\" preserves the client source IP and avoids a second hop for LoadBalancer and Nodeport type services, but risks potentially imbalanced traffic spreading. \"Cluster\" obscures the client source IP and may cause a second hop to another node, but should have good overall load-spreading.", "healthCheckNodePort": "healthCheckNodePort specifies the healthcheck nodePort for the service. This only applies when type is set to LoadBalancer and externalTrafficPolicy is set to Local. If a value is specified, is in-range, and is not in use, it will be used. If not specified, a value will be automatically allocated. External systems (e.g. load-balancers) can use this port to determine if a given node holds endpoints for this service or not. If this field is specified when creating a Service which does not need it, creation will fail. This field will be wiped when updating a Service to no longer need it (e.g. changing type).", "publishNotReadyAddresses": "publishNotReadyAddresses indicates that any agent which deals with endpoints for this Service should disregard any indications of ready/not-ready. The primary use case for setting this field is for a StatefulSet's Headless Service to propagate SRV DNS records for its Pods for the purpose of peer discovery. The Kubernetes controllers that generate Endpoints and EndpointSlice resources for Services interpret this to mean that all endpoints are considered \"ready\" even if the Pods themselves are not. Agents which consume only Kubernetes generated endpoints through the Endpoints or EndpointSlice resources can safely assume this behavior.", "sessionAffinityConfig": "sessionAffinityConfig contains the configurations of session affinity.", - "topologyKeys": "topologyKeys is a preference-order list of topology keys which implementations of services should use to preferentially sort endpoints when accessing this Service, it can not be used at the same time as externalTrafficPolicy=Local. Topology keys must be valid label keys and at most 16 keys may be specified. Endpoints are chosen based on the first topology key with available backends. If this field is specified and all entries have no backends that match the topology of the client, the service has no backends for that client and connections should fail. The special value \"*\" may be used to mean \"any topology\". This catch-all value, if used, only makes sense as the last value in the list. If this is not specified or empty, no topology constraints will be applied. This field is alpha-level and is only honored by servers that enable the ServiceTopology feature. This field is deprecated and will be removed in a future version.", - "ipFamilies": "IPFamilies is a list of IP families (e.g. IPv4, IPv6) assigned to this service, and is gated by the \"IPv6DualStack\" feature gate. This field is usually assigned automatically based on cluster configuration and the ipFamilyPolicy field. If this field is specified manually, the requested family is available in the cluster, and ipFamilyPolicy allows it, it will be used; otherwise creation of the service will fail. This field is conditionally mutable: it allows for adding or removing a secondary IP family, but it does not allow changing the primary IP family of the Service. Valid values are \"IPv4\" and \"IPv6\". This field only applies to Services of types ClusterIP, NodePort, and LoadBalancer, and does apply to \"headless\" services. 
This field will be wiped when updating a Service to type ExternalName.\n\nThis field may hold a maximum of two entries (dual-stack families, in either order). These families must correspond to the values of the clusterIPs field, if specified. Both clusterIPs and ipFamilies are governed by the ipFamilyPolicy field.", - "ipFamilyPolicy": "IPFamilyPolicy represents the dual-stack-ness requested or required by this Service, and is gated by the \"IPv6DualStack\" feature gate. If there is no value provided, then this field will be set to SingleStack. Services can be \"SingleStack\" (a single IP family), \"PreferDualStack\" (two IP families on dual-stack configured clusters or a single IP family on single-stack clusters), or \"RequireDualStack\" (two IP families on dual-stack configured clusters, otherwise fail). The ipFamilies and clusterIPs fields depend on the value of this field. This field will be wiped when updating a service to type ExternalName.", - "allocateLoadBalancerNodePorts": "allocateLoadBalancerNodePorts defines if NodePorts will be automatically allocated for services with type LoadBalancer. Default is \"true\". It may be set to \"false\" if the cluster load-balancer does not rely on NodePorts. allocateLoadBalancerNodePorts may only be set for services with type LoadBalancer and will be cleared if the type is changed to any other type. This field is alpha-level and is only honored by servers that enable the ServiceLBNodePortControl feature.", + "ipFamilies": "IPFamilies is a list of IP families (e.g. IPv4, IPv6) assigned to this service. This field is usually assigned automatically based on cluster configuration and the ipFamilyPolicy field. If this field is specified manually, the requested family is available in the cluster, and ipFamilyPolicy allows it, it will be used; otherwise creation of the service will fail. This field is conditionally mutable: it allows for adding or removing a secondary IP family, but it does not allow changing the primary IP family of the Service. Valid values are \"IPv4\" and \"IPv6\". This field only applies to Services of types ClusterIP, NodePort, and LoadBalancer, and does apply to \"headless\" services. This field will be wiped when updating a Service to type ExternalName.\n\nThis field may hold a maximum of two entries (dual-stack families, in either order). These families must correspond to the values of the clusterIPs field, if specified. Both clusterIPs and ipFamilies are governed by the ipFamilyPolicy field.", + "ipFamilyPolicy": "IPFamilyPolicy represents the dual-stack-ness requested or required by this Service. If there is no value provided, then this field will be set to SingleStack. Services can be \"SingleStack\" (a single IP family), \"PreferDualStack\" (two IP families on dual-stack configured clusters or a single IP family on single-stack clusters), or \"RequireDualStack\" (two IP families on dual-stack configured clusters, otherwise fail). The ipFamilies and clusterIPs fields depend on the value of this field. This field will be wiped when updating a service to type ExternalName.", + "allocateLoadBalancerNodePorts": "allocateLoadBalancerNodePorts defines if NodePorts will be automatically allocated for services with type LoadBalancer. Default is \"true\". It may be set to \"false\" if the cluster load-balancer does not rely on NodePorts. If the caller requests specific NodePorts (by specifying a value), those requests will be respected, regardless of this field. 
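Since ipFamilyPolicy, ipFamilies, and clusterIPs are documented above as a tightly coupled trio, a minimal sketch of a dual-stack ServiceSpec may help; the IPFamilyPolicyType and IPFamily constant names are assumed for this revision of the API:

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

func main() {
	policy := corev1.IPFamilyPolicyPreferDualStack
	spec := corev1.ServiceSpec{
		Type:           corev1.ServiceTypeClusterIP,
		IPFamilyPolicy: &policy, // governs both ipFamilies and clusterIPs
		// Primary family first; the apiserver fills clusterIPs to match.
		IPFamilies: []corev1.IPFamily{corev1.IPv4Protocol, corev1.IPv6Protocol},
		Ports:      []corev1.ServicePort{{Name: "http", Port: 80}},
	}
	fmt.Println(spec.IPFamilies)
}
```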
This field may only be set for services with type LoadBalancer and will be cleared if the type is changed to any other type. This field is beta-level and is only honored by servers that enable the ServiceLBNodePortControl feature.", "loadBalancerClass": "loadBalancerClass is the class of the load balancer implementation this Service belongs to. If specified, the value of this field must be a label-style identifier, with an optional prefix, e.g. \"internal-vip\" or \"example.com/internal-vip\". Unprefixed names are reserved for end-users. This field can only be set when the Service type is 'LoadBalancer'. If not set, the default load balancer implementation is used, today this is typically done through the cloud provider integration, but should apply for any default implementation. If set, it is assumed that a load balancer implementation is watching for Services with a matching class. Any default load balancer implementation (e.g. cloud providers) should ignore Services that set this field. This field can only be set when creating or updating a Service to type 'LoadBalancer'. Once set, it can not be changed. This field will be wiped when a service is updated to a non 'LoadBalancer' type.", "internalTrafficPolicy": "InternalTrafficPolicy specifies if the cluster internal traffic should be routed to all endpoints or node-local endpoints only. \"Cluster\" routes internal traffic to a Service to all endpoints. \"Local\" routes traffic to node-local endpoints only, traffic is dropped if no node-local endpoints are ready. The default value is \"Cluster\".", } @@ -2378,7 +2402,7 @@ var map_TopologySpreadConstraint = map[string]string{ "": "TopologySpreadConstraint specifies how to spread matching pods among the given topology.", "maxSkew": "MaxSkew describes the degree to which pods may be unevenly distributed. When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference between the number of matching pods in the target topology and the global minimum. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 1/1/0: ", "topologyKey": "TopologyKey is the key of node labels. Nodes that have a label with this key and identical values are considered to be in the same topology. We consider each as a \"bucket\", and try to put balanced number of pods into each bucket. It's a required field.", - "whenUnsatisfiable": "WhenUnsatisfiable indicates how to deal with a pod if it doesn't satisfy the spread constraint. - DoNotSchedule (default) tells the scheduler not to schedule it. - ScheduleAnyway tells the scheduler to schedule the pod in any location,\n but giving higher precedence to topologies that would help reduce the\n skew.\nA constraint is considered \"Unsatisfiable\" for an incoming pod if and only if every possible node assigment for that pod would violate \"MaxSkew\" on some topology. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 3/1/1: ", + "whenUnsatisfiable": "WhenUnsatisfiable indicates how to deal with a pod if it doesn't satisfy the spread constraint. - DoNotSchedule (default) tells the scheduler not to schedule it. - ScheduleAnyway tells the scheduler to schedule the pod in any location,\n but giving higher precedence to topologies that would help reduce the\n skew.\nA constraint is considered \"Unsatisfiable\" for an incoming pod if and only if every possible node assignment for that pod would violate \"MaxSkew\" on some topology. 
For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 3/1/1: ", "labelSelector": "LabelSelector is used to find matching pods. Pods that match this label selector are counted to determine the number of pods in their corresponding topology domain.", } @@ -2481,7 +2505,7 @@ var map_VolumeSource = map[string]string{ "scaleIO": "ScaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes.", "storageos": "StorageOS represents a StorageOS volume attached and mounted on Kubernetes nodes.", "csi": "CSI (Container Storage Interface) represents ephemeral storage that is handled by certain external CSI drivers (Beta feature).", - "ephemeral": "Ephemeral represents a volume that is handled by a cluster storage driver. The volume's lifecycle is tied to the pod that defines it - it will be created before the pod starts, and deleted when the pod is removed.\n\nUse this if: a) the volume is only needed while the pod runs, b) features of normal volumes like restoring from snapshot or capacity\n tracking are needed,\nc) the storage driver is specified through a storage class, and d) the storage driver supports dynamic volume provisioning through\n a PersistentVolumeClaim (see EphemeralVolumeSource for more\n information on the connection between this volume type\n and PersistentVolumeClaim).\n\nUse PersistentVolumeClaim or one of the vendor-specific APIs for volumes that persist for longer than the lifecycle of an individual pod.\n\nUse CSI for light-weight local ephemeral volumes if the CSI driver is meant to be used that way - see the documentation of the driver for more information.\n\nA pod can use both types of ephemeral volumes and persistent volumes at the same time.\n\nThis is a beta feature and only available when the GenericEphemeralVolume feature gate is enabled.", + "ephemeral": "Ephemeral represents a volume that is handled by a cluster storage driver. The volume's lifecycle is tied to the pod that defines it - it will be created before the pod starts, and deleted when the pod is removed.\n\nUse this if: a) the volume is only needed while the pod runs, b) features of normal volumes like restoring from snapshot or capacity\n tracking are needed,\nc) the storage driver is specified through a storage class, and d) the storage driver supports dynamic volume provisioning through\n a PersistentVolumeClaim (see EphemeralVolumeSource for more\n information on the connection between this volume type\n and PersistentVolumeClaim).\n\nUse PersistentVolumeClaim or one of the vendor-specific APIs for volumes that persist for longer than the lifecycle of an individual pod.\n\nUse CSI for light-weight local ephemeral volumes if the CSI driver is meant to be used that way - see the documentation of the driver for more information.\n\nA pod can use both types of ephemeral volumes and persistent volumes at the same time.", } func (VolumeSource) SwaggerDoc() map[string]string { @@ -2515,6 +2539,7 @@ var map_WindowsSecurityContextOptions = map[string]string{ "gmsaCredentialSpecName": "GMSACredentialSpecName is the name of the GMSA credential spec to use.", "gmsaCredentialSpec": "GMSACredentialSpec is where the GMSA admission webhook (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the GMSA credential spec named by the GMSACredentialSpecName field.", "runAsUserName": "The UserName in Windows to run the entrypoint of the container process. Defaults to the user specified in image metadata if unspecified. 
May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.", + "hostProcess": "HostProcess determines if a container should be run as a 'Host Process' container. This field is alpha-level and will only be honored by components that enable the WindowsHostProcessContainers feature flag. Setting this field without the feature flag will result in errors when validating the Pod. All of a Pod's containers must have the same effective HostProcess value (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). In addition, if HostProcess is true then HostNetwork must also be set to true.", } func (WindowsSecurityContextOptions) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go index b60baa665610d..fc951ad44d2c2 100644 --- a/vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* @@ -1400,39 +1401,6 @@ func (in *EphemeralContainerCommon) DeepCopy() *EphemeralContainerCommon { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *EphemeralContainers) DeepCopyInto(out *EphemeralContainers) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - if in.EphemeralContainers != nil { - in, out := &in.EphemeralContainers, &out.EphemeralContainers - *out = make([]EphemeralContainer, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EphemeralContainers. -func (in *EphemeralContainers) DeepCopy() *EphemeralContainers { - if in == nil { - return nil - } - out := new(EphemeralContainers) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *EphemeralContainers) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *EphemeralVolumeSource) DeepCopyInto(out *EphemeralVolumeSource) { *out = *in @@ -1701,6 +1669,27 @@ func (in *GCEPersistentDiskVolumeSource) DeepCopy() *GCEPersistentDiskVolumeSour return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GRPCAction) DeepCopyInto(out *GRPCAction) { + *out = *in + if in.Service != nil { + in, out := &in.Service, &out.Service + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GRPCAction. +func (in *GRPCAction) DeepCopy() *GRPCAction { + if in == nil { + return nil + } + out := new(GRPCAction) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *GitRepoVolumeSource) DeepCopyInto(out *GitRepoVolumeSource) { *out = *in @@ -1792,37 +1781,6 @@ func (in *HTTPHeader) DeepCopy() *HTTPHeader { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
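The hostProcess field added above carries two hard requirements: every container in the pod must agree on its value, and hostNetwork must be true. A minimal sketch under those rules; the image and user name are hypothetical:

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

func main() {
	hostProcess := true
	user := "NT AUTHORITY\\SYSTEM" // hypothetical Windows identity
	spec := corev1.PodSpec{
		HostNetwork: true, // required whenever hostProcess is true
		Containers: []corev1.Container{{
			Name:  "hp",
			Image: "registry.example/hp:latest", // hypothetical image
			SecurityContext: &corev1.SecurityContext{
				WindowsOptions: &corev1.WindowsSecurityContextOptions{
					HostProcess:   &hostProcess, // all containers must share this value
					RunAsUserName: &user,
				},
			},
		}},
	}
	fmt.Println(*spec.Containers[0].SecurityContext.WindowsOptions.HostProcess)
}
```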
in must be non-nil. -func (in *Handler) DeepCopyInto(out *Handler) { - *out = *in - if in.Exec != nil { - in, out := &in.Exec, &out.Exec - *out = new(ExecAction) - (*in).DeepCopyInto(*out) - } - if in.HTTPGet != nil { - in, out := &in.HTTPGet, &out.HTTPGet - *out = new(HTTPGetAction) - (*in).DeepCopyInto(*out) - } - if in.TCPSocket != nil { - in, out := &in.TCPSocket, &out.TCPSocket - *out = new(TCPSocketAction) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Handler. -func (in *Handler) DeepCopy() *Handler { - if in == nil { - return nil - } - out := new(Handler) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *HostAlias) DeepCopyInto(out *HostAlias) { *out = *in @@ -1953,12 +1911,12 @@ func (in *Lifecycle) DeepCopyInto(out *Lifecycle) { *out = *in if in.PostStart != nil { in, out := &in.PostStart, &out.PostStart - *out = new(Handler) + *out = new(LifecycleHandler) (*in).DeepCopyInto(*out) } if in.PreStop != nil { in, out := &in.PreStop, &out.PreStop - *out = new(Handler) + *out = new(LifecycleHandler) (*in).DeepCopyInto(*out) } return @@ -1974,6 +1932,37 @@ func (in *Lifecycle) DeepCopy() *Lifecycle { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LifecycleHandler) DeepCopyInto(out *LifecycleHandler) { + *out = *in + if in.Exec != nil { + in, out := &in.Exec, &out.Exec + *out = new(ExecAction) + (*in).DeepCopyInto(*out) + } + if in.HTTPGet != nil { + in, out := &in.HTTPGet, &out.HTTPGet + *out = new(HTTPGetAction) + (*in).DeepCopyInto(*out) + } + if in.TCPSocket != nil { + in, out := &in.TCPSocket, &out.TCPSocket + *out = new(TCPSocketAction) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LifecycleHandler. +func (in *LifecycleHandler) DeepCopy() *LifecycleHandler { + if in == nil { + return nil + } + out := new(LifecycleHandler) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *LimitRange) DeepCopyInto(out *LimitRange) { *out = *in @@ -2967,6 +2956,11 @@ func (in *PersistentVolumeClaimSpec) DeepCopyInto(out *PersistentVolumeClaimSpec *out = new(TypedLocalObjectReference) (*in).DeepCopyInto(*out) } + if in.DataSourceRef != nil { + in, out := &in.DataSourceRef, &out.DataSourceRef + *out = new(TypedLocalObjectReference) + (*in).DeepCopyInto(*out) + } return } @@ -3002,6 +2996,18 @@ func (in *PersistentVolumeClaimStatus) DeepCopyInto(out *PersistentVolumeClaimSt (*in)[i].DeepCopyInto(&(*out)[i]) } } + if in.AllocatedResources != nil { + in, out := &in.AllocatedResources, &out.AllocatedResources + *out = make(ResourceList, len(*in)) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + if in.ResizeStatus != nil { + in, out := &in.ResizeStatus, &out.ResizeStatus + *out = new(PersistentVolumeClaimResizeStatus) + **out = **in + } return } @@ -3628,6 +3634,22 @@ func (in *PodLogOptions) DeepCopyObject() runtime.Object { return nil } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodOS) DeepCopyInto(out *PodOS) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodOS. 
+func (in *PodOS) DeepCopy() *PodOS { + if in == nil { + return nil + } + out := new(PodOS) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *PodPortForwardOptions) DeepCopyInto(out *PodPortForwardOptions) { *out = *in @@ -3922,6 +3944,11 @@ func (in *PodSpec) DeepCopyInto(out *PodSpec) { *out = new(bool) **out = **in } + if in.OS != nil { + in, out := &in.OS, &out.OS + *out = new(PodOS) + **out = **in + } return } @@ -4189,7 +4216,7 @@ func (in *PreferredSchedulingTerm) DeepCopy() *PreferredSchedulingTerm { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Probe) DeepCopyInto(out *Probe) { *out = *in - in.Handler.DeepCopyInto(&out.Handler) + in.ProbeHandler.DeepCopyInto(&out.ProbeHandler) if in.TerminationGracePeriodSeconds != nil { in, out := &in.TerminationGracePeriodSeconds, &out.TerminationGracePeriodSeconds *out = new(int64) @@ -4208,6 +4235,42 @@ func (in *Probe) DeepCopy() *Probe { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProbeHandler) DeepCopyInto(out *ProbeHandler) { + *out = *in + if in.Exec != nil { + in, out := &in.Exec, &out.Exec + *out = new(ExecAction) + (*in).DeepCopyInto(*out) + } + if in.HTTPGet != nil { + in, out := &in.HTTPGet, &out.HTTPGet + *out = new(HTTPGetAction) + (*in).DeepCopyInto(*out) + } + if in.TCPSocket != nil { + in, out := &in.TCPSocket, &out.TCPSocket + *out = new(TCPSocketAction) + **out = **in + } + if in.GRPC != nil { + in, out := &in.GRPC, &out.GRPC + *out = new(GRPCAction) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProbeHandler. +func (in *ProbeHandler) DeepCopy() *ProbeHandler { + if in == nil { + return nil + } + out := new(ProbeHandler) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ProjectedVolumeSource) DeepCopyInto(out *ProjectedVolumeSource) { *out = *in @@ -5330,11 +5393,6 @@ func (in *ServiceSpec) DeepCopyInto(out *ServiceSpec) { *out = new(SessionAffinityConfig) (*in).DeepCopyInto(*out) } - if in.TopologyKeys != nil { - in, out := &in.TopologyKeys, &out.TopologyKeys - *out = make([]string, len(*in)) - copy(*out, *in) - } if in.IPFamilies != nil { in, out := &in.IPFamilies, &out.IPFamilies *out = make([]IPFamily, len(*in)) @@ -5943,6 +6001,11 @@ func (in *WindowsSecurityContextOptions) DeepCopyInto(out *WindowsSecurityContex *out = new(string) **out = **in } + if in.HostProcess != nil { + in, out := &in.HostProcess, &out.HostProcess + *out = new(bool) + **out = **in + } return } diff --git a/vendor/k8s.io/api/discovery/v1/generated.proto b/vendor/k8s.io/api/discovery/v1/generated.proto index 5844965d0951c..0e1e4a8c37d5e 100644 --- a/vendor/k8s.io/api/discovery/v1/generated.proto +++ b/vendor/k8s.io/api/discovery/v1/generated.proto @@ -115,6 +115,7 @@ message EndpointHints { } // EndpointPort represents a Port used by an EndpointSlice +// +structType=atomic message EndpointPort { // The name of this port. All ports in an EndpointSlice must have a unique // name. 
If the EndpointSlice is derived from a Kubernetes service, this diff --git a/vendor/k8s.io/api/discovery/v1/types.go b/vendor/k8s.io/api/discovery/v1/types.go index fa990efdb73b5..3c45ba3e7f92e 100644 --- a/vendor/k8s.io/api/discovery/v1/types.go +++ b/vendor/k8s.io/api/discovery/v1/types.go @@ -55,6 +55,7 @@ type EndpointSlice struct { } // AddressType represents the type of address referred to by an endpoint. +// +enum type AddressType string const ( @@ -153,6 +154,7 @@ type ForZone struct { } // EndpointPort represents a Port used by an EndpointSlice +// +structType=atomic type EndpointPort struct { // The name of this port. All ports in an EndpointSlice must have a unique // name. If the EndpointSlice is derived from a Kubernetes service, this diff --git a/vendor/k8s.io/api/discovery/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/discovery/v1/zz_generated.deepcopy.go index 31a912386f19a..caa872af0059b 100644 --- a/vendor/k8s.io/api/discovery/v1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/discovery/v1/zz_generated.deepcopy.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* diff --git a/vendor/k8s.io/api/discovery/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/discovery/v1beta1/zz_generated.deepcopy.go index f13536b4bc6ef..13b9544b0c70e 100644 --- a/vendor/k8s.io/api/discovery/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/discovery/v1beta1/zz_generated.deepcopy.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* diff --git a/vendor/k8s.io/api/discovery/v1beta1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/discovery/v1beta1/zz_generated.prerelease-lifecycle.go index c0f2c63f09ac3..ebca42477694c 100644 --- a/vendor/k8s.io/api/discovery/v1beta1/zz_generated.prerelease-lifecycle.go +++ b/vendor/k8s.io/api/discovery/v1beta1/zz_generated.prerelease-lifecycle.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* diff --git a/vendor/k8s.io/api/events/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/events/v1/zz_generated.deepcopy.go index 19a3c5f776165..738f0163d3a4f 100644 --- a/vendor/k8s.io/api/events/v1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/events/v1/zz_generated.deepcopy.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* diff --git a/vendor/k8s.io/api/events/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/events/v1beta1/zz_generated.deepcopy.go index 779ebaf6e94ef..1e073ae9c9ff9 100644 --- a/vendor/k8s.io/api/events/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/events/v1beta1/zz_generated.deepcopy.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* diff --git a/vendor/k8s.io/api/events/v1beta1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/events/v1beta1/zz_generated.prerelease-lifecycle.go index 2ab7b412b9621..227ca4b7de163 100644 --- a/vendor/k8s.io/api/events/v1beta1/zz_generated.prerelease-lifecycle.go +++ b/vendor/k8s.io/api/events/v1beta1/zz_generated.prerelease-lifecycle.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* diff --git a/vendor/k8s.io/api/extensions/v1beta1/generated.proto b/vendor/k8s.io/api/extensions/v1beta1/generated.proto index 5e3d165ebe6a2..737a7e79c76c4 100644 --- a/vendor/k8s.io/api/extensions/v1beta1/generated.proto +++ b/vendor/k8s.io/api/extensions/v1beta1/generated.proto @@ -210,6 +210,9 @@ message DaemonSetStatus { repeated DaemonSetCondition conditions = 10; } +// 
DaemonSetUpdateStrategy indicates the strategy that the DaemonSet +// controller will use to perform updates. It includes any additional parameters +// necessary to perform the update for the indicated strategy. message DaemonSetUpdateStrategy { // Type of daemon set update. Can be "RollingUpdate" or "OnDelete". // Default is OnDelete. @@ -580,7 +583,12 @@ message IngressRule { // mixing different types of rules in a single Ingress is disallowed, so exactly // one of the following must be set. message IngressRuleValue { - // +optional + // http is a list of http selectors pointing to backends. + // A path is matched against the path of an incoming request. Currently it can + // contain characters disallowed from the conventional "path" part of a URL + // as defined by RFC 3986. Paths must begin with a '/'. + // A backend defines the referenced service endpoint to which the traffic + // will be forwarded. optional HTTPIngressRuleValue http = 1; } @@ -756,8 +764,8 @@ message NetworkPolicyPort { // should be allowed by the policy. This field cannot be defined if the port field // is not defined or if the port field is defined as a named (string) port. // The endPort must be equal or greater than port. - // This feature is in Alpha state and should be enabled using the Feature Gate - // "NetworkPolicyEndPort". + // This feature is in Beta state and is enabled by default. + // It can be disabled using the Feature Gate "NetworkPolicyEndPort". // +optional optional int32 endPort = 3; } @@ -1090,7 +1098,7 @@ message RollingUpdateDaemonSet { // The maximum number of DaemonSet pods that can be unavailable during the // update. Value can be an absolute number (ex: 5) or a percentage of total // number of DaemonSet pods at the start of the update (ex: 10%). Absolute - // number is calculated from percentage by rounding down to a minimum of one. + // number is calculated from percentage by rounding up. // This cannot be 0 if MaxSurge is 0 // Default value is 1. // Example: when this is set to 30%, at most 30% of the total number of nodes @@ -1237,6 +1245,7 @@ message ScaleStatus { // label query over pods that should match the replicas count. More info: http://kubernetes.io/docs/user-guide/labels#label-selectors // +optional + // +mapType=atomic map selector = 2; // label selector for pods that should match the replicas count. This is a serialized diff --git a/vendor/k8s.io/api/extensions/v1beta1/types.go b/vendor/k8s.io/api/extensions/v1beta1/types.go index f3479713c5378..963318a2fe1f6 100644 --- a/vendor/k8s.io/api/extensions/v1beta1/types.go +++ b/vendor/k8s.io/api/extensions/v1beta1/types.go @@ -37,6 +37,7 @@ type ScaleStatus struct { // label query over pods that should match the replicas count. More info: http://kubernetes.io/docs/user-guide/labels#label-selectors // +optional + // +mapType=atomic Selector map[string]string `json:"selector,omitempty" protobuf:"bytes,2,rep,name=selector"` // label selector for pods that should match the replicas count. 
This is a serialized @@ -73,6 +74,7 @@ type Scale struct { // +genclient // +genclient:method=GetScale,verb=get,subresource=scale,result=Scale // +genclient:method=UpdateScale,verb=update,subresource=scale,input=Scale,result=Scale +// +genclient:method=ApplyScale,verb=apply,subresource=scale,input=Scale,result=Scale // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +k8s:prerelease-lifecycle-gen:introduced=1.1 // +k8s:prerelease-lifecycle-gen:deprecated=1.8 @@ -327,6 +329,9 @@ type DeploymentList struct { Items []Deployment `json:"items" protobuf:"bytes,2,rep,name=items"` } +// DaemonSetUpdateStrategy indicates the strategy that the DaemonSet +// controller will use to perform updates. It includes any additional parameters +// necessary to perform the update for the indicated strategy. type DaemonSetUpdateStrategy struct { // Type of daemon set update. Can be "RollingUpdate" or "OnDelete". // Default is OnDelete. @@ -357,7 +362,7 @@ type RollingUpdateDaemonSet struct { // The maximum number of DaemonSet pods that can be unavailable during the // update. Value can be an absolute number (ex: 5) or a percentage of total // number of DaemonSet pods at the start of the update (ex: 10%). Absolute - // number is calculated from percentage by rounding down to a minimum of one. + // number is calculated from percentage by rounding up. // This cannot be 0 if MaxSurge is 0 // Default value is 1. // Example: when this is set to 30%, at most 30% of the total number of nodes @@ -729,7 +734,12 @@ type IngressRuleValue struct { // 2. Consider adding fields for ingress-type specific global options // usable by a loadbalancer, like http keep-alive. - // +optional + // http is a list of http selectors pointing to backends. + // A path is matched against the path of an incoming request. Currently it can + // contain characters disallowed from the conventional "path" part of a URL + // as defined by RFC 3986. Paths must begin with a '/'. + // A backend defines the referenced service endpoint to which the traffic + // will be forwarded to. HTTP *HTTPIngressRuleValue `json:"http,omitempty" protobuf:"bytes,1,opt,name=http"` } @@ -827,6 +837,7 @@ type IngressBackend struct { // +genclient // +genclient:method=GetScale,verb=get,subresource=scale,result=Scale // +genclient:method=UpdateScale,verb=update,subresource=scale,input=Scale,result=Scale +// +genclient:method=ApplyScale,verb=apply,subresource=scale,input=Scale,result=Scale // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +k8s:prerelease-lifecycle-gen:introduced=1.2 // +k8s:prerelease-lifecycle-gen:deprecated=1.8 @@ -1482,8 +1493,8 @@ type NetworkPolicyPort struct { // should be allowed by the policy. This field cannot be defined if the port field // is not defined or if the port field is defined as a named (string) port. // The endPort must be equal or greater than port. - // This feature is in Alpha state and should be enabled using the Feature Gate - // "NetworkPolicyEndPort". + // This feature is in Beta state and is enabled by default. + // It can be disabled using the Feature Gate "NetworkPolicyEndPort". 
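	// Illustrative sketch (not part of this patch): with the gate in Beta, a
	// contiguous TCP range can be expressed by pairing port and endPort. The
	// variable names are hypothetical; intstr is k8s.io/apimachinery/pkg/util/intstr,
	// and Protocol defaults to TCP when left nil.
	//
	//	port := intstr.FromInt(32000)
	//	end := int32(32768)
	//	rule := extensionsv1beta1.NetworkPolicyPort{Port: &port, EndPort: &end} // allow TCP 32000-32768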
// +optional EndPort *int32 `json:"endPort,omitempty" protobuf:"bytes,3,opt,name=endPort"` } diff --git a/vendor/k8s.io/api/extensions/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/extensions/v1beta1/types_swagger_doc_generated.go index 870b607a72bda..d70303fa8c468 100644 --- a/vendor/k8s.io/api/extensions/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/extensions/v1beta1/types_swagger_doc_generated.go @@ -122,6 +122,7 @@ func (DaemonSetStatus) SwaggerDoc() map[string]string { } var map_DaemonSetUpdateStrategy = map[string]string{ + "": "DaemonSetUpdateStrategy indicates the strategy that the DaemonSet controller will use to perform updates. It includes any additional parameters necessary to perform the update for the indicated strategy.", "type": "Type of daemon set update. Can be \"RollingUpdate\" or \"OnDelete\". Default is OnDelete.", "rollingUpdate": "Rolling update config params. Present only if type = \"RollingUpdate\".", } @@ -321,7 +322,8 @@ func (IngressRule) SwaggerDoc() map[string]string { } var map_IngressRuleValue = map[string]string{ - "": "IngressRuleValue represents a rule to apply against incoming requests. If the rule is satisfied, the request is routed to the specified backend. Currently mixing different types of rules in a single Ingress is disallowed, so exactly one of the following must be set.", + "": "IngressRuleValue represents a rule to apply against incoming requests. If the rule is satisfied, the request is routed to the specified backend. Currently mixing different types of rules in a single Ingress is disallowed, so exactly one of the following must be set.", + "http": "http is a list of http selectors pointing to backends. A path is matched against the path of an incoming request. Currently it can contain characters disallowed from the conventional \"path\" part of a URL as defined by RFC 3986. Paths must begin with a '/'. A backend defines the referenced service endpoint to which the traffic will be forwarded to.", } func (IngressRuleValue) SwaggerDoc() map[string]string { @@ -414,7 +416,7 @@ var map_NetworkPolicyPort = map[string]string{ "": "DEPRECATED 1.9 - This group version of NetworkPolicyPort is deprecated by networking/v1/NetworkPolicyPort.", "protocol": "Optional. The protocol (TCP, UDP, or SCTP) which traffic must match. If not specified, this field defaults to TCP.", "port": "The port on the given protocol. This can either be a numerical or named port on a pod. If this field is not provided, this matches all port names and numbers. If present, only traffic on the specified protocol AND port will be matched.", - "endPort": "If set, indicates that the range of ports from port to endPort, inclusive, should be allowed by the policy. This field cannot be defined if the port field is not defined or if the port field is defined as a named (string) port. The endPort must be equal or greater than port. This feature is in Alpha state and should be enabled using the Feature Gate \"NetworkPolicyEndPort\".", + "endPort": "If set, indicates that the range of ports from port to endPort, inclusive, should be allowed by the policy. This field cannot be defined if the port field is not defined or if the port field is defined as a named (string) port. The endPort must be equal or greater than port. This feature is in Beta state and is enabled by default. 
It can be disabled using the Feature Gate \"NetworkPolicyEndPort\".", } func (NetworkPolicyPort) SwaggerDoc() map[string]string { @@ -556,7 +558,7 @@ func (RollbackConfig) SwaggerDoc() map[string]string { var map_RollingUpdateDaemonSet = map[string]string{ "": "Spec to control the desired behavior of daemon set rolling update.", - "maxUnavailable": "The maximum number of DaemonSet pods that can be unavailable during the update. Value can be an absolute number (ex: 5) or a percentage of total number of DaemonSet pods at the start of the update (ex: 10%). Absolute number is calculated from percentage by rounding down to a minimum of one. This cannot be 0 if MaxSurge is 0 Default value is 1. Example: when this is set to 30%, at most 30% of the total number of nodes that should be running the daemon pod (i.e. status.desiredNumberScheduled) can have their pods stopped for an update at any given time. The update starts by stopping at most 30% of those DaemonSet pods and then brings up new DaemonSet pods in their place. Once the new pods are available, it then proceeds onto other DaemonSet pods, thus ensuring that at least 70% of original number of DaemonSet pods are available at all times during the update.", + "maxUnavailable": "The maximum number of DaemonSet pods that can be unavailable during the update. Value can be an absolute number (ex: 5) or a percentage of total number of DaemonSet pods at the start of the update (ex: 10%). Absolute number is calculated from percentage by rounding up. This cannot be 0 if MaxSurge is 0 Default value is 1. Example: when this is set to 30%, at most 30% of the total number of nodes that should be running the daemon pod (i.e. status.desiredNumberScheduled) can have their pods stopped for an update at any given time. The update starts by stopping at most 30% of those DaemonSet pods and then brings up new DaemonSet pods in their place. Once the new pods are available, it then proceeds onto other DaemonSet pods, thus ensuring that at least 70% of original number of DaemonSet pods are available at all times during the update.", "maxSurge": "The maximum number of nodes with an existing available DaemonSet pod that can have an updated DaemonSet pod during an update. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). This can not be 0 if MaxUnavailable is 0. Absolute number is calculated from percentage by rounding up to a minimum of 1. Default value is 0. Example: when this is set to 30%, at most 30% of the total number of nodes that should be running the daemon pod (i.e. status.desiredNumberScheduled) can have a new pod created before the old pod is marked as deleted. The update starts by launching new pods on 30% of nodes. Once an updated pod is available (Ready for at least minReadySeconds) the old DaemonSet pod on that node is marked deleted. If the old pod becomes unavailable for any reason (Ready transitions to false, is evicted, or is drained) an updated pod is immediately created on that node without considering surge limits. Allowing surge implies the possibility that the resources consumed by the daemonset on any given node can double if the readiness check fails, and so resource intensive daemonsets should take into account that they may cause evictions during disruption. 
This is an alpha field and requires enabling DaemonSetUpdateSurge feature gate.", } diff --git a/vendor/k8s.io/api/extensions/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/extensions/v1beta1/zz_generated.deepcopy.go index 838315610942f..e0961588e0679 100644 --- a/vendor/k8s.io/api/extensions/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/extensions/v1beta1/zz_generated.deepcopy.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* diff --git a/vendor/k8s.io/api/extensions/v1beta1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/extensions/v1beta1/zz_generated.prerelease-lifecycle.go index 5023dd31a5dfd..963aaffba35a4 100644 --- a/vendor/k8s.io/api/extensions/v1beta1/zz_generated.prerelease-lifecycle.go +++ b/vendor/k8s.io/api/extensions/v1beta1/zz_generated.prerelease-lifecycle.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* diff --git a/vendor/k8s.io/api/flowcontrol/v1alpha1/generated.proto b/vendor/k8s.io/api/flowcontrol/v1alpha1/generated.proto index 7b19a273e5463..6c0cf93420027 100644 --- a/vendor/k8s.io/api/flowcontrol/v1alpha1/generated.proto +++ b/vendor/k8s.io/api/flowcontrol/v1alpha1/generated.proto @@ -352,8 +352,10 @@ message QueuingConfiguration { // ResourcePolicyRule matches a resource request if and only if: (a) // at least one member of verbs matches the request, (b) at least one // member of apiGroups matches the request, (c) at least one member of -// resources matches the request, and (d) least one member of -// namespaces matches the request. +// resources matches the request, and (d) either (d1) the request does +// not specify a namespace (i.e., `Namespace==""`) and clusterScope is +// true or (d2) the request specifies a namespace and least one member +// of namespaces matches the request's namespace. message ResourcePolicyRule { // `verbs` is a list of matching verbs and may not be empty. // "*" matches all verbs and, if present, must be the only entry. @@ -411,16 +413,20 @@ message ServiceAccountSubject { // ways of matching an originator; by user, group, or service account. // +union message Subject { + // `kind` indicates which one of the other fields is non-empty. // Required // +unionDiscriminator optional string kind = 1; + // `user` matches based on username. // +optional optional UserSubject user = 2; + // `group` matches based on user group name. // +optional optional GroupSubject group = 3; + // `serviceAccount` matches ServiceAccounts. // +optional optional ServiceAccountSubject serviceAccount = 4; } diff --git a/vendor/k8s.io/api/flowcontrol/v1alpha1/types.go b/vendor/k8s.io/api/flowcontrol/v1alpha1/types.go index 1e9701fc36ca9..5af677e2f53df 100644 --- a/vendor/k8s.io/api/flowcontrol/v1alpha1/types.go +++ b/vendor/k8s.io/api/flowcontrol/v1alpha1/types.go @@ -63,7 +63,7 @@ const ( // +k8s:prerelease-lifecycle-gen:introduced=1.18 // +k8s:prerelease-lifecycle-gen:deprecated=1.20 // +k8s:prerelease-lifecycle-gen:removed=1.21 -// +k8s:prerelease-lifecycle-gen:replacement=flowcontrol.apiserver.k8s.io,v1beta1,FlowSchema +// +k8s:prerelease-lifecycle-gen:replacement=flowcontrol.apiserver.k8s.io,v1beta2,FlowSchema // FlowSchema defines the schema of a group of flows. Note that a flow is made up of a set of inbound API requests with // similar attributes and is identified by a pair of strings: the name of the FlowSchema and a "flow distinguisher". 
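
A minimal sketch of the Subject union documented in the hunks above (illustrative, not part of the patch; it assumes this package's SubjectKind* constants, and the variable name is hypothetical): exactly one payload field is populated, and `kind` names it.

	subject := v1alpha1.Subject{
		// Discriminator: tells consumers which of the three pointer fields is non-empty.
		Kind: v1alpha1.SubjectKindServiceAccount,
		ServiceAccount: &v1alpha1.ServiceAccountSubject{
			Namespace: "kube-system",
			Name:      "generic-garbage-collector",
		},
	}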
@@ -87,7 +87,7 @@ type FlowSchema struct { // +k8s:prerelease-lifecycle-gen:introduced=1.18 // +k8s:prerelease-lifecycle-gen:deprecated=1.20 // +k8s:prerelease-lifecycle-gen:removed=1.21 -// +k8s:prerelease-lifecycle-gen:replacement=flowcontrol.apiserver.k8s.io,v1beta1,FlowSchemaList +// +k8s:prerelease-lifecycle-gen:replacement=flowcontrol.apiserver.k8s.io,v1beta2,FlowSchemaList // FlowSchemaList is a list of FlowSchema objects. type FlowSchemaList struct { @@ -185,13 +185,17 @@ type PolicyRulesWithSubjects struct { // ways of matching an originator; by user, group, or service account. // +union type Subject struct { + // `kind` indicates which one of the other fields is non-empty. // Required // +unionDiscriminator Kind SubjectKind `json:"kind" protobuf:"bytes,1,opt,name=kind"` + // `user` matches based on username. // +optional User *UserSubject `json:"user,omitempty" protobuf:"bytes,2,opt,name=user"` + // `group` matches based on user group name. // +optional Group *GroupSubject `json:"group,omitempty" protobuf:"bytes,3,opt,name=group"` + // `serviceAccount` matches ServiceAccounts. // +optional ServiceAccount *ServiceAccountSubject `json:"serviceAccount,omitempty" protobuf:"bytes,4,opt,name=serviceAccount"` } @@ -237,8 +241,10 @@ type ServiceAccountSubject struct { // ResourcePolicyRule matches a resource request if and only if: (a) // at least one member of verbs matches the request, (b) at least one // member of apiGroups matches the request, (c) at least one member of -// resources matches the request, and (d) least one member of -// namespaces matches the request. +// resources matches the request, and (d) either (d1) the request does +// not specify a namespace (i.e., `Namespace==""`) and clusterScope is +// true or (d2) the request specifies a namespace and least one member +// of namespaces matches the request's namespace. type ResourcePolicyRule struct { // `verbs` is a list of matching verbs and may not be empty. // "*" matches all verbs and, if present, must be the only entry. @@ -338,7 +344,7 @@ type FlowSchemaConditionType string // +k8s:prerelease-lifecycle-gen:introduced=1.18 // +k8s:prerelease-lifecycle-gen:deprecated=1.20 // +k8s:prerelease-lifecycle-gen:removed=1.21 -// +k8s:prerelease-lifecycle-gen:replacement=flowcontrol.apiserver.k8s.io,v1beta1,PriorityLevelConfiguration +// +k8s:prerelease-lifecycle-gen:replacement=flowcontrol.apiserver.k8s.io,v1beta2,PriorityLevelConfiguration // PriorityLevelConfiguration represents the configuration of a priority level. type PriorityLevelConfiguration struct { @@ -361,7 +367,7 @@ type PriorityLevelConfiguration struct { // +k8s:prerelease-lifecycle-gen:introduced=1.18 // +k8s:prerelease-lifecycle-gen:deprecated=1.20 // +k8s:prerelease-lifecycle-gen:removed=1.21 -// +k8s:prerelease-lifecycle-gen:replacement=flowcontrol.apiserver.k8s.io,v1beta1,PriorityLevelConfigurationList +// +k8s:prerelease-lifecycle-gen:replacement=flowcontrol.apiserver.k8s.io,v1beta2,PriorityLevelConfigurationList // PriorityLevelConfigurationList is a list of PriorityLevelConfiguration objects. 
type PriorityLevelConfigurationList struct { diff --git a/vendor/k8s.io/api/flowcontrol/v1alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/api/flowcontrol/v1alpha1/types_swagger_doc_generated.go index 211d55e5e8b0e..1827be02d7e8a 100644 --- a/vendor/k8s.io/api/flowcontrol/v1alpha1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/flowcontrol/v1alpha1/types_swagger_doc_generated.go @@ -215,7 +215,7 @@ func (QueuingConfiguration) SwaggerDoc() map[string]string { } var map_ResourcePolicyRule = map[string]string{ - "": "ResourcePolicyRule is a predicate that matches some resource requests, testing the request's verb and the target resource. A ResourcePolicyRule matches a resource request if and only if: (a) at least one member of verbs matches the request, (b) at least one member of apiGroups matches the request, (c) at least one member of resources matches the request, and (d) least one member of namespaces matches the request.", + "": "ResourcePolicyRule is a predicate that matches some resource requests, testing the request's verb and the target resource. A ResourcePolicyRule matches a resource request if and only if: (a) at least one member of verbs matches the request, (b) at least one member of apiGroups matches the request, (c) at least one member of resources matches the request, and (d) either (d1) the request does not specify a namespace (i.e., `Namespace==\"\"`) and clusterScope is true or (d2) the request specifies a namespace and least one member of namespaces matches the request's namespace.", "verbs": "`verbs` is a list of matching verbs and may not be empty. \"*\" matches all verbs and, if present, must be the only entry. Required.", "apiGroups": "`apiGroups` is a list of matching API groups and may not be empty. \"*\" matches all API groups and, if present, must be the only entry. Required.", "resources": "`resources` is a list of matching resources (i.e., lowercase and plural) with, if desired, subresource. For example, [ \"services\", \"nodes/status\" ]. This list may not be empty. \"*\" matches all resources and, if present, must be the only entry. Required.", @@ -238,8 +238,11 @@ func (ServiceAccountSubject) SwaggerDoc() map[string]string { } var map_Subject = map[string]string{ - "": "Subject matches the originator of a request, as identified by the request authentication system. There are three ways of matching an originator; by user, group, or service account.", - "kind": "Required", + "": "Subject matches the originator of a request, as identified by the request authentication system. There are three ways of matching an originator; by user, group, or service account.", + "kind": "`kind` indicates which one of the other fields is non-empty. 
Required", + "user": "`user` matches based on username.", + "group": "`group` matches based on user group name.", + "serviceAccount": "`serviceAccount` matches ServiceAccounts.", } func (Subject) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/flowcontrol/v1alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/api/flowcontrol/v1alpha1/zz_generated.deepcopy.go index f5d37954b1da4..7f73f46061f02 100644 --- a/vendor/k8s.io/api/flowcontrol/v1alpha1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/flowcontrol/v1alpha1/zz_generated.deepcopy.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* diff --git a/vendor/k8s.io/api/flowcontrol/v1alpha1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/flowcontrol/v1alpha1/zz_generated.prerelease-lifecycle.go index 4152aa2a91126..2260141609c79 100644 --- a/vendor/k8s.io/api/flowcontrol/v1alpha1/zz_generated.prerelease-lifecycle.go +++ b/vendor/k8s.io/api/flowcontrol/v1alpha1/zz_generated.prerelease-lifecycle.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* @@ -39,7 +40,7 @@ func (in *FlowSchema) APILifecycleDeprecated() (major, minor int) { // APILifecycleReplacement is an autogenerated function, returning the group, version, and kind that should be used instead of this deprecated type. // It is controlled by "k8s:prerelease-lifecycle-gen:replacement=,," tags in types.go. func (in *FlowSchema) APILifecycleReplacement() schema.GroupVersionKind { - return schema.GroupVersionKind{Group: "flowcontrol.apiserver.k8s.io", Version: "v1beta1", Kind: "FlowSchema"} + return schema.GroupVersionKind{Group: "flowcontrol.apiserver.k8s.io", Version: "v1beta2", Kind: "FlowSchema"} } // APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. @@ -63,7 +64,7 @@ func (in *FlowSchemaList) APILifecycleDeprecated() (major, minor int) { // APILifecycleReplacement is an autogenerated function, returning the group, version, and kind that should be used instead of this deprecated type. // It is controlled by "k8s:prerelease-lifecycle-gen:replacement=,," tags in types.go. func (in *FlowSchemaList) APILifecycleReplacement() schema.GroupVersionKind { - return schema.GroupVersionKind{Group: "flowcontrol.apiserver.k8s.io", Version: "v1beta1", Kind: "FlowSchemaList"} + return schema.GroupVersionKind{Group: "flowcontrol.apiserver.k8s.io", Version: "v1beta2", Kind: "FlowSchemaList"} } // APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. @@ -87,7 +88,7 @@ func (in *PriorityLevelConfiguration) APILifecycleDeprecated() (major, minor int // APILifecycleReplacement is an autogenerated function, returning the group, version, and kind that should be used instead of this deprecated type. // It is controlled by "k8s:prerelease-lifecycle-gen:replacement=,," tags in types.go. func (in *PriorityLevelConfiguration) APILifecycleReplacement() schema.GroupVersionKind { - return schema.GroupVersionKind{Group: "flowcontrol.apiserver.k8s.io", Version: "v1beta1", Kind: "PriorityLevelConfiguration"} + return schema.GroupVersionKind{Group: "flowcontrol.apiserver.k8s.io", Version: "v1beta2", Kind: "PriorityLevelConfiguration"} } // APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. 
@@ -111,7 +112,7 @@ func (in *PriorityLevelConfigurationList) APILifecycleDeprecated() (major, minor // APILifecycleReplacement is an autogenerated function, returning the group, version, and kind that should be used instead of this deprecated type. // It is controlled by "k8s:prerelease-lifecycle-gen:replacement=,," tags in types.go. func (in *PriorityLevelConfigurationList) APILifecycleReplacement() schema.GroupVersionKind { - return schema.GroupVersionKind{Group: "flowcontrol.apiserver.k8s.io", Version: "v1beta1", Kind: "PriorityLevelConfigurationList"} + return schema.GroupVersionKind{Group: "flowcontrol.apiserver.k8s.io", Version: "v1beta2", Kind: "PriorityLevelConfigurationList"} } // APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. diff --git a/vendor/k8s.io/api/flowcontrol/v1beta1/generated.proto b/vendor/k8s.io/api/flowcontrol/v1beta1/generated.proto index 9ddfc5465bd94..309d397ff2c37 100644 --- a/vendor/k8s.io/api/flowcontrol/v1beta1/generated.proto +++ b/vendor/k8s.io/api/flowcontrol/v1beta1/generated.proto @@ -352,8 +352,10 @@ message QueuingConfiguration { // ResourcePolicyRule matches a resource request if and only if: (a) // at least one member of verbs matches the request, (b) at least one // member of apiGroups matches the request, (c) at least one member of -// resources matches the request, and (d) least one member of -// namespaces matches the request. +// resources matches the request, and (d) either (d1) the request does +// not specify a namespace (i.e., `Namespace==""`) and clusterScope is +// true or (d2) the request specifies a namespace and least one member +// of namespaces matches the request's namespace. message ResourcePolicyRule { // `verbs` is a list of matching verbs and may not be empty. // "*" matches all verbs and, if present, must be the only entry. @@ -411,16 +413,20 @@ message ServiceAccountSubject { // ways of matching an originator; by user, group, or service account. // +union message Subject { + // `kind` indicates which one of the other fields is non-empty. // Required // +unionDiscriminator optional string kind = 1; + // `user` matches based on username. // +optional optional UserSubject user = 2; + // `group` matches based on user group name. // +optional optional GroupSubject group = 3; + // `serviceAccount` matches ServiceAccounts. // +optional optional ServiceAccountSubject serviceAccount = 4; } diff --git a/vendor/k8s.io/api/flowcontrol/v1beta1/types.go b/vendor/k8s.io/api/flowcontrol/v1beta1/types.go index ece834e929251..b45732642983b 100644 --- a/vendor/k8s.io/api/flowcontrol/v1beta1/types.go +++ b/vendor/k8s.io/api/flowcontrol/v1beta1/types.go @@ -57,10 +57,55 @@ const ( ResponseHeaderMatchedFlowSchemaUID = "X-Kubernetes-PF-FlowSchema-UID" ) +const ( + // AutoUpdateAnnotationKey is the name of an annotation that enables + // automatic update of the spec of the bootstrap configuration + // object(s), if set to 'true'. + // + // On a fresh install, all bootstrap configuration objects will have auto + // update enabled with the following annotation key: + // apf.kubernetes.io/autoupdate-spec: 'true' + // + // The kube-apiserver periodically checks the bootstrap configuration + // objects on the cluster and applies updates if necessary. + // + // kube-apiserver enforces an 'always auto-update' policy for the + // mandatory configuration object(s). 
This implies: + // - the auto-update annotation key is added with a value of 'true' + // if it is missing. + // - the auto-update annotation key is set to 'true' if its current value + // is a boolean false or has an invalid boolean representation + // (if the cluster operator sets it to 'false' it will be stomped) + // - any changes to the spec made by the cluster operator will be + // stomped. + // + // The kube-apiserver will apply updates on the suggested configuration if: + // - the cluster operator has enabled auto-update by setting the annotation + // (apf.kubernetes.io/autoupdate-spec: 'true') or + // - the annotation key is missing but the generation is 1 + // + // If the suggested configuration object is missing the annotation key, + // kube-apiserver will update the annotation appropriately: + // - it is set to 'true' if generation of the object is '1' which usually + // indicates that the spec of the object has not been changed. + // - it is set to 'false' if generation of the object is greater than 1. + // + // The goal is to enable the kube-apiserver to apply update on suggested + // configuration objects installed by previous releases but not overwrite + // changes made by the cluster operators. + // Note that this distinction is imperfectly detected: in the case where an + // operator deletes a suggested configuration object and later creates it + // but with a variant spec and then does no updates of the object + // (generation is 1), the technique outlined above will incorrectly + // determine that the object should be auto-updated. + AutoUpdateAnnotationKey = "apf.kubernetes.io/autoupdate-spec" +) + // +genclient // +genclient:nonNamespaced // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +k8s:prerelease-lifecycle-gen:introduced=1.20 +// +k8s:prerelease-lifecycle-gen:replacement=flowcontrol.apiserver.k8s.io,v1beta2,FlowSchema // FlowSchema defines the schema of a group of flows. Note that a flow is made up of a set of inbound API requests with // similar attributes and is identified by a pair of strings: the name of the FlowSchema and a "flow distinguisher". @@ -82,6 +127,7 @@ type FlowSchema struct { // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +k8s:prerelease-lifecycle-gen:introduced=1.20 +// +k8s:prerelease-lifecycle-gen:replacement=flowcontrol.apiserver.k8s.io,v1beta2,FlowSchemaList // FlowSchemaList is a list of FlowSchema objects. type FlowSchemaList struct { @@ -179,13 +225,17 @@ type PolicyRulesWithSubjects struct { // ways of matching an originator; by user, group, or service account. // +union type Subject struct { + // `kind` indicates which one of the other fields is non-empty. // Required // +unionDiscriminator Kind SubjectKind `json:"kind" protobuf:"bytes,1,opt,name=kind"` + // `user` matches based on username. // +optional User *UserSubject `json:"user,omitempty" protobuf:"bytes,2,opt,name=user"` + // `group` matches based on user group name. // +optional Group *GroupSubject `json:"group,omitempty" protobuf:"bytes,3,opt,name=group"` + // `serviceAccount` matches ServiceAccounts. 
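	// Illustrative sketch (not part of this patch): the AutoUpdateAnnotationKey
	// constant introduced above reads like any other annotation; `obj` is a
	// hypothetical suggested FlowSchema fetched from the cluster.
	//
	//	if obj.Annotations[AutoUpdateAnnotationKey] == "true" {
	//		// kube-apiserver may still reconcile this object's spec
	//	}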
// +optional ServiceAccount *ServiceAccountSubject `json:"serviceAccount,omitempty" protobuf:"bytes,4,opt,name=serviceAccount"` } @@ -231,8 +281,10 @@ type ServiceAccountSubject struct { // ResourcePolicyRule matches a resource request if and only if: (a) // at least one member of verbs matches the request, (b) at least one // member of apiGroups matches the request, (c) at least one member of -// resources matches the request, and (d) least one member of -// namespaces matches the request. +// resources matches the request, and (d) either (d1) the request does +// not specify a namespace (i.e., `Namespace==""`) and clusterScope is +// true or (d2) the request specifies a namespace and least one member +// of namespaces matches the request's namespace. type ResourcePolicyRule struct { // `verbs` is a list of matching verbs and may not be empty. // "*" matches all verbs and, if present, must be the only entry. @@ -330,6 +382,7 @@ type FlowSchemaConditionType string // +genclient:nonNamespaced // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +k8s:prerelease-lifecycle-gen:introduced=1.20 +// +k8s:prerelease-lifecycle-gen:replacement=flowcontrol.apiserver.k8s.io,v1beta2,PriorityLevelConfiguration // PriorityLevelConfiguration represents the configuration of a priority level. type PriorityLevelConfiguration struct { @@ -350,6 +403,7 @@ type PriorityLevelConfiguration struct { // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +k8s:prerelease-lifecycle-gen:introduced=1.20 +// +k8s:prerelease-lifecycle-gen:replacement=flowcontrol.apiserver.k8s.io,v1beta2,PriorityLevelConfigurationList // PriorityLevelConfigurationList is a list of PriorityLevelConfiguration objects. type PriorityLevelConfigurationList struct { diff --git a/vendor/k8s.io/api/flowcontrol/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/flowcontrol/v1beta1/types_swagger_doc_generated.go index 8343a883f8615..b3752b6fb7dc3 100644 --- a/vendor/k8s.io/api/flowcontrol/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/flowcontrol/v1beta1/types_swagger_doc_generated.go @@ -215,7 +215,7 @@ func (QueuingConfiguration) SwaggerDoc() map[string]string { } var map_ResourcePolicyRule = map[string]string{ - "": "ResourcePolicyRule is a predicate that matches some resource requests, testing the request's verb and the target resource. A ResourcePolicyRule matches a resource request if and only if: (a) at least one member of verbs matches the request, (b) at least one member of apiGroups matches the request, (c) at least one member of resources matches the request, and (d) least one member of namespaces matches the request.", + "": "ResourcePolicyRule is a predicate that matches some resource requests, testing the request's verb and the target resource. A ResourcePolicyRule matches a resource request if and only if: (a) at least one member of verbs matches the request, (b) at least one member of apiGroups matches the request, (c) at least one member of resources matches the request, and (d) either (d1) the request does not specify a namespace (i.e., `Namespace==\"\"`) and clusterScope is true or (d2) the request specifies a namespace and least one member of namespaces matches the request's namespace.", "verbs": "`verbs` is a list of matching verbs and may not be empty. \"*\" matches all verbs and, if present, must be the only entry. Required.", "apiGroups": "`apiGroups` is a list of matching API groups and may not be empty. 
\"*\" matches all API groups and, if present, must be the only entry. Required.", "resources": "`resources` is a list of matching resources (i.e., lowercase and plural) with, if desired, subresource. For example, [ \"services\", \"nodes/status\" ]. This list may not be empty. \"*\" matches all resources and, if present, must be the only entry. Required.", @@ -238,8 +238,11 @@ func (ServiceAccountSubject) SwaggerDoc() map[string]string { } var map_Subject = map[string]string{ - "": "Subject matches the originator of a request, as identified by the request authentication system. There are three ways of matching an originator; by user, group, or service account.", - "kind": "Required", + "": "Subject matches the originator of a request, as identified by the request authentication system. There are three ways of matching an originator; by user, group, or service account.", + "kind": "`kind` indicates which one of the other fields is non-empty. Required", + "user": "`user` matches based on username.", + "group": "`group` matches based on user group name.", + "serviceAccount": "`serviceAccount` matches ServiceAccounts.", } func (Subject) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/flowcontrol/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/flowcontrol/v1beta1/zz_generated.deepcopy.go index c8f6e23060a20..b7b84634ace5d 100644 --- a/vendor/k8s.io/api/flowcontrol/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/flowcontrol/v1beta1/zz_generated.deepcopy.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* diff --git a/vendor/k8s.io/api/flowcontrol/v1beta1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/flowcontrol/v1beta1/zz_generated.prerelease-lifecycle.go index d0f2297181de8..ed1e16c26a5fd 100644 --- a/vendor/k8s.io/api/flowcontrol/v1beta1/zz_generated.prerelease-lifecycle.go +++ b/vendor/k8s.io/api/flowcontrol/v1beta1/zz_generated.prerelease-lifecycle.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* @@ -20,6 +21,10 @@ limitations under the License. package v1beta1 +import ( + schema "k8s.io/apimachinery/pkg/runtime/schema" +) + // APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. // It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. func (in *FlowSchema) APILifecycleIntroduced() (major, minor int) { @@ -32,6 +37,12 @@ func (in *FlowSchema) APILifecycleDeprecated() (major, minor int) { return 1, 23 } +// APILifecycleReplacement is an autogenerated function, returning the group, version, and kind that should be used instead of this deprecated type. +// It is controlled by "k8s:prerelease-lifecycle-gen:replacement=,," tags in types.go. +func (in *FlowSchema) APILifecycleReplacement() schema.GroupVersionKind { + return schema.GroupVersionKind{Group: "flowcontrol.apiserver.k8s.io", Version: "v1beta2", Kind: "FlowSchema"} +} + // APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. // It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. 
func (in *FlowSchema) APILifecycleRemoved() (major, minor int) { @@ -50,6 +61,12 @@ func (in *FlowSchemaList) APILifecycleDeprecated() (major, minor int) { return 1, 23 } +// APILifecycleReplacement is an autogenerated function, returning the group, version, and kind that should be used instead of this deprecated type. +// It is controlled by "k8s:prerelease-lifecycle-gen:replacement=,," tags in types.go. +func (in *FlowSchemaList) APILifecycleReplacement() schema.GroupVersionKind { + return schema.GroupVersionKind{Group: "flowcontrol.apiserver.k8s.io", Version: "v1beta2", Kind: "FlowSchemaList"} +} + // APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. // It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. func (in *FlowSchemaList) APILifecycleRemoved() (major, minor int) { @@ -68,6 +85,12 @@ func (in *PriorityLevelConfiguration) APILifecycleDeprecated() (major, minor int return 1, 23 } +// APILifecycleReplacement is an autogenerated function, returning the group, version, and kind that should be used instead of this deprecated type. +// It is controlled by "k8s:prerelease-lifecycle-gen:replacement=,," tags in types.go. +func (in *PriorityLevelConfiguration) APILifecycleReplacement() schema.GroupVersionKind { + return schema.GroupVersionKind{Group: "flowcontrol.apiserver.k8s.io", Version: "v1beta2", Kind: "PriorityLevelConfiguration"} +} + // APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. // It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. func (in *PriorityLevelConfiguration) APILifecycleRemoved() (major, minor int) { @@ -86,6 +109,12 @@ func (in *PriorityLevelConfigurationList) APILifecycleDeprecated() (major, minor return 1, 23 } +// APILifecycleReplacement is an autogenerated function, returning the group, version, and kind that should be used instead of this deprecated type. +// It is controlled by "k8s:prerelease-lifecycle-gen:replacement=,," tags in types.go. +func (in *PriorityLevelConfigurationList) APILifecycleReplacement() schema.GroupVersionKind { + return schema.GroupVersionKind{Group: "flowcontrol.apiserver.k8s.io", Version: "v1beta2", Kind: "PriorityLevelConfigurationList"} +} + // APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. // It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. func (in *PriorityLevelConfigurationList) APILifecycleRemoved() (major, minor int) { diff --git a/vendor/k8s.io/api/flowcontrol/v1beta2/doc.go b/vendor/k8s.io/api/flowcontrol/v1beta2/doc.go new file mode 100644 index 0000000000000..53b460d374d69 --- /dev/null +++ b/vendor/k8s.io/api/flowcontrol/v1beta2/doc.go @@ -0,0 +1,25 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:deepcopy-gen=package +// +k8s:protobuf-gen=package +// +k8s:openapi-gen=true +// +k8s:prerelease-lifecycle-gen=true + +// +groupName=flowcontrol.apiserver.k8s.io + +// Package v1beta2 holds api types of version v1beta2 for group "flowcontrol.apiserver.k8s.io". +package v1beta2 // import "k8s.io/api/flowcontrol/v1beta2" diff --git a/vendor/k8s.io/api/flowcontrol/v1beta2/generated.pb.go b/vendor/k8s.io/api/flowcontrol/v1beta2/generated.pb.go new file mode 100644 index 0000000000000..fe550271b183a --- /dev/null +++ b/vendor/k8s.io/api/flowcontrol/v1beta2/generated.pb.go @@ -0,0 +1,5367 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: k8s.io/kubernetes/vendor/k8s.io/api/flowcontrol/v1beta2/generated.proto + +package v1beta2 + +import ( + fmt "fmt" + + io "io" + + proto "github.com/gogo/protobuf/proto" + + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +func (m *FlowDistinguisherMethod) Reset() { *m = FlowDistinguisherMethod{} } +func (*FlowDistinguisherMethod) ProtoMessage() {} +func (*FlowDistinguisherMethod) Descriptor() ([]byte, []int) { + return fileDescriptor_ed300aa8e672704e, []int{0} +} +func (m *FlowDistinguisherMethod) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *FlowDistinguisherMethod) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *FlowDistinguisherMethod) XXX_Merge(src proto.Message) { + xxx_messageInfo_FlowDistinguisherMethod.Merge(m, src) +} +func (m *FlowDistinguisherMethod) XXX_Size() int { + return m.Size() +} +func (m *FlowDistinguisherMethod) XXX_DiscardUnknown() { + xxx_messageInfo_FlowDistinguisherMethod.DiscardUnknown(m) +} + +var xxx_messageInfo_FlowDistinguisherMethod proto.InternalMessageInfo + +func (m *FlowSchema) Reset() { *m = FlowSchema{} } +func (*FlowSchema) ProtoMessage() {} +func (*FlowSchema) Descriptor() ([]byte, []int) { + return fileDescriptor_ed300aa8e672704e, []int{1} +} +func (m *FlowSchema) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *FlowSchema) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *FlowSchema) XXX_Merge(src proto.Message) { + xxx_messageInfo_FlowSchema.Merge(m, src) +} +func (m *FlowSchema) XXX_Size() int { + return m.Size() +} +func (m *FlowSchema) XXX_DiscardUnknown() { + xxx_messageInfo_FlowSchema.DiscardUnknown(m) +} + +var xxx_messageInfo_FlowSchema proto.InternalMessageInfo + +func (m *FlowSchemaCondition) Reset() { *m = FlowSchemaCondition{} } +func (*FlowSchemaCondition) ProtoMessage() {} +func (*FlowSchemaCondition) Descriptor() ([]byte, []int) { + return fileDescriptor_ed300aa8e672704e, []int{2} +} +func (m *FlowSchemaCondition) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *FlowSchemaCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *FlowSchemaCondition) XXX_Merge(src proto.Message) { + xxx_messageInfo_FlowSchemaCondition.Merge(m, src) +} +func (m *FlowSchemaCondition) XXX_Size() int { + return m.Size() +} +func (m *FlowSchemaCondition) XXX_DiscardUnknown() { + xxx_messageInfo_FlowSchemaCondition.DiscardUnknown(m) +} + +var xxx_messageInfo_FlowSchemaCondition proto.InternalMessageInfo + +func (m *FlowSchemaList) Reset() { *m = FlowSchemaList{} } +func (*FlowSchemaList) ProtoMessage() {} +func (*FlowSchemaList) Descriptor() ([]byte, []int) { + return fileDescriptor_ed300aa8e672704e, []int{3} +} +func (m *FlowSchemaList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *FlowSchemaList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *FlowSchemaList) XXX_Merge(src proto.Message) { + xxx_messageInfo_FlowSchemaList.Merge(m, src) +} +func (m *FlowSchemaList) XXX_Size() int { + return m.Size() +} +func (m *FlowSchemaList) XXX_DiscardUnknown() { + xxx_messageInfo_FlowSchemaList.DiscardUnknown(m) +} + +var xxx_messageInfo_FlowSchemaList 
proto.InternalMessageInfo + +func (m *FlowSchemaSpec) Reset() { *m = FlowSchemaSpec{} } +func (*FlowSchemaSpec) ProtoMessage() {} +func (*FlowSchemaSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_ed300aa8e672704e, []int{4} +} +func (m *FlowSchemaSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *FlowSchemaSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *FlowSchemaSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_FlowSchemaSpec.Merge(m, src) +} +func (m *FlowSchemaSpec) XXX_Size() int { + return m.Size() +} +func (m *FlowSchemaSpec) XXX_DiscardUnknown() { + xxx_messageInfo_FlowSchemaSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_FlowSchemaSpec proto.InternalMessageInfo + +func (m *FlowSchemaStatus) Reset() { *m = FlowSchemaStatus{} } +func (*FlowSchemaStatus) ProtoMessage() {} +func (*FlowSchemaStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_ed300aa8e672704e, []int{5} +} +func (m *FlowSchemaStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *FlowSchemaStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *FlowSchemaStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_FlowSchemaStatus.Merge(m, src) +} +func (m *FlowSchemaStatus) XXX_Size() int { + return m.Size() +} +func (m *FlowSchemaStatus) XXX_DiscardUnknown() { + xxx_messageInfo_FlowSchemaStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_FlowSchemaStatus proto.InternalMessageInfo + +func (m *GroupSubject) Reset() { *m = GroupSubject{} } +func (*GroupSubject) ProtoMessage() {} +func (*GroupSubject) Descriptor() ([]byte, []int) { + return fileDescriptor_ed300aa8e672704e, []int{6} +} +func (m *GroupSubject) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GroupSubject) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *GroupSubject) XXX_Merge(src proto.Message) { + xxx_messageInfo_GroupSubject.Merge(m, src) +} +func (m *GroupSubject) XXX_Size() int { + return m.Size() +} +func (m *GroupSubject) XXX_DiscardUnknown() { + xxx_messageInfo_GroupSubject.DiscardUnknown(m) +} + +var xxx_messageInfo_GroupSubject proto.InternalMessageInfo + +func (m *LimitResponse) Reset() { *m = LimitResponse{} } +func (*LimitResponse) ProtoMessage() {} +func (*LimitResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_ed300aa8e672704e, []int{7} +} +func (m *LimitResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *LimitResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *LimitResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_LimitResponse.Merge(m, src) +} +func (m *LimitResponse) XXX_Size() int { + return m.Size() +} +func (m *LimitResponse) XXX_DiscardUnknown() { + xxx_messageInfo_LimitResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_LimitResponse proto.InternalMessageInfo + +func (m *LimitedPriorityLevelConfiguration) Reset() { *m = LimitedPriorityLevelConfiguration{} } +func (*LimitedPriorityLevelConfiguration) ProtoMessage() {} +func 
(*LimitedPriorityLevelConfiguration) Descriptor() ([]byte, []int) { + return fileDescriptor_ed300aa8e672704e, []int{8} +} +func (m *LimitedPriorityLevelConfiguration) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *LimitedPriorityLevelConfiguration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *LimitedPriorityLevelConfiguration) XXX_Merge(src proto.Message) { + xxx_messageInfo_LimitedPriorityLevelConfiguration.Merge(m, src) +} +func (m *LimitedPriorityLevelConfiguration) XXX_Size() int { + return m.Size() +} +func (m *LimitedPriorityLevelConfiguration) XXX_DiscardUnknown() { + xxx_messageInfo_LimitedPriorityLevelConfiguration.DiscardUnknown(m) +} + +var xxx_messageInfo_LimitedPriorityLevelConfiguration proto.InternalMessageInfo + +func (m *NonResourcePolicyRule) Reset() { *m = NonResourcePolicyRule{} } +func (*NonResourcePolicyRule) ProtoMessage() {} +func (*NonResourcePolicyRule) Descriptor() ([]byte, []int) { + return fileDescriptor_ed300aa8e672704e, []int{9} +} +func (m *NonResourcePolicyRule) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NonResourcePolicyRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *NonResourcePolicyRule) XXX_Merge(src proto.Message) { + xxx_messageInfo_NonResourcePolicyRule.Merge(m, src) +} +func (m *NonResourcePolicyRule) XXX_Size() int { + return m.Size() +} +func (m *NonResourcePolicyRule) XXX_DiscardUnknown() { + xxx_messageInfo_NonResourcePolicyRule.DiscardUnknown(m) +} + +var xxx_messageInfo_NonResourcePolicyRule proto.InternalMessageInfo + +func (m *PolicyRulesWithSubjects) Reset() { *m = PolicyRulesWithSubjects{} } +func (*PolicyRulesWithSubjects) ProtoMessage() {} +func (*PolicyRulesWithSubjects) Descriptor() ([]byte, []int) { + return fileDescriptor_ed300aa8e672704e, []int{10} +} +func (m *PolicyRulesWithSubjects) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PolicyRulesWithSubjects) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PolicyRulesWithSubjects) XXX_Merge(src proto.Message) { + xxx_messageInfo_PolicyRulesWithSubjects.Merge(m, src) +} +func (m *PolicyRulesWithSubjects) XXX_Size() int { + return m.Size() +} +func (m *PolicyRulesWithSubjects) XXX_DiscardUnknown() { + xxx_messageInfo_PolicyRulesWithSubjects.DiscardUnknown(m) +} + +var xxx_messageInfo_PolicyRulesWithSubjects proto.InternalMessageInfo + +func (m *PriorityLevelConfiguration) Reset() { *m = PriorityLevelConfiguration{} } +func (*PriorityLevelConfiguration) ProtoMessage() {} +func (*PriorityLevelConfiguration) Descriptor() ([]byte, []int) { + return fileDescriptor_ed300aa8e672704e, []int{11} +} +func (m *PriorityLevelConfiguration) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PriorityLevelConfiguration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PriorityLevelConfiguration) XXX_Merge(src proto.Message) { + xxx_messageInfo_PriorityLevelConfiguration.Merge(m, src) +} +func (m *PriorityLevelConfiguration) XXX_Size() int { + return m.Size() 
+} +func (m *PriorityLevelConfiguration) XXX_DiscardUnknown() { + xxx_messageInfo_PriorityLevelConfiguration.DiscardUnknown(m) +} + +var xxx_messageInfo_PriorityLevelConfiguration proto.InternalMessageInfo + +func (m *PriorityLevelConfigurationCondition) Reset() { *m = PriorityLevelConfigurationCondition{} } +func (*PriorityLevelConfigurationCondition) ProtoMessage() {} +func (*PriorityLevelConfigurationCondition) Descriptor() ([]byte, []int) { + return fileDescriptor_ed300aa8e672704e, []int{12} +} +func (m *PriorityLevelConfigurationCondition) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PriorityLevelConfigurationCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PriorityLevelConfigurationCondition) XXX_Merge(src proto.Message) { + xxx_messageInfo_PriorityLevelConfigurationCondition.Merge(m, src) +} +func (m *PriorityLevelConfigurationCondition) XXX_Size() int { + return m.Size() +} +func (m *PriorityLevelConfigurationCondition) XXX_DiscardUnknown() { + xxx_messageInfo_PriorityLevelConfigurationCondition.DiscardUnknown(m) +} + +var xxx_messageInfo_PriorityLevelConfigurationCondition proto.InternalMessageInfo + +func (m *PriorityLevelConfigurationList) Reset() { *m = PriorityLevelConfigurationList{} } +func (*PriorityLevelConfigurationList) ProtoMessage() {} +func (*PriorityLevelConfigurationList) Descriptor() ([]byte, []int) { + return fileDescriptor_ed300aa8e672704e, []int{13} +} +func (m *PriorityLevelConfigurationList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PriorityLevelConfigurationList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PriorityLevelConfigurationList) XXX_Merge(src proto.Message) { + xxx_messageInfo_PriorityLevelConfigurationList.Merge(m, src) +} +func (m *PriorityLevelConfigurationList) XXX_Size() int { + return m.Size() +} +func (m *PriorityLevelConfigurationList) XXX_DiscardUnknown() { + xxx_messageInfo_PriorityLevelConfigurationList.DiscardUnknown(m) +} + +var xxx_messageInfo_PriorityLevelConfigurationList proto.InternalMessageInfo + +func (m *PriorityLevelConfigurationReference) Reset() { *m = PriorityLevelConfigurationReference{} } +func (*PriorityLevelConfigurationReference) ProtoMessage() {} +func (*PriorityLevelConfigurationReference) Descriptor() ([]byte, []int) { + return fileDescriptor_ed300aa8e672704e, []int{14} +} +func (m *PriorityLevelConfigurationReference) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PriorityLevelConfigurationReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PriorityLevelConfigurationReference) XXX_Merge(src proto.Message) { + xxx_messageInfo_PriorityLevelConfigurationReference.Merge(m, src) +} +func (m *PriorityLevelConfigurationReference) XXX_Size() int { + return m.Size() +} +func (m *PriorityLevelConfigurationReference) XXX_DiscardUnknown() { + xxx_messageInfo_PriorityLevelConfigurationReference.DiscardUnknown(m) +} + +var xxx_messageInfo_PriorityLevelConfigurationReference proto.InternalMessageInfo + +func (m *PriorityLevelConfigurationSpec) Reset() { *m = PriorityLevelConfigurationSpec{} } +func 
[... remainder of the machine-generated gogo-protobuf code for k8s.io.api.flowcontrol.v1beta2 elided: the XXX_Unmarshal/XXX_Marshal/XXX_Merge/XXX_Size/XXX_DiscardUnknown plumbing for the remaining message types, the proto.RegisterType and proto.RegisterFile calls in init(), the 1495-byte gzipped FileDescriptorProto blob (fileDescriptor_ed300aa8e672704e), the encodeVarintGenerated/sovGenerated/sozGenerated varint helpers, and the generated Marshal/MarshalTo/MarshalToSizedBuffer, Size, String, and Unmarshal implementations for FlowDistinguisherMethod, FlowSchema, FlowSchemaCondition, FlowSchemaList, FlowSchemaSpec, FlowSchemaStatus, GroupSubject, LimitResponse, LimitedPriorityLevelConfiguration, NonResourcePolicyRule, PolicyRulesWithSubjects, PriorityLevelConfiguration (plus its Condition, List, Reference, Spec, and Status types), QueuingConfiguration, ResourcePolicyRule, ServiceAccountSubject, Subject, and UserSubject ...]
dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.PriorityLevelConfiguration.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MatchingPrecedence", wireType) + } + m.MatchingPrecedence = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MatchingPrecedence |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DistinguisherMethod", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.DistinguisherMethod == nil { + m.DistinguisherMethod = &FlowDistinguisherMethod{} + } + if err := m.DistinguisherMethod.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Rules", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Rules = append(m.Rules, PolicyRulesWithSubjects{}) + if err := m.Rules[len(m.Rules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *FlowSchemaStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: FlowSchemaStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: FlowSchemaStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + } + var msglen int + for shift := uint(0); ; 
shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Conditions = append(m.Conditions, FlowSchemaCondition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GroupSubject) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GroupSubject: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GroupSubject: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LimitResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LimitResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LimitResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = LimitResponseType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Queuing", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Queuing == nil { + m.Queuing = &QueuingConfiguration{} + } + if err := m.Queuing.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LimitedPriorityLevelConfiguration) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LimitedPriorityLevelConfiguration: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LimitedPriorityLevelConfiguration: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AssuredConcurrencyShares", wireType) + } + m.AssuredConcurrencyShares = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.AssuredConcurrencyShares |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LimitResponse", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LimitResponse.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + 
if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NonResourcePolicyRule) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NonResourcePolicyRule: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NonResourcePolicyRule: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Verbs", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Verbs = append(m.Verbs, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NonResourceURLs", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NonResourceURLs = append(m.NonResourceURLs, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PolicyRulesWithSubjects) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PolicyRulesWithSubjects: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PolicyRulesWithSubjects: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Subjects", wireType) + } 
+ var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Subjects = append(m.Subjects, Subject{}) + if err := m.Subjects[len(m.Subjects)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceRules", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ResourceRules = append(m.ResourceRules, ResourcePolicyRule{}) + if err := m.ResourceRules[len(m.ResourceRules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NonResourceRules", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NonResourceRules = append(m.NonResourceRules, NonResourcePolicyRule{}) + if err := m.NonResourceRules[len(m.NonResourceRules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PriorityLevelConfiguration) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PriorityLevelConfiguration: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PriorityLevelConfiguration: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + 
b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PriorityLevelConfigurationCondition) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PriorityLevelConfigurationCondition: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PriorityLevelConfigurationCondition: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = PriorityLevelConfigurationConditionType(dAtA[iNdEx:postIndex]) + iNdEx = 
postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Status = ConditionStatus(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PriorityLevelConfigurationList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := 
int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PriorityLevelConfigurationList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PriorityLevelConfigurationList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, PriorityLevelConfiguration{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PriorityLevelConfigurationReference) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PriorityLevelConfigurationReference: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PriorityLevelConfigurationReference: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = 
preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PriorityLevelConfigurationSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PriorityLevelConfigurationSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PriorityLevelConfigurationSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = PriorityLevelEnablement(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Limited", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Limited == nil { + m.Limited = &LimitedPriorityLevelConfiguration{} + } + if err := m.Limited.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PriorityLevelConfigurationStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PriorityLevelConfigurationStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PriorityLevelConfigurationStatus: illegal tag %d (wire 
type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Conditions = append(m.Conditions, PriorityLevelConfigurationCondition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueuingConfiguration) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueuingConfiguration: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueuingConfiguration: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Queues", wireType) + } + m.Queues = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Queues |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field HandSize", wireType) + } + m.HandSize = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.HandSize |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field QueueLengthLimit", wireType) + } + m.QueueLengthLimit = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.QueueLengthLimit |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResourcePolicyRule) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; 
shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourcePolicyRule: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourcePolicyRule: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Verbs", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Verbs = append(m.Verbs, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field APIGroups", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.APIGroups = append(m.APIGroups, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Resources = append(m.Resources, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ClusterScope", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.ClusterScope = bool(v != 0) + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Namespaces", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex 
:= iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Namespaces = append(m.Namespaces, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ServiceAccountSubject) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ServiceAccountSubject: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ServiceAccountSubject: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Namespace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Subject) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Subject: wiretype end group for non-group") + } + if 
fieldNum <= 0 { + return fmt.Errorf("proto: Subject: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Kind = SubjectKind(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field User", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.User == nil { + m.User = &UserSubject{} + } + if err := m.User.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Group", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Group == nil { + m.Group = &GroupSubject{} + } + if err := m.Group.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ServiceAccount", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ServiceAccount == nil { + m.ServiceAccount = &ServiceAccountSubject{} + } + if err := m.ServiceAccount.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *UserSubject) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift 
+= 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: UserSubject: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: UserSubject: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupGenerated + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") +)
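Every generated Unmarshal method above inlines the same base-128 varint loop to read each field's tag and any length prefix. The following standalone Go sketch (illustrative only; readVarint is a hypothetical helper, not part of the vendored package) shows the decoding step that the generated code repeats inline:

package main

import (
	"errors"
	"fmt"
)

// readVarint decodes a protobuf base-128 varint starting at iNdEx:
// seven payload bits per byte, least-significant group first, with the
// high bit set on every byte except the last.
func readVarint(dAtA []byte, iNdEx int) (uint64, int, error) {
	var v uint64
	for shift := uint(0); ; shift += 7 {
		if shift >= 64 {
			return 0, 0, errors.New("proto: integer overflow")
		}
		if iNdEx >= len(dAtA) {
			return 0, 0, errors.New("proto: unexpected EOF")
		}
		b := dAtA[iNdEx]
		iNdEx++
		v |= uint64(b&0x7F) << shift
		if b < 0x80 { // high bit clear: this was the final byte
			return v, iNdEx, nil
		}
	}
}

func main() {
	// 0x96 0x01 decodes to 150: low group 0x16 (22) plus 0x01<<7 (128).
	v, next, err := readVarint([]byte{0x96, 0x01}, 0)
	fmt.Println(v, next, err) // 150 2 <nil>

	// A tag varint then splits into a field number (wire >> 3) and a wire
	// type (wire & 0x7), exactly as the generated switch statements do.
	tag, _, _ := readVarint([]byte{0x1a}, 0)
	fmt.Println(tag>>3, tag&0x7) // 3 2
}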
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = "proto2"; + +package k8s.io.api.flowcontrol.v1beta2; + +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "v1beta2"; + +// FlowDistinguisherMethod specifies the method of a flow distinguisher. +message FlowDistinguisherMethod { + // `type` is the type of flow distinguisher method + // The supported types are "ByUser" and "ByNamespace". + // Required. + optional string type = 1; +} + +// FlowSchema defines the schema of a group of flows. Note that a flow is made up of a set of inbound API requests with +// similar attributes and is identified by a pair of strings: the name of the FlowSchema and a "flow distinguisher". +message FlowSchema { + // `metadata` is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // `spec` is the specification of the desired behavior of a FlowSchema. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + optional FlowSchemaSpec spec = 2; + + // `status` is the current status of a FlowSchema. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + optional FlowSchemaStatus status = 3; +} + +// FlowSchemaCondition describes conditions for a FlowSchema. +message FlowSchemaCondition { + // `type` is the type of the condition. + // Required. + optional string type = 1; + + // `status` is the status of the condition. + // Can be True, False, Unknown. + // Required. + optional string status = 2; + + // `lastTransitionTime` is the last time the condition transitioned from one status to another. + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; + + // `reason` is a unique, one-word, CamelCase reason for the condition's last transition. + optional string reason = 4; + + // `message` is a human-readable message indicating details about last transition. + optional string message = 5; +} + +// FlowSchemaList is a list of FlowSchema objects. +message FlowSchemaList { + // `metadata` is the standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // `items` is a list of FlowSchemas. + repeated FlowSchema items = 2; +} + +// FlowSchemaSpec describes how the FlowSchema's specification looks like. +message FlowSchemaSpec { + // `priorityLevelConfiguration` should reference a PriorityLevelConfiguration in the cluster. 
If the reference cannot
+  // be resolved, the FlowSchema will be ignored and marked as invalid in its status.
+  // Required.
+  optional PriorityLevelConfigurationReference priorityLevelConfiguration = 1;
+
+  // `matchingPrecedence` is used to choose among the FlowSchemas that match a given request. The chosen
+  // FlowSchema is among those with the numerically lowest (which we take to be logically highest)
+  // MatchingPrecedence. Each MatchingPrecedence value must be in the range [1,10000].
+  // Note that if the precedence is not specified, it will be set to 1000 as default.
+  // +optional
+  optional int32 matchingPrecedence = 2;
+
+  // `distinguisherMethod` defines how to compute the flow distinguisher for requests that match this schema.
+  // `nil` specifies that the distinguisher is disabled and thus will always be the empty string.
+  // +optional
+  optional FlowDistinguisherMethod distinguisherMethod = 3;
+
+  // `rules` describes which requests will match this flow schema. This FlowSchema matches a request if and only if
+  // at least one member of rules matches the request.
+  // if it is an empty slice, there will be no requests matching the FlowSchema.
+  // +listType=atomic
+  // +optional
+  repeated PolicyRulesWithSubjects rules = 4;
+}
+
+// FlowSchemaStatus represents the current state of a FlowSchema.
+message FlowSchemaStatus {
+  // `conditions` is a list of the current states of FlowSchema.
+  // +listType=map
+  // +listMapKey=type
+  // +optional
+  repeated FlowSchemaCondition conditions = 1;
+}
+
+// GroupSubject holds detailed information for group-kind subject.
+message GroupSubject {
+  // name is the user group that matches, or "*" to match all user groups.
+  // See https://github.com/kubernetes/apiserver/blob/master/pkg/authentication/user/user.go for some
+  // well-known group names.
+  // Required.
+  optional string name = 1;
+}
+
+// LimitResponse defines how to handle requests that can not be executed right now.
+// +union
+message LimitResponse {
+  // `type` is "Queue" or "Reject".
+  // "Queue" means that requests that can not be executed upon arrival
+  // are held in a queue until they can be executed or a queuing limit
+  // is reached.
+  // "Reject" means that requests that can not be executed upon arrival
+  // are rejected.
+  // Required.
+  // +unionDiscriminator
+  optional string type = 1;
+
+  // `queuing` holds the configuration parameters for queuing.
+  // This field may be non-empty only if `type` is `"Queue"`.
+  // +optional
+  optional QueuingConfiguration queuing = 2;
+}
+
+// LimitedPriorityLevelConfiguration specifies how to handle requests that are subject to limits.
+// It addresses two issues:
+// * How are requests for this priority level limited?
+// * What should be done with requests that exceed the limit?
+message LimitedPriorityLevelConfiguration {
+  // `assuredConcurrencyShares` (ACS) configures the execution
+  // limit, which is a limit on the number of requests of this
+  // priority level that may be executing at a given time. ACS must
+  // be a positive number. The server's concurrency limit (SCL) is
+  // divided among the concurrency-controlled priority levels in
+  // proportion to their assured concurrency shares.
This produces + // the assured concurrency value (ACV) --- the number of requests + // that may be executing at a time --- for each such priority + // level: + // + // ACV(l) = ceil( SCL * ACS(l) / ( sum[priority levels k] ACS(k) ) ) + // + // bigger numbers of ACS mean more reserved concurrent requests (at the + // expense of every other PL). + // This field has a default value of 30. + // +optional + optional int32 assuredConcurrencyShares = 1; + + // `limitResponse` indicates what to do with requests that can not be executed right now + optional LimitResponse limitResponse = 2; +} + +// NonResourcePolicyRule is a predicate that matches non-resource requests according to their verb and the +// target non-resource URL. A NonResourcePolicyRule matches a request if and only if both (a) at least one member +// of verbs matches the request and (b) at least one member of nonResourceURLs matches the request. +message NonResourcePolicyRule { + // `verbs` is a list of matching verbs and may not be empty. + // "*" matches all verbs. If it is present, it must be the only entry. + // +listType=set + // Required. + repeated string verbs = 1; + + // `nonResourceURLs` is a set of url prefixes that a user should have access to and may not be empty. + // For example: + // - "/healthz" is legal + // - "/hea*" is illegal + // - "/hea" is legal but matches nothing + // - "/hea/*" also matches nothing + // - "/healthz/*" matches all per-component health checks. + // "*" matches all non-resource urls. if it is present, it must be the only entry. + // +listType=set + // Required. + repeated string nonResourceURLs = 6; +} + +// PolicyRulesWithSubjects prescribes a test that applies to a request to an apiserver. The test considers the subject +// making the request, the verb being requested, and the resource to be acted upon. This PolicyRulesWithSubjects matches +// a request if and only if both (a) at least one member of subjects matches the request and (b) at least one member +// of resourceRules or nonResourceRules matches the request. +message PolicyRulesWithSubjects { + // subjects is the list of normal user, serviceaccount, or group that this rule cares about. + // There must be at least one member in this slice. + // A slice that includes both the system:authenticated and system:unauthenticated user groups matches every request. + // +listType=atomic + // Required. + repeated Subject subjects = 1; + + // `resourceRules` is a slice of ResourcePolicyRules that identify matching requests according to their verb and the + // target resource. + // At least one of `resourceRules` and `nonResourceRules` has to be non-empty. + // +listType=atomic + // +optional + repeated ResourcePolicyRule resourceRules = 2; + + // `nonResourceRules` is a list of NonResourcePolicyRules that identify matching requests according to their verb + // and the target non-resource URL. + // +listType=atomic + // +optional + repeated NonResourcePolicyRule nonResourceRules = 3; +} + +// PriorityLevelConfiguration represents the configuration of a priority level. +message PriorityLevelConfiguration { + // `metadata` is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // `spec` is the specification of the desired behavior of a "request-priority". 
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + optional PriorityLevelConfigurationSpec spec = 2; + + // `status` is the current status of a "request-priority". + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + optional PriorityLevelConfigurationStatus status = 3; +} + +// PriorityLevelConfigurationCondition defines the condition of priority level. +message PriorityLevelConfigurationCondition { + // `type` is the type of the condition. + // Required. + optional string type = 1; + + // `status` is the status of the condition. + // Can be True, False, Unknown. + // Required. + optional string status = 2; + + // `lastTransitionTime` is the last time the condition transitioned from one status to another. + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; + + // `reason` is a unique, one-word, CamelCase reason for the condition's last transition. + optional string reason = 4; + + // `message` is a human-readable message indicating details about last transition. + optional string message = 5; +} + +// PriorityLevelConfigurationList is a list of PriorityLevelConfiguration objects. +message PriorityLevelConfigurationList { + // `metadata` is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // `items` is a list of request-priorities. + repeated PriorityLevelConfiguration items = 2; +} + +// PriorityLevelConfigurationReference contains information that points to the "request-priority" being used. +message PriorityLevelConfigurationReference { + // `name` is the name of the priority level configuration being referenced + // Required. + optional string name = 1; +} + +// PriorityLevelConfigurationSpec specifies the configuration of a priority level. +// +union +message PriorityLevelConfigurationSpec { + // `type` indicates whether this priority level is subject to + // limitation on request execution. A value of `"Exempt"` means + // that requests of this priority level are not subject to a limit + // (and thus are never queued) and do not detract from the + // capacity made available to other priority levels. A value of + // `"Limited"` means that (a) requests of this priority level + // _are_ subject to limits and (b) some of the server's limited + // capacity is made available exclusively to this priority level. + // Required. + // +unionDiscriminator + optional string type = 1; + + // `limited` specifies how requests are handled for a Limited priority level. + // This field must be non-empty if and only if `type` is `"Limited"`. + // +optional + optional LimitedPriorityLevelConfiguration limited = 2; +} + +// PriorityLevelConfigurationStatus represents the current state of a "request-priority". +message PriorityLevelConfigurationStatus { + // `conditions` is the current state of "request-priority". + // +listType=map + // +listMapKey=type + // +optional + repeated PriorityLevelConfigurationCondition conditions = 1; +} + +// QueuingConfiguration holds the configuration parameters for queuing +message QueuingConfiguration { + // `queues` is the number of queues for this priority level. The + // queues exist independently at each apiserver. The value must be + // positive. 
Setting it to 1 effectively precludes
+  // shuffle sharding and thus makes the distinguisher method of
+  // associated flow schemas irrelevant. This field has a default
+  // value of 64.
+  // +optional
+  optional int32 queues = 1;
+
+  // `handSize` is a small positive number that configures the
+  // shuffle sharding of requests into queues. When enqueuing a request
+  // at this priority level the request's flow identifier (a string
+  // pair) is hashed and the hash value is used to shuffle the list
+  // of queues and deal a hand of the size specified here. The
+  // request is put into one of the shortest queues in that hand.
+  // `handSize` must be no larger than `queues`, and should be
+  // significantly smaller (so that a few heavy flows do not
+  // saturate most of the queues). See the user-facing
+  // documentation for more extensive guidance on setting this
+  // field. This field has a default value of 8.
+  // +optional
+  optional int32 handSize = 2;
+
+  // `queueLengthLimit` is the maximum number of requests allowed to
+  // be waiting in a given queue of this priority level at a time;
+  // excess requests are rejected. This value must be positive. If
+  // not specified, it will be defaulted to 50.
+  // +optional
+  optional int32 queueLengthLimit = 3;
+}
+
+// ResourcePolicyRule is a predicate that matches some resource
+// requests, testing the request's verb and the target resource. A
+// ResourcePolicyRule matches a resource request if and only if: (a)
+// at least one member of verbs matches the request, (b) at least one
+// member of apiGroups matches the request, (c) at least one member of
+// resources matches the request, and (d) either (d1) the request does
+// not specify a namespace (i.e., `Namespace==""`) and clusterScope is
+// true or (d2) the request specifies a namespace and at least one member
+// of namespaces matches the request's namespace.
+message ResourcePolicyRule {
+  // `verbs` is a list of matching verbs and may not be empty.
+  // "*" matches all verbs and, if present, must be the only entry.
+  // +listType=set
+  // Required.
+  repeated string verbs = 1;
+
+  // `apiGroups` is a list of matching API groups and may not be empty.
+  // "*" matches all API groups and, if present, must be the only entry.
+  // +listType=set
+  // Required.
+  repeated string apiGroups = 2;
+
+  // `resources` is a list of matching resources (i.e., lowercase
+  // and plural) with, if desired, subresource. For example, [
+  // "services", "nodes/status" ]. This list may not be empty.
+  // "*" matches all resources and, if present, must be the only entry.
+  // Required.
+  // +listType=set
+  repeated string resources = 3;
+
+  // `clusterScope` indicates whether to match requests that do not
+  // specify a namespace (which happens either because the resource
+  // is not namespaced or the request targets all namespaces).
+  // If this field is omitted or false then the `namespaces` field
+  // must contain a non-empty list.
+  // +optional
+  optional bool clusterScope = 4;
+
+  // `namespaces` is a list of target namespaces that restricts
+  // matches. A request that specifies a target namespace matches
+  // only if either (a) this list contains that target namespace or
+  // (b) this list contains "*". Note that "*" matches any
+  // specified namespace but does not match a request that _does
+  // not specify_ a namespace (see the `clusterScope` field for
+  // that).
+  // This list may be empty, but only if `clusterScope` is true.
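To make clause (d) of the matching predicate above concrete, here is a minimal Go sketch against the types generated from this proto; the helper name matchesNamespace and the standalone main are assumptions for illustration, not part of the vendored code.

package main

import (
	"fmt"

	flowcontrol "k8s.io/api/flowcontrol/v1beta2"
)

// matchesNamespace sketches clause (d) of the ResourcePolicyRule predicate:
// (d1) a cluster-scoped request (empty namespace) matches only if clusterScope
// is true; (d2) a namespaced request matches if the list names the namespace
// or contains "*" (NamespaceEvery).
func matchesNamespace(rule flowcontrol.ResourcePolicyRule, requestNamespace string) bool {
	if requestNamespace == "" {
		return rule.ClusterScope // (d1)
	}
	for _, ns := range rule.Namespaces { // (d2)
		if ns == requestNamespace || ns == flowcontrol.NamespaceEvery {
			return true
		}
	}
	return false
}

func main() {
	rule := flowcontrol.ResourcePolicyRule{Namespaces: []string{"kube-system"}}
	fmt.Println(matchesNamespace(rule, "kube-system")) // true
	fmt.Println(matchesNamespace(rule, ""))            // false: clusterScope is unset
}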
+ // +optional + // +listType=set + repeated string namespaces = 5; +} + +// ServiceAccountSubject holds detailed information for service-account-kind subject. +message ServiceAccountSubject { + // `namespace` is the namespace of matching ServiceAccount objects. + // Required. + optional string namespace = 1; + + // `name` is the name of matching ServiceAccount objects, or "*" to match regardless of name. + // Required. + optional string name = 2; +} + +// Subject matches the originator of a request, as identified by the request authentication system. There are three +// ways of matching an originator; by user, group, or service account. +// +union +message Subject { + // `kind` indicates which one of the other fields is non-empty. + // Required + // +unionDiscriminator + optional string kind = 1; + + // `user` matches based on username. + // +optional + optional UserSubject user = 2; + + // `group` matches based on user group name. + // +optional + optional GroupSubject group = 3; + + // `serviceAccount` matches ServiceAccounts. + // +optional + optional ServiceAccountSubject serviceAccount = 4; +} + +// UserSubject holds detailed information for user-kind subject. +message UserSubject { + // `name` is the username that matches, or "*" to match all usernames. + // Required. + optional string name = 1; +} + diff --git a/vendor/k8s.io/api/flowcontrol/v1beta2/register.go b/vendor/k8s.io/api/flowcontrol/v1beta2/register.go new file mode 100644 index 0000000000000..0ffe55133ffd4 --- /dev/null +++ b/vendor/k8s.io/api/flowcontrol/v1beta2/register.go @@ -0,0 +1,58 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the name of api group +const GroupName = "flowcontrol.apiserver.k8s.io" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta2"} + +// Kind takes an unqualified kind and returns a Group qualified GroupKind +func Kind(kind string) schema.GroupKind { + return SchemeGroupVersion.WithKind(kind).GroupKind() +} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + // SchemeBuilder installs the api group to a scheme + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + // AddToScheme adds api to a scheme + AddToScheme = SchemeBuilder.AddToScheme +) + +// Adds the list of known types to the given scheme. 
+func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &FlowSchema{}, + &FlowSchemaList{}, + &PriorityLevelConfiguration{}, + &PriorityLevelConfigurationList{}, + ) + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/vendor/k8s.io/api/flowcontrol/v1beta2/types.go b/vendor/k8s.io/api/flowcontrol/v1beta2/types.go new file mode 100644 index 0000000000000..408681e998ce0 --- /dev/null +++ b/vendor/k8s.io/api/flowcontrol/v1beta2/types.go @@ -0,0 +1,579 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// These are valid wildcards. +const ( + APIGroupAll = "*" + ResourceAll = "*" + VerbAll = "*" + NonResourceAll = "*" + NameAll = "*" + + NamespaceEvery = "*" // matches every particular namespace +) + +// System preset priority level names +const ( + PriorityLevelConfigurationNameExempt = "exempt" + PriorityLevelConfigurationNameCatchAll = "catch-all" + FlowSchemaNameExempt = "exempt" + FlowSchemaNameCatchAll = "catch-all" +) + +// Conditions +const ( + FlowSchemaConditionDangling = "Dangling" + + PriorityLevelConfigurationConditionConcurrencyShared = "ConcurrencyShared" +) + +// Constants used by api validation. +const ( + FlowSchemaMaxMatchingPrecedence int32 = 10000 +) + +// Constants for apiserver response headers. +const ( + ResponseHeaderMatchedPriorityLevelConfigurationUID = "X-Kubernetes-PF-PriorityLevel-UID" + ResponseHeaderMatchedFlowSchemaUID = "X-Kubernetes-PF-FlowSchema-UID" +) + +const ( + // AutoUpdateAnnotationKey is the name of an annotation that enables + // automatic update of the spec of the bootstrap configuration + // object(s), if set to 'true'. + // + // On a fresh install, all bootstrap configuration objects will have auto + // update enabled with the following annotation key: + // apf.kubernetes.io/autoupdate-spec: 'true' + // + // The kube-apiserver periodically checks the bootstrap configuration + // objects on the cluster and applies updates if necessary. + // + // kube-apiserver enforces an 'always auto-update' policy for the + // mandatory configuration object(s). This implies: + // - the auto-update annotation key is added with a value of 'true' + // if it is missing. + // - the auto-update annotation key is set to 'true' if its current value + // is a boolean false or has an invalid boolean representation + // (if the cluster operator sets it to 'false' it will be stomped) + // - any changes to the spec made by the cluster operator will be + // stomped. 
+	//
+	// The kube-apiserver will apply updates on the suggested configuration if:
+	// - the cluster operator has enabled auto-update by setting the annotation
+	//   (apf.kubernetes.io/autoupdate-spec: 'true') or
+	// - the annotation key is missing but the generation is 1
+	//
+	// If the suggested configuration object is missing the annotation key,
+	// kube-apiserver will update the annotation appropriately:
+	// - it is set to 'true' if generation of the object is '1' which usually
+	//   indicates that the spec of the object has not been changed.
+	// - it is set to 'false' if generation of the object is greater than 1.
+	//
+	// The goal is to enable the kube-apiserver to apply update on suggested
+	// configuration objects installed by previous releases but not overwrite
+	// changes made by the cluster operators.
+	// Note that this distinction is imperfectly detected: in the case where an
+	// operator deletes a suggested configuration object and later creates it
+	// but with a variant spec and then does no updates of the object
+	// (generation is 1), the technique outlined above will incorrectly
+	// determine that the object should be auto-updated.
+	AutoUpdateAnnotationKey = "apf.kubernetes.io/autoupdate-spec"
+)
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +k8s:prerelease-lifecycle-gen:introduced=1.23
+
+// FlowSchema defines the schema of a group of flows. Note that a flow is made up of a set of inbound API requests with
+// similar attributes and is identified by a pair of strings: the name of the FlowSchema and a "flow distinguisher".
+type FlowSchema struct {
+	metav1.TypeMeta `json:",inline"`
+	// `metadata` is the standard object's metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+	// +optional
+	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+	// `spec` is the specification of the desired behavior of a FlowSchema.
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+	// +optional
+	Spec FlowSchemaSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
+	// `status` is the current status of a FlowSchema.
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+	// +optional
+	Status FlowSchemaStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +k8s:prerelease-lifecycle-gen:introduced=1.23
+
+// FlowSchemaList is a list of FlowSchema objects.
+type FlowSchemaList struct {
+	metav1.TypeMeta `json:",inline"`
+	// `metadata` is the standard list metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+	// +optional
+	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// `items` is a list of FlowSchemas.
+	Items []FlowSchema `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+// FlowSchemaSpec describes what the FlowSchema's specification looks like.
+type FlowSchemaSpec struct {
+	// `priorityLevelConfiguration` should reference a PriorityLevelConfiguration in the cluster. If the reference cannot
+	// be resolved, the FlowSchema will be ignored and marked as invalid in its status.
+	// Required.
+	PriorityLevelConfiguration PriorityLevelConfigurationReference `json:"priorityLevelConfiguration" protobuf:"bytes,1,opt,name=priorityLevelConfiguration"`
+	// `matchingPrecedence` is used to choose among the FlowSchemas that match a given request. The chosen
+	// FlowSchema is among those with the numerically lowest (which we take to be logically highest)
+	// MatchingPrecedence. Each MatchingPrecedence value must be in the range [1,10000].
+	// Note that if the precedence is not specified, it will be set to 1000 as default.
+	// +optional
+	MatchingPrecedence int32 `json:"matchingPrecedence" protobuf:"varint,2,opt,name=matchingPrecedence"`
+	// `distinguisherMethod` defines how to compute the flow distinguisher for requests that match this schema.
+	// `nil` specifies that the distinguisher is disabled and thus will always be the empty string.
+	// +optional
+	DistinguisherMethod *FlowDistinguisherMethod `json:"distinguisherMethod,omitempty" protobuf:"bytes,3,opt,name=distinguisherMethod"`
+	// `rules` describes which requests will match this flow schema. This FlowSchema matches a request if and only if
+	// at least one member of rules matches the request.
+	// if it is an empty slice, there will be no requests matching the FlowSchema.
+	// +listType=atomic
+	// +optional
+	Rules []PolicyRulesWithSubjects `json:"rules,omitempty" protobuf:"bytes,4,rep,name=rules"`
+}
+
+// FlowDistinguisherMethodType is the type of flow distinguisher method
+type FlowDistinguisherMethodType string
+
+// These are valid flow-distinguisher methods.
+const (
+	// FlowDistinguisherMethodByUserType specifies that the flow distinguisher is the username in the request.
+	// This type is used to provide some insulation between users.
+	FlowDistinguisherMethodByUserType FlowDistinguisherMethodType = "ByUser"
+
+	// FlowDistinguisherMethodByNamespaceType specifies that the flow distinguisher is the namespace of the
+	// object that the request acts upon. If the object is not namespaced, or if the request is a non-resource
+	// request, then the distinguisher will be the empty string. An example usage of this type is to provide
+	// some insulation between tenants in a situation where there are multiple tenants and each namespace
+	// is dedicated to a tenant.
+	FlowDistinguisherMethodByNamespaceType FlowDistinguisherMethodType = "ByNamespace"
+)
+
+// FlowDistinguisherMethod specifies the method of a flow distinguisher.
+type FlowDistinguisherMethod struct {
+	// `type` is the type of flow distinguisher method
+	// The supported types are "ByUser" and "ByNamespace".
+	// Required.
+	Type FlowDistinguisherMethodType `json:"type" protobuf:"bytes,1,opt,name=type"`
+}
+
+// PriorityLevelConfigurationReference contains information that points to the "request-priority" being used.
+type PriorityLevelConfigurationReference struct {
+	// `name` is the name of the priority level configuration being referenced
+	// Required.
+	Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
+}
+
+// PolicyRulesWithSubjects prescribes a test that applies to a request to an apiserver. The test considers the subject
+// making the request, the verb being requested, and the resource to be acted upon. This PolicyRulesWithSubjects matches
+// a request if and only if both (a) at least one member of subjects matches the request and (b) at least one member
+// of resourceRules or nonResourceRules matches the request.
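As a concrete illustration of how these rule types compose — a hypothetical FlowSchemaSpec that routes pod reads from all authenticated users to a made-up priority level; every name and value here is an assumption for the sketch, not taken from the vendored code.

package main

import (
	"fmt"

	flowcontrol "k8s.io/api/flowcontrol/v1beta2"
)

func main() {
	// Hypothetical spec: list/get requests for pods, from any member of the
	// system:authenticated group, routed to a priority level "example-level".
	spec := flowcontrol.FlowSchemaSpec{
		PriorityLevelConfiguration: flowcontrol.PriorityLevelConfigurationReference{
			Name: "example-level",
		},
		MatchingPrecedence: 1000,
		Rules: []flowcontrol.PolicyRulesWithSubjects{{
			Subjects: []flowcontrol.Subject{{
				Kind:  flowcontrol.SubjectKindGroup,
				Group: &flowcontrol.GroupSubject{Name: "system:authenticated"},
			}},
			ResourceRules: []flowcontrol.ResourcePolicyRule{{
				Verbs:      []string{"list", "get"},
				APIGroups:  []string{""},
				Resources:  []string{"pods"},
				Namespaces: []string{flowcontrol.NamespaceEvery},
			}},
		}},
	}
	fmt.Printf("%+v\n", spec)
}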
+type PolicyRulesWithSubjects struct { + // subjects is the list of normal user, serviceaccount, or group that this rule cares about. + // There must be at least one member in this slice. + // A slice that includes both the system:authenticated and system:unauthenticated user groups matches every request. + // +listType=atomic + // Required. + Subjects []Subject `json:"subjects" protobuf:"bytes,1,rep,name=subjects"` + // `resourceRules` is a slice of ResourcePolicyRules that identify matching requests according to their verb and the + // target resource. + // At least one of `resourceRules` and `nonResourceRules` has to be non-empty. + // +listType=atomic + // +optional + ResourceRules []ResourcePolicyRule `json:"resourceRules,omitempty" protobuf:"bytes,2,opt,name=resourceRules"` + // `nonResourceRules` is a list of NonResourcePolicyRules that identify matching requests according to their verb + // and the target non-resource URL. + // +listType=atomic + // +optional + NonResourceRules []NonResourcePolicyRule `json:"nonResourceRules,omitempty" protobuf:"bytes,3,opt,name=nonResourceRules"` +} + +// Subject matches the originator of a request, as identified by the request authentication system. There are three +// ways of matching an originator; by user, group, or service account. +// +union +type Subject struct { + // `kind` indicates which one of the other fields is non-empty. + // Required + // +unionDiscriminator + Kind SubjectKind `json:"kind" protobuf:"bytes,1,opt,name=kind"` + // `user` matches based on username. + // +optional + User *UserSubject `json:"user,omitempty" protobuf:"bytes,2,opt,name=user"` + // `group` matches based on user group name. + // +optional + Group *GroupSubject `json:"group,omitempty" protobuf:"bytes,3,opt,name=group"` + // `serviceAccount` matches ServiceAccounts. + // +optional + ServiceAccount *ServiceAccountSubject `json:"serviceAccount,omitempty" protobuf:"bytes,4,opt,name=serviceAccount"` +} + +// SubjectKind is the kind of subject. +type SubjectKind string + +// Supported subject's kinds. +const ( + SubjectKindUser SubjectKind = "User" + SubjectKindGroup SubjectKind = "Group" + SubjectKindServiceAccount SubjectKind = "ServiceAccount" +) + +// UserSubject holds detailed information for user-kind subject. +type UserSubject struct { + // `name` is the username that matches, or "*" to match all usernames. + // Required. + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` +} + +// GroupSubject holds detailed information for group-kind subject. +type GroupSubject struct { + // name is the user group that matches, or "*" to match all user groups. + // See https://github.com/kubernetes/apiserver/blob/master/pkg/authentication/user/user.go for some + // well-known group names. + // Required. + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` +} + +// ServiceAccountSubject holds detailed information for service-account-kind subject. +type ServiceAccountSubject struct { + // `namespace` is the namespace of matching ServiceAccount objects. + // Required. + Namespace string `json:"namespace" protobuf:"bytes,1,opt,name=namespace"` + // `name` is the name of matching ServiceAccount objects, or "*" to match regardless of name. + // Required. + Name string `json:"name" protobuf:"bytes,2,opt,name=name"` +} + +// ResourcePolicyRule is a predicate that matches some resource +// requests, testing the request's verb and the target resource. 
A
+// ResourcePolicyRule matches a resource request if and only if: (a)
+// at least one member of verbs matches the request, (b) at least one
+// member of apiGroups matches the request, (c) at least one member of
+// resources matches the request, and (d) either (d1) the request does
+// not specify a namespace (i.e., `Namespace==""`) and clusterScope is
+// true or (d2) the request specifies a namespace and at least one member
+// of namespaces matches the request's namespace.
+type ResourcePolicyRule struct {
+	// `verbs` is a list of matching verbs and may not be empty.
+	// "*" matches all verbs and, if present, must be the only entry.
+	// +listType=set
+	// Required.
+	Verbs []string `json:"verbs" protobuf:"bytes,1,rep,name=verbs"`
+
+	// `apiGroups` is a list of matching API groups and may not be empty.
+	// "*" matches all API groups and, if present, must be the only entry.
+	// +listType=set
+	// Required.
+	APIGroups []string `json:"apiGroups" protobuf:"bytes,2,rep,name=apiGroups"`
+
+	// `resources` is a list of matching resources (i.e., lowercase
+	// and plural) with, if desired, subresource. For example, [
+	// "services", "nodes/status" ]. This list may not be empty.
+	// "*" matches all resources and, if present, must be the only entry.
+	// Required.
+	// +listType=set
+	Resources []string `json:"resources" protobuf:"bytes,3,rep,name=resources"`
+
+	// `clusterScope` indicates whether to match requests that do not
+	// specify a namespace (which happens either because the resource
+	// is not namespaced or the request targets all namespaces).
+	// If this field is omitted or false then the `namespaces` field
+	// must contain a non-empty list.
+	// +optional
+	ClusterScope bool `json:"clusterScope,omitempty" protobuf:"varint,4,opt,name=clusterScope"`
+
+	// `namespaces` is a list of target namespaces that restricts
+	// matches. A request that specifies a target namespace matches
+	// only if either (a) this list contains that target namespace or
+	// (b) this list contains "*". Note that "*" matches any
+	// specified namespace but does not match a request that _does
+	// not specify_ a namespace (see the `clusterScope` field for
+	// that).
+	// This list may be empty, but only if `clusterScope` is true.
+	// +optional
+	// +listType=set
+	Namespaces []string `json:"namespaces" protobuf:"bytes,5,rep,name=namespaces"`
+}
+
+// NonResourcePolicyRule is a predicate that matches non-resource requests according to their verb and the
+// target non-resource URL. A NonResourcePolicyRule matches a request if and only if both (a) at least one member
+// of verbs matches the request and (b) at least one member of nonResourceURLs matches the request.
+type NonResourcePolicyRule struct {
+	// `verbs` is a list of matching verbs and may not be empty.
+	// "*" matches all verbs. If it is present, it must be the only entry.
+	// +listType=set
+	// Required.
+	Verbs []string `json:"verbs" protobuf:"bytes,1,rep,name=verbs"`
+	// `nonResourceURLs` is a set of url prefixes that a user should have access to and may not be empty.
+	// For example:
+	//   - "/healthz" is legal
+	//   - "/hea*" is illegal
+	//   - "/hea" is legal but matches nothing
+	//   - "/hea/*" also matches nothing
+	//   - "/healthz/*" matches all per-component health checks.
+	// "*" matches all non-resource urls. if it is present, it must be the only entry.
+	// +listType=set
+	// Required.
+ NonResourceURLs []string `json:"nonResourceURLs" protobuf:"bytes,6,rep,name=nonResourceURLs"` +} + +// FlowSchemaStatus represents the current state of a FlowSchema. +type FlowSchemaStatus struct { + // `conditions` is a list of the current states of FlowSchema. + // +listType=map + // +listMapKey=type + // +optional + Conditions []FlowSchemaCondition `json:"conditions,omitempty" protobuf:"bytes,1,rep,name=conditions"` +} + +// FlowSchemaCondition describes conditions for a FlowSchema. +type FlowSchemaCondition struct { + // `type` is the type of the condition. + // Required. + Type FlowSchemaConditionType `json:"type,omitempty" protobuf:"bytes,1,opt,name=type"` + // `status` is the status of the condition. + // Can be True, False, Unknown. + // Required. + Status ConditionStatus `json:"status,omitempty" protobuf:"bytes,2,opt,name=status"` + // `lastTransitionTime` is the last time the condition transitioned from one status to another. + LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,3,opt,name=lastTransitionTime"` + // `reason` is a unique, one-word, CamelCase reason for the condition's last transition. + Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"` + // `message` is a human-readable message indicating details about last transition. + Message string `json:"message,omitempty" protobuf:"bytes,5,opt,name=message"` +} + +// FlowSchemaConditionType is a valid value for FlowSchemaStatusCondition.Type +type FlowSchemaConditionType string + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.23 + +// PriorityLevelConfiguration represents the configuration of a priority level. +type PriorityLevelConfiguration struct { + metav1.TypeMeta `json:",inline"` + // `metadata` is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // `spec` is the specification of the desired behavior of a "request-priority". + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + Spec PriorityLevelConfigurationSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` + // `status` is the current status of a "request-priority". + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + Status PriorityLevelConfigurationStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.23 + +// PriorityLevelConfigurationList is a list of PriorityLevelConfiguration objects. +type PriorityLevelConfigurationList struct { + metav1.TypeMeta `json:",inline"` + // `metadata` is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // `items` is a list of request-priorities. + Items []PriorityLevelConfiguration `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// PriorityLevelConfigurationSpec specifies the configuration of a priority level. 
+// +union
+type PriorityLevelConfigurationSpec struct {
+	// `type` indicates whether this priority level is subject to
+	// limitation on request execution. A value of `"Exempt"` means
+	// that requests of this priority level are not subject to a limit
+	// (and thus are never queued) and do not detract from the
+	// capacity made available to other priority levels. A value of
+	// `"Limited"` means that (a) requests of this priority level
+	// _are_ subject to limits and (b) some of the server's limited
+	// capacity is made available exclusively to this priority level.
+	// Required.
+	// +unionDiscriminator
+	Type PriorityLevelEnablement `json:"type" protobuf:"bytes,1,opt,name=type"`
+
+	// `limited` specifies how requests are handled for a Limited priority level.
+	// This field must be non-empty if and only if `type` is `"Limited"`.
+	// +optional
+	Limited *LimitedPriorityLevelConfiguration `json:"limited,omitempty" protobuf:"bytes,2,opt,name=limited"`
+}
+
+// PriorityLevelEnablement indicates whether limits on execution are enabled for the priority level
+type PriorityLevelEnablement string
+
+// Supported priority level enablement values.
+const (
+	// PriorityLevelEnablementExempt means that requests are not subject to limits
+	PriorityLevelEnablementExempt PriorityLevelEnablement = "Exempt"
+
+	// PriorityLevelEnablementLimited means that requests are subject to limits
+	PriorityLevelEnablementLimited PriorityLevelEnablement = "Limited"
+)
+
+// LimitedPriorityLevelConfiguration specifies how to handle requests that are subject to limits.
+// It addresses two issues:
+// * How are requests for this priority level limited?
+// * What should be done with requests that exceed the limit?
+type LimitedPriorityLevelConfiguration struct {
+	// `assuredConcurrencyShares` (ACS) configures the execution
+	// limit, which is a limit on the number of requests of this
+	// priority level that may be executing at a given time. ACS must
+	// be a positive number. The server's concurrency limit (SCL) is
+	// divided among the concurrency-controlled priority levels in
+	// proportion to their assured concurrency shares. This produces
+	// the assured concurrency value (ACV) --- the number of requests
+	// that may be executing at a time --- for each such priority
+	// level:
+	//
+	//             ACV(l) = ceil( SCL * ACS(l) / ( sum[priority levels k] ACS(k) ) )
+	//
+	// bigger numbers of ACS mean more reserved concurrent requests (at the
+	// expense of every other PL).
+	// This field has a default value of 30.
+	// +optional
+	AssuredConcurrencyShares int32 `json:"assuredConcurrencyShares" protobuf:"varint,1,opt,name=assuredConcurrencyShares"`
+
+	// `limitResponse` indicates what to do with requests that can not be executed right now
+	LimitResponse LimitResponse `json:"limitResponse,omitempty" protobuf:"bytes,2,opt,name=limitResponse"`
+}
+
+// LimitResponse defines how to handle requests that can not be executed right now.
+// +union
+type LimitResponse struct {
+	// `type` is "Queue" or "Reject".
+	// "Queue" means that requests that can not be executed upon arrival
+	// are held in a queue until they can be executed or a queuing limit
+	// is reached.
+	// "Reject" means that requests that can not be executed upon arrival
+	// are rejected.
+	// Required.
+	// +unionDiscriminator
+	Type LimitResponseType `json:"type" protobuf:"bytes,1,opt,name=type"`
+
+	// `queuing` holds the configuration parameters for queuing.
+	// This field may be non-empty only if `type` is `"Queue"`.
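A small self-contained sketch of the assured-concurrency formula above; the function name and the example numbers (an SCL of 600 and shares of 30, 40 and 100) are arbitrary assumptions, not values from the vendored code.

package main

import (
	"fmt"
	"math"
)

// assuredConcurrencyValues applies ACV(l) = ceil( SCL * ACS(l) / sum_k ACS(k) )
// to every priority level. Hypothetical helper, not part of the vendored API.
func assuredConcurrencyValues(scl int32, shares []int32) []int32 {
	var total int32
	for _, s := range shares {
		total += s
	}
	acvs := make([]int32, len(shares))
	for i, s := range shares {
		acvs[i] = int32(math.Ceil(float64(scl) * float64(s) / float64(total)))
	}
	return acvs
}

func main() {
	// An SCL of 600 split across three levels with shares 30, 40 and 100.
	// Note that the ceil can hand out slightly more than SCL in total.
	fmt.Println(assuredConcurrencyValues(600, []int32{30, 40, 100})) // [106 142 353]
}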
+	// +optional
+	Queuing *QueuingConfiguration `json:"queuing,omitempty" protobuf:"bytes,2,opt,name=queuing"`
+}
+
+// LimitResponseType identifies how a Limited priority level handles a request that can not be executed right now
+type LimitResponseType string
+
+// Supported limit responses.
+const (
+	// LimitResponseTypeQueue means that requests that can not be executed right now are queued until they can be executed or a queuing limit is hit
+	LimitResponseTypeQueue LimitResponseType = "Queue"
+
+	// LimitResponseTypeReject means that requests that can not be executed right now are rejected
+	LimitResponseTypeReject LimitResponseType = "Reject"
+)
+
+// QueuingConfiguration holds the configuration parameters for queuing
+type QueuingConfiguration struct {
+	// `queues` is the number of queues for this priority level. The
+	// queues exist independently at each apiserver. The value must be
+	// positive. Setting it to 1 effectively precludes
+	// shuffle sharding and thus makes the distinguisher method of
+	// associated flow schemas irrelevant. This field has a default
+	// value of 64.
+	// +optional
+	Queues int32 `json:"queues" protobuf:"varint,1,opt,name=queues"`
+
+	// `handSize` is a small positive number that configures the
+	// shuffle sharding of requests into queues. When enqueuing a request
+	// at this priority level the request's flow identifier (a string
+	// pair) is hashed and the hash value is used to shuffle the list
+	// of queues and deal a hand of the size specified here. The
+	// request is put into one of the shortest queues in that hand.
+	// `handSize` must be no larger than `queues`, and should be
+	// significantly smaller (so that a few heavy flows do not
+	// saturate most of the queues). See the user-facing
+	// documentation for more extensive guidance on setting this
+	// field. This field has a default value of 8.
+	// +optional
+	HandSize int32 `json:"handSize" protobuf:"varint,2,opt,name=handSize"`
+
+	// `queueLengthLimit` is the maximum number of requests allowed to
+	// be waiting in a given queue of this priority level at a time;
+	// excess requests are rejected. This value must be positive. If
+	// not specified, it will be defaulted to 50.
+	// +optional
+	QueueLengthLimit int32 `json:"queueLengthLimit" protobuf:"varint,3,opt,name=queueLengthLimit"`
+}
+
+// PriorityLevelConfigurationConditionType is a valid value for PriorityLevelConfigurationStatusCondition.Type
+type PriorityLevelConfigurationConditionType string
+
+// PriorityLevelConfigurationStatus represents the current state of a "request-priority".
+type PriorityLevelConfigurationStatus struct {
+	// `conditions` is the current state of "request-priority".
+	// +listType=map
+	// +listMapKey=type
+	// +optional
+	Conditions []PriorityLevelConfigurationCondition `json:"conditions,omitempty" protobuf:"bytes,1,rep,name=conditions"`
+}
+
+// PriorityLevelConfigurationCondition defines the condition of priority level.
+type PriorityLevelConfigurationCondition struct {
+	// `type` is the type of the condition.
+	// Required.
+	Type PriorityLevelConfigurationConditionType `json:"type,omitempty" protobuf:"bytes,1,opt,name=type"`
+	// `status` is the status of the condition.
+	// Can be True, False, Unknown.
+	// Required.
+	Status ConditionStatus `json:"status,omitempty" protobuf:"bytes,2,opt,name=status"`
+	// `lastTransitionTime` is the last time the condition transitioned from one status to another.
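The queues/handSize mechanics described above can be sketched as follows. The dealing logic is deliberately simplified (the real apiserver uses a dedicated shuffle-sharding dealer that guarantees distinct cards), so treat this only as an illustration of "hash the flow, deal a hand, pick the shortest queue".

package main

import (
	"fmt"
	"hash/fnv"
)

// pickQueue hashes the flow identifier, uses successive base-n digits of the
// hash as a "hand" of handSize queue indices, and returns the least-loaded
// queue in that hand. Unlike the real dealer, this sketch may deal duplicate
// indices; it only shows the shape of the algorithm.
func pickQueue(flowID string, queueLengths []int, handSize int) int {
	h := fnv.New64a()
	h.Write([]byte(flowID))
	v := h.Sum64()

	n := uint64(len(queueLengths))
	best := -1
	for i := 0; i < handSize; i++ {
		idx := int(v % n)
		v /= n
		if best == -1 || queueLengths[idx] < queueLengths[best] {
			best = idx
		}
	}
	return best
}

func main() {
	lengths := []int{3, 0, 7, 2, 5, 1, 4, 6} // current depth of each queue
	fmt.Println(pickQueue("tenant-a,ByUser", lengths, 3))
}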
+ LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,3,opt,name=lastTransitionTime"` + // `reason` is a unique, one-word, CamelCase reason for the condition's last transition. + Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"` + // `message` is a human-readable message indicating details about last transition. + Message string `json:"message,omitempty" protobuf:"bytes,5,opt,name=message"` +} + +// ConditionStatus is the status of the condition. +type ConditionStatus string + +// These are valid condition statuses. "ConditionTrue" means a resource is in the condition. +// "ConditionFalse" means a resource is not in the condition. "ConditionUnknown" means kubernetes +// can't decide if a resource is in the condition or not. In the future, we could add other +// intermediate conditions, e.g. ConditionDegraded. +const ( + ConditionTrue ConditionStatus = "True" + ConditionFalse ConditionStatus = "False" + ConditionUnknown ConditionStatus = "Unknown" +) diff --git a/vendor/k8s.io/api/flowcontrol/v1beta2/types_swagger_doc_generated.go b/vendor/k8s.io/api/flowcontrol/v1beta2/types_swagger_doc_generated.go new file mode 100644 index 0000000000000..4775a8e993c5f --- /dev/null +++ b/vendor/k8s.io/api/flowcontrol/v1beta2/types_swagger_doc_generated.go @@ -0,0 +1,261 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta2 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-generated-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. +var map_FlowDistinguisherMethod = map[string]string{ + "": "FlowDistinguisherMethod specifies the method of a flow distinguisher.", + "type": "`type` is the type of flow distinguisher method The supported types are \"ByUser\" and \"ByNamespace\". Required.", +} + +func (FlowDistinguisherMethod) SwaggerDoc() map[string]string { + return map_FlowDistinguisherMethod +} + +var map_FlowSchema = map[string]string{ + "": "FlowSchema defines the schema of a group of flows. Note that a flow is made up of a set of inbound API requests with similar attributes and is identified by a pair of strings: the name of the FlowSchema and a \"flow distinguisher\".", + "metadata": "`metadata` is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "`spec` is the specification of the desired behavior of a FlowSchema. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status",
+	"status":   "`status` is the current status of a FlowSchema. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status",
+}
+
+func (FlowSchema) SwaggerDoc() map[string]string {
+	return map_FlowSchema
+}
+
+var map_FlowSchemaCondition = map[string]string{
+	"":                   "FlowSchemaCondition describes conditions for a FlowSchema.",
+	"type":               "`type` is the type of the condition. Required.",
+	"status":             "`status` is the status of the condition. Can be True, False, Unknown. Required.",
+	"lastTransitionTime": "`lastTransitionTime` is the last time the condition transitioned from one status to another.",
+	"reason":             "`reason` is a unique, one-word, CamelCase reason for the condition's last transition.",
+	"message":            "`message` is a human-readable message indicating details about last transition.",
+}
+
+func (FlowSchemaCondition) SwaggerDoc() map[string]string {
+	return map_FlowSchemaCondition
+}
+
+var map_FlowSchemaList = map[string]string{
+	"":         "FlowSchemaList is a list of FlowSchema objects.",
+	"metadata": "`metadata` is the standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+	"items":    "`items` is a list of FlowSchemas.",
+}
+
+func (FlowSchemaList) SwaggerDoc() map[string]string {
+	return map_FlowSchemaList
+}
+
+var map_FlowSchemaSpec = map[string]string{
+	"":                           "FlowSchemaSpec describes what the FlowSchema's specification looks like.",
+	"priorityLevelConfiguration": "`priorityLevelConfiguration` should reference a PriorityLevelConfiguration in the cluster. If the reference cannot be resolved, the FlowSchema will be ignored and marked as invalid in its status. Required.",
+	"matchingPrecedence":         "`matchingPrecedence` is used to choose among the FlowSchemas that match a given request. The chosen FlowSchema is among those with the numerically lowest (which we take to be logically highest) MatchingPrecedence. Each MatchingPrecedence value must be in the range [1,10000]. Note that if the precedence is not specified, it will be set to 1000 as default.",
+	"distinguisherMethod":        "`distinguisherMethod` defines how to compute the flow distinguisher for requests that match this schema. `nil` specifies that the distinguisher is disabled and thus will always be the empty string.",
+	"rules":                      "`rules` describes which requests will match this flow schema. This FlowSchema matches a request if and only if at least one member of rules matches the request. if it is an empty slice, there will be no requests matching the FlowSchema.",
+}
+
+func (FlowSchemaSpec) SwaggerDoc() map[string]string {
+	return map_FlowSchemaSpec
+}
+
+var map_FlowSchemaStatus = map[string]string{
+	"":           "FlowSchemaStatus represents the current state of a FlowSchema.",
+	"conditions": "`conditions` is a list of the current states of FlowSchema.",
+}
+
+func (FlowSchemaStatus) SwaggerDoc() map[string]string {
+	return map_FlowSchemaStatus
+}
+
+var map_GroupSubject = map[string]string{
+	"":     "GroupSubject holds detailed information for group-kind subject.",
+	"name": "name is the user group that matches, or \"*\" to match all user groups. See https://github.com/kubernetes/apiserver/blob/master/pkg/authentication/user/user.go for some well-known group names.
Required.", +} + +func (GroupSubject) SwaggerDoc() map[string]string { + return map_GroupSubject +} + +var map_LimitResponse = map[string]string{ + "": "LimitResponse defines how to handle requests that can not be executed right now.", + "type": "`type` is \"Queue\" or \"Reject\". \"Queue\" means that requests that can not be executed upon arrival are held in a queue until they can be executed or a queuing limit is reached. \"Reject\" means that requests that can not be executed upon arrival are rejected. Required.", + "queuing": "`queuing` holds the configuration parameters for queuing. This field may be non-empty only if `type` is `\"Queue\"`.", +} + +func (LimitResponse) SwaggerDoc() map[string]string { + return map_LimitResponse +} + +var map_LimitedPriorityLevelConfiguration = map[string]string{ + "": "LimitedPriorityLevelConfiguration specifies how to handle requests that are subject to limits. It addresses two issues:\n * How are requests for this priority level limited?\n * What should be done with requests that exceed the limit?", + "assuredConcurrencyShares": "`assuredConcurrencyShares` (ACS) configures the execution limit, which is a limit on the number of requests of this priority level that may be exeucting at a given time. ACS must be a positive number. The server's concurrency limit (SCL) is divided among the concurrency-controlled priority levels in proportion to their assured concurrency shares. This produces the assured concurrency value (ACV) ", + "limitResponse": "`limitResponse` indicates what to do with requests that can not be executed right now", +} + +func (LimitedPriorityLevelConfiguration) SwaggerDoc() map[string]string { + return map_LimitedPriorityLevelConfiguration +} + +var map_NonResourcePolicyRule = map[string]string{ + "": "NonResourcePolicyRule is a predicate that matches non-resource requests according to their verb and the target non-resource URL. A NonResourcePolicyRule matches a request if and only if both (a) at least one member of verbs matches the request and (b) at least one member of nonResourceURLs matches the request.", + "verbs": "`verbs` is a list of matching verbs and may not be empty. \"*\" matches all verbs. If it is present, it must be the only entry. Required.", + "nonResourceURLs": "`nonResourceURLs` is a set of url prefixes that a user should have access to and may not be empty. For example:\n - \"/healthz\" is legal\n - \"/hea*\" is illegal\n - \"/hea\" is legal but matches nothing\n - \"/hea/*\" also matches nothing\n - \"/healthz/*\" matches all per-component health checks.\n\"*\" matches all non-resource urls. if it is present, it must be the only entry. Required.", +} + +func (NonResourcePolicyRule) SwaggerDoc() map[string]string { + return map_NonResourcePolicyRule +} + +var map_PolicyRulesWithSubjects = map[string]string{ + "": "PolicyRulesWithSubjects prescribes a test that applies to a request to an apiserver. The test considers the subject making the request, the verb being requested, and the resource to be acted upon. This PolicyRulesWithSubjects matches a request if and only if both (a) at least one member of subjects matches the request and (b) at least one member of resourceRules or nonResourceRules matches the request.", + "subjects": "subjects is the list of normal user, serviceaccount, or group that this rule cares about. There must be at least one member in this slice. A slice that includes both the system:authenticated and system:unauthenticated user groups matches every request. 
Required.", + "resourceRules": "`resourceRules` is a slice of ResourcePolicyRules that identify matching requests according to their verb and the target resource. At least one of `resourceRules` and `nonResourceRules` has to be non-empty.", + "nonResourceRules": "`nonResourceRules` is a list of NonResourcePolicyRules that identify matching requests according to their verb and the target non-resource URL.", +} + +func (PolicyRulesWithSubjects) SwaggerDoc() map[string]string { + return map_PolicyRulesWithSubjects +} + +var map_PriorityLevelConfiguration = map[string]string{ + "": "PriorityLevelConfiguration represents the configuration of a priority level.", + "metadata": "`metadata` is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "`spec` is the specification of the desired behavior of a \"request-priority\". More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "status": "`status` is the current status of a \"request-priority\". More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", +} + +func (PriorityLevelConfiguration) SwaggerDoc() map[string]string { + return map_PriorityLevelConfiguration +} + +var map_PriorityLevelConfigurationCondition = map[string]string{ + "": "PriorityLevelConfigurationCondition defines the condition of priority level.", + "type": "`type` is the type of the condition. Required.", + "status": "`status` is the status of the condition. Can be True, False, Unknown. Required.", + "lastTransitionTime": "`lastTransitionTime` is the last time the condition transitioned from one status to another.", + "reason": "`reason` is a unique, one-word, CamelCase reason for the condition's last transition.", + "message": "`message` is a human-readable message indicating details about last transition.", +} + +func (PriorityLevelConfigurationCondition) SwaggerDoc() map[string]string { + return map_PriorityLevelConfigurationCondition +} + +var map_PriorityLevelConfigurationList = map[string]string{ + "": "PriorityLevelConfigurationList is a list of PriorityLevelConfiguration objects.", + "metadata": "`metadata` is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "items": "`items` is a list of request-priorities.", +} + +func (PriorityLevelConfigurationList) SwaggerDoc() map[string]string { + return map_PriorityLevelConfigurationList +} + +var map_PriorityLevelConfigurationReference = map[string]string{ + "": "PriorityLevelConfigurationReference contains information that points to the \"request-priority\" being used.", + "name": "`name` is the name of the priority level configuration being referenced Required.", +} + +func (PriorityLevelConfigurationReference) SwaggerDoc() map[string]string { + return map_PriorityLevelConfigurationReference +} + +var map_PriorityLevelConfigurationSpec = map[string]string{ + "": "PriorityLevelConfigurationSpec specifies the configuration of a priority level.", + "type": "`type` indicates whether this priority level is subject to limitation on request execution. A value of `\"Exempt\"` means that requests of this priority level are not subject to a limit (and thus are never queued) and do not detract from the capacity made available to other priority levels. 
A value of `\"Limited\"` means that (a) requests of this priority level _are_ subject to limits and (b) some of the server's limited capacity is made available exclusively to this priority level. Required.", + "limited": "`limited` specifies how requests are handled for a Limited priority level. This field must be non-empty if and only if `type` is `\"Limited\"`.", +} + +func (PriorityLevelConfigurationSpec) SwaggerDoc() map[string]string { + return map_PriorityLevelConfigurationSpec +} + +var map_PriorityLevelConfigurationStatus = map[string]string{ + "": "PriorityLevelConfigurationStatus represents the current state of a \"request-priority\".", + "conditions": "`conditions` is the current state of \"request-priority\".", +} + +func (PriorityLevelConfigurationStatus) SwaggerDoc() map[string]string { + return map_PriorityLevelConfigurationStatus +} + +var map_QueuingConfiguration = map[string]string{ + "": "QueuingConfiguration holds the configuration parameters for queuing.", + "queues": "`queues` is the number of queues for this priority level. The queues exist independently at each apiserver. The value must be positive. Setting it to 1 effectively precludes shuffle sharding and thus makes the distinguisher method of associated flow schemas irrelevant. This field has a default value of 64.", + "handSize": "`handSize` is a small positive number that configures the shuffle sharding of requests into queues. When enqueuing a request at this priority level the request's flow identifier (a string pair) is hashed and the hash value is used to shuffle the list of queues and deal a hand of the size specified here. The request is put into one of the shortest queues in that hand. `handSize` must be no larger than `queues`, and should be significantly smaller (so that a few heavy flows do not saturate most of the queues). See the user-facing documentation for more extensive guidance on setting this field. This field has a default value of 8.", + "queueLengthLimit": "`queueLengthLimit` is the maximum number of requests allowed to be waiting in a given queue of this priority level at a time; excess requests are rejected. This value must be positive. If not specified, it will be defaulted to 50.", +} + +func (QueuingConfiguration) SwaggerDoc() map[string]string { + return map_QueuingConfiguration +} + +var map_ResourcePolicyRule = map[string]string{ + "": "ResourcePolicyRule is a predicate that matches some resource requests, testing the request's verb and the target resource. A ResourcePolicyRule matches a resource request if and only if: (a) at least one member of verbs matches the request, (b) at least one member of apiGroups matches the request, (c) at least one member of resources matches the request, and (d) either (d1) the request does not specify a namespace (i.e., `Namespace==\"\"`) and clusterScope is true or (d2) the request specifies a namespace and at least one member of namespaces matches the request's namespace.", + "verbs": "`verbs` is a list of matching verbs and may not be empty. \"*\" matches all verbs and, if present, must be the only entry. Required.", + "apiGroups": "`apiGroups` is a list of matching API groups and may not be empty. \"*\" matches all API groups and, if present, must be the only entry. Required.", + "resources": "`resources` is a list of matching resources (i.e., lowercase and plural) with, if desired, subresource. For example, [ \"services\", \"nodes/status\" ]. This list may not be empty. \"*\" matches all resources and, if present, must be the only entry. 
Required.", + "clusterScope": "`clusterScope` indicates whether to match requests that do not specify a namespace (which happens either because the resource is not namespaced or the request targets all namespaces). If this field is omitted or false then the `namespaces` field must contain a non-empty list.", + "namespaces": "`namespaces` is a list of target namespaces that restricts matches. A request that specifies a target namespace matches only if either (a) this list contains that target namespace or (b) this list contains \"*\". Note that \"*\" matches any specified namespace but does not match a request that _does not specify_ a namespace (see the `clusterScope` field for that). This list may be empty, but only if `clusterScope` is true.", +} + +func (ResourcePolicyRule) SwaggerDoc() map[string]string { + return map_ResourcePolicyRule +} + +var map_ServiceAccountSubject = map[string]string{ + "": "ServiceAccountSubject holds detailed information for service-account-kind subject.", + "namespace": "`namespace` is the namespace of matching ServiceAccount objects. Required.", + "name": "`name` is the name of matching ServiceAccount objects, or \"*\" to match regardless of name. Required.", +} + +func (ServiceAccountSubject) SwaggerDoc() map[string]string { + return map_ServiceAccountSubject +} + +var map_Subject = map[string]string{ + "": "Subject matches the originator of a request, as identified by the request authentication system. There are three ways of matching an originator; by user, group, or service account.", + "kind": "`kind` indicates which one of the other fields is non-empty. Required", + "user": "`user` matches based on username.", + "group": "`group` matches based on user group name.", + "serviceAccount": "`serviceAccount` matches ServiceAccounts.", +} + +func (Subject) SwaggerDoc() map[string]string { + return map_Subject +} + +var map_UserSubject = map[string]string{ + "": "UserSubject holds detailed information for user-kind subject.", + "name": "`name` is the username that matches, or \"*\" to match all usernames. Required.", +} + +func (UserSubject) SwaggerDoc() map[string]string { + return map_UserSubject +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/flowcontrol/v1beta2/zz_generated.deepcopy.go b/vendor/k8s.io/api/flowcontrol/v1beta2/zz_generated.deepcopy.go new file mode 100644 index 0000000000000..e6288a687a95e --- /dev/null +++ b/vendor/k8s.io/api/flowcontrol/v1beta2/zz_generated.deepcopy.go @@ -0,0 +1,542 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1beta2 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FlowDistinguisherMethod) DeepCopyInto(out *FlowDistinguisherMethod) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlowDistinguisherMethod. +func (in *FlowDistinguisherMethod) DeepCopy() *FlowDistinguisherMethod { + if in == nil { + return nil + } + out := new(FlowDistinguisherMethod) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FlowSchema) DeepCopyInto(out *FlowSchema) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlowSchema. +func (in *FlowSchema) DeepCopy() *FlowSchema { + if in == nil { + return nil + } + out := new(FlowSchema) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *FlowSchema) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FlowSchemaCondition) DeepCopyInto(out *FlowSchemaCondition) { + *out = *in + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlowSchemaCondition. +func (in *FlowSchemaCondition) DeepCopy() *FlowSchemaCondition { + if in == nil { + return nil + } + out := new(FlowSchemaCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FlowSchemaList) DeepCopyInto(out *FlowSchemaList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]FlowSchema, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlowSchemaList. +func (in *FlowSchemaList) DeepCopy() *FlowSchemaList { + if in == nil { + return nil + } + out := new(FlowSchemaList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *FlowSchemaList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FlowSchemaSpec) DeepCopyInto(out *FlowSchemaSpec) { + *out = *in + out.PriorityLevelConfiguration = in.PriorityLevelConfiguration + if in.DistinguisherMethod != nil { + in, out := &in.DistinguisherMethod, &out.DistinguisherMethod + *out = new(FlowDistinguisherMethod) + **out = **in + } + if in.Rules != nil { + in, out := &in.Rules, &out.Rules + *out = make([]PolicyRulesWithSubjects, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlowSchemaSpec. 
+func (in *FlowSchemaSpec) DeepCopy() *FlowSchemaSpec { + if in == nil { + return nil + } + out := new(FlowSchemaSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FlowSchemaStatus) DeepCopyInto(out *FlowSchemaStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]FlowSchemaCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlowSchemaStatus. +func (in *FlowSchemaStatus) DeepCopy() *FlowSchemaStatus { + if in == nil { + return nil + } + out := new(FlowSchemaStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GroupSubject) DeepCopyInto(out *GroupSubject) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GroupSubject. +func (in *GroupSubject) DeepCopy() *GroupSubject { + if in == nil { + return nil + } + out := new(GroupSubject) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LimitResponse) DeepCopyInto(out *LimitResponse) { + *out = *in + if in.Queuing != nil { + in, out := &in.Queuing, &out.Queuing + *out = new(QueuingConfiguration) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LimitResponse. +func (in *LimitResponse) DeepCopy() *LimitResponse { + if in == nil { + return nil + } + out := new(LimitResponse) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LimitedPriorityLevelConfiguration) DeepCopyInto(out *LimitedPriorityLevelConfiguration) { + *out = *in + in.LimitResponse.DeepCopyInto(&out.LimitResponse) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LimitedPriorityLevelConfiguration. +func (in *LimitedPriorityLevelConfiguration) DeepCopy() *LimitedPriorityLevelConfiguration { + if in == nil { + return nil + } + out := new(LimitedPriorityLevelConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NonResourcePolicyRule) DeepCopyInto(out *NonResourcePolicyRule) { + *out = *in + if in.Verbs != nil { + in, out := &in.Verbs, &out.Verbs + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.NonResourceURLs != nil { + in, out := &in.NonResourceURLs, &out.NonResourceURLs + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NonResourcePolicyRule. +func (in *NonResourcePolicyRule) DeepCopy() *NonResourcePolicyRule { + if in == nil { + return nil + } + out := new(NonResourcePolicyRule) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PolicyRulesWithSubjects) DeepCopyInto(out *PolicyRulesWithSubjects) { + *out = *in + if in.Subjects != nil { + in, out := &in.Subjects, &out.Subjects + *out = make([]Subject, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ResourceRules != nil { + in, out := &in.ResourceRules, &out.ResourceRules + *out = make([]ResourcePolicyRule, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.NonResourceRules != nil { + in, out := &in.NonResourceRules, &out.NonResourceRules + *out = make([]NonResourcePolicyRule, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicyRulesWithSubjects. +func (in *PolicyRulesWithSubjects) DeepCopy() *PolicyRulesWithSubjects { + if in == nil { + return nil + } + out := new(PolicyRulesWithSubjects) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PriorityLevelConfiguration) DeepCopyInto(out *PriorityLevelConfiguration) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PriorityLevelConfiguration. +func (in *PriorityLevelConfiguration) DeepCopy() *PriorityLevelConfiguration { + if in == nil { + return nil + } + out := new(PriorityLevelConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PriorityLevelConfiguration) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PriorityLevelConfigurationCondition) DeepCopyInto(out *PriorityLevelConfigurationCondition) { + *out = *in + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PriorityLevelConfigurationCondition. +func (in *PriorityLevelConfigurationCondition) DeepCopy() *PriorityLevelConfigurationCondition { + if in == nil { + return nil + } + out := new(PriorityLevelConfigurationCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PriorityLevelConfigurationList) DeepCopyInto(out *PriorityLevelConfigurationList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]PriorityLevelConfiguration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PriorityLevelConfigurationList. +func (in *PriorityLevelConfigurationList) DeepCopy() *PriorityLevelConfigurationList { + if in == nil { + return nil + } + out := new(PriorityLevelConfigurationList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *PriorityLevelConfigurationList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PriorityLevelConfigurationReference) DeepCopyInto(out *PriorityLevelConfigurationReference) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PriorityLevelConfigurationReference. +func (in *PriorityLevelConfigurationReference) DeepCopy() *PriorityLevelConfigurationReference { + if in == nil { + return nil + } + out := new(PriorityLevelConfigurationReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PriorityLevelConfigurationSpec) DeepCopyInto(out *PriorityLevelConfigurationSpec) { + *out = *in + if in.Limited != nil { + in, out := &in.Limited, &out.Limited + *out = new(LimitedPriorityLevelConfiguration) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PriorityLevelConfigurationSpec. +func (in *PriorityLevelConfigurationSpec) DeepCopy() *PriorityLevelConfigurationSpec { + if in == nil { + return nil + } + out := new(PriorityLevelConfigurationSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PriorityLevelConfigurationStatus) DeepCopyInto(out *PriorityLevelConfigurationStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]PriorityLevelConfigurationCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PriorityLevelConfigurationStatus. +func (in *PriorityLevelConfigurationStatus) DeepCopy() *PriorityLevelConfigurationStatus { + if in == nil { + return nil + } + out := new(PriorityLevelConfigurationStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *QueuingConfiguration) DeepCopyInto(out *QueuingConfiguration) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QueuingConfiguration. +func (in *QueuingConfiguration) DeepCopy() *QueuingConfiguration { + if in == nil { + return nil + } + out := new(QueuingConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ResourcePolicyRule) DeepCopyInto(out *ResourcePolicyRule) { + *out = *in + if in.Verbs != nil { + in, out := &in.Verbs, &out.Verbs + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.APIGroups != nil { + in, out := &in.APIGroups, &out.APIGroups + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Namespaces != nil { + in, out := &in.Namespaces, &out.Namespaces + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourcePolicyRule. +func (in *ResourcePolicyRule) DeepCopy() *ResourcePolicyRule { + if in == nil { + return nil + } + out := new(ResourcePolicyRule) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceAccountSubject) DeepCopyInto(out *ServiceAccountSubject) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceAccountSubject. +func (in *ServiceAccountSubject) DeepCopy() *ServiceAccountSubject { + if in == nil { + return nil + } + out := new(ServiceAccountSubject) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Subject) DeepCopyInto(out *Subject) { + *out = *in + if in.User != nil { + in, out := &in.User, &out.User + *out = new(UserSubject) + **out = **in + } + if in.Group != nil { + in, out := &in.Group, &out.Group + *out = new(GroupSubject) + **out = **in + } + if in.ServiceAccount != nil { + in, out := &in.ServiceAccount, &out.ServiceAccount + *out = new(ServiceAccountSubject) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Subject. +func (in *Subject) DeepCopy() *Subject { + if in == nil { + return nil + } + out := new(Subject) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UserSubject) DeepCopyInto(out *UserSubject) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserSubject. +func (in *UserSubject) DeepCopy() *UserSubject { + if in == nil { + return nil + } + out := new(UserSubject) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/api/flowcontrol/v1beta2/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/flowcontrol/v1beta2/zz_generated.prerelease-lifecycle.go new file mode 100644 index 0000000000000..00cefde419f97 --- /dev/null +++ b/vendor/k8s.io/api/flowcontrol/v1beta2/zz_generated.prerelease-lifecycle.go @@ -0,0 +1,94 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by prerelease-lifecycle-gen. DO NOT EDIT. + +package v1beta2 + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *FlowSchema) APILifecycleIntroduced() (major, minor int) { + return 1, 23 +} + +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. +func (in *FlowSchema) APILifecycleDeprecated() (major, minor int) { + return 1, 26 +} + +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. +func (in *FlowSchema) APILifecycleRemoved() (major, minor int) { + return 1, 29 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *FlowSchemaList) APILifecycleIntroduced() (major, minor int) { + return 1, 23 +} + +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. +func (in *FlowSchemaList) APILifecycleDeprecated() (major, minor int) { + return 1, 26 +} + +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. +func (in *FlowSchemaList) APILifecycleRemoved() (major, minor int) { + return 1, 29 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *PriorityLevelConfiguration) APILifecycleIntroduced() (major, minor int) { + return 1, 23 +} + +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. +func (in *PriorityLevelConfiguration) APILifecycleDeprecated() (major, minor int) { + return 1, 26 +} + +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. 
+// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. +func (in *PriorityLevelConfiguration) APILifecycleRemoved() (major, minor int) { + return 1, 29 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *PriorityLevelConfigurationList) APILifecycleIntroduced() (major, minor int) { + return 1, 23 +} + +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. +func (in *PriorityLevelConfigurationList) APILifecycleDeprecated() (major, minor int) { + return 1, 26 +} + +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. +func (in *PriorityLevelConfigurationList) APILifecycleRemoved() (major, minor int) { + return 1, 29 +} diff --git a/vendor/k8s.io/api/networking/v1/generated.proto b/vendor/k8s.io/api/networking/v1/generated.proto index 8f23477d8fbc6..e98ab179e2cc0 100644 --- a/vendor/k8s.io/api/networking/v1/generated.proto +++ b/vendor/k8s.io/api/networking/v1/generated.proto @@ -35,8 +35,8 @@ option go_package = "v1"; message HTTPIngressPath { // Path is matched against the path of an incoming request. Currently it can // contain characters disallowed from the conventional "path" part of a URL - // as defined by RFC 3986. Paths must begin with a '/'. When unspecified, - // all paths from incoming requests are matched. + // as defined by RFC 3986. Paths must begin with a '/' and must be present + // when using PathType with value "Exact" or "Prefix". // +optional optional string path = 1; @@ -167,16 +167,13 @@ message IngressClassParametersReference { // Scope represents if this refers to a cluster or namespace scoped resource. // This may be set to "Cluster" (default) or "Namespace". - // Field can be enabled with IngressClassNamespacedParams feature gate. // +optional - // +featureGate=IngressClassNamespacedParams optional string scope = 4; // Namespace is the namespace of the resource being referenced. This field is // required when scope is set to "Namespace" and must be unset when scope is set to // "Cluster". // +optional - // +featureGate=IngressClassNamespacedParams optional string namespace = 5; } @@ -439,8 +436,8 @@ message NetworkPolicyPort { // should be allowed by the policy. This field cannot be defined if the port field // is not defined or if the port field is defined as a named (string) port. // The endPort must be equal or greater than port. - // This feature is in Alpha state and should be enabled using the Feature Gate - // "NetworkPolicyEndPort". + // This feature is in Beta state and is enabled by default. + // It can be disabled using the Feature Gate "NetworkPolicyEndPort". 
// +optional optional int32 endPort = 3; } diff --git a/vendor/k8s.io/api/networking/v1/types.go b/vendor/k8s.io/api/networking/v1/types.go index 6cc210b8940fd..c49110e5cae09 100644 --- a/vendor/k8s.io/api/networking/v1/types.go +++ b/vendor/k8s.io/api/networking/v1/types.go @@ -40,6 +40,7 @@ type NetworkPolicy struct { // PolicyType string describes the NetworkPolicy type // This type is beta-level in 1.8 +// +enum type PolicyType string const ( @@ -152,8 +153,8 @@ type NetworkPolicyPort struct { // should be allowed by the policy. This field cannot be defined if the port field // is not defined or if the port field is defined as a named (string) port. // The endPort must be equal or greater than port. - // This feature is in Alpha state and should be enabled using the Feature Gate - // "NetworkPolicyEndPort". + // This feature is in Beta state and is enabled by default. + // It can be disabled using the Feature Gate "NetworkPolicyEndPort". // +optional EndPort *int32 `json:"endPort,omitempty" protobuf:"bytes,3,opt,name=endPort"` } @@ -373,6 +374,7 @@ type HTTPIngressRuleValue struct { } // PathType represents the type of path referred to by a HTTPIngressPath. +// +enum type PathType string const ( @@ -407,8 +409,8 @@ const ( type HTTPIngressPath struct { // Path is matched against the path of an incoming request. Currently it can // contain characters disallowed from the conventional "path" part of a URL - // as defined by RFC 3986. Paths must begin with a '/'. When unspecified, - // all paths from incoming requests are matched. + // as defined by RFC 3986. Paths must begin with a '/' and must be present + // when using PathType with value "Exact" or "Prefix". // +optional Path string `json:"path,omitempty" protobuf:"bytes,1,opt,name=path"` @@ -426,7 +428,7 @@ type HTTPIngressPath struct { // the IngressClass. Implementations can treat this as a separate PathType // or treat it identically to Prefix or Exact path types. // Implementations are required to support all path types. - PathType *PathType `json:"pathType,omitempty" protobuf:"bytes,3,opt,name=pathType"` + PathType *PathType `json:"pathType" protobuf:"bytes,3,opt,name=pathType"` // Backend defines the referenced service endpoint to which the traffic // will be forwarded to. @@ -534,15 +536,12 @@ type IngressClassParametersReference struct { Name string `json:"name" protobuf:"bytes,3,opt,name=name"` // Scope represents if this refers to a cluster or namespace scoped resource. // This may be set to "Cluster" (default) or "Namespace". - // Field can be enabled with IngressClassNamespacedParams feature gate. // +optional - // +featureGate=IngressClassNamespacedParams Scope *string `json:"scope" protobuf:"bytes,4,opt,name=scope"` // Namespace is the namespace of the resource being referenced. This field is // required when scope is set to "Namespace" and must be unset when scope is set to // "Cluster". // +optional - // +featureGate=IngressClassNamespacedParams Namespace *string `json:"namespace,omitempty" protobuf:"bytes,5,opt,name=namespace"` } diff --git a/vendor/k8s.io/api/networking/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/networking/v1/types_swagger_doc_generated.go index 88f9a8f130b2a..38bd9c502b9ed 100644 --- a/vendor/k8s.io/api/networking/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/networking/v1/types_swagger_doc_generated.go @@ -29,7 +29,7 @@ package v1 // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. 
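The NetworkPolicyPort changes above restate the documented endPort contract: endPort may only be set when port is defined as a numeric (not named) port, and it must be greater than or equal to port. A minimal Go sketch of that validation rule, using a simplified, hypothetical policyPort struct in place of the real API type:

package main

import (
	"errors"
	"fmt"
)

// policyPort is a simplified stand-in for NetworkPolicyPort: port holds a
// numeric port (nil if unset or named), name holds a named port (nil
// otherwise), and endPort optionally closes an inclusive port range.
type policyPort struct {
	port    *int32
	name    *string
	endPort *int32
}

// validateEndPort enforces the rules stated in the endPort documentation.
func validateEndPort(p policyPort) error {
	if p.endPort == nil {
		return nil // no range requested, nothing to check
	}
	if p.port == nil || p.name != nil {
		return errors.New("endPort requires port to be defined as a numeric port")
	}
	if *p.endPort < *p.port {
		return errors.New("endPort must be equal to or greater than port")
	}
	return nil
}

func main() {
	start, end := int32(32000), int32(32768)
	fmt.Println(validateEndPort(policyPort{port: &start, endPort: &end})) // <nil>
	fmt.Println(validateEndPort(policyPort{endPort: &end}))               // error
}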
var map_HTTPIngressPath = map[string]string{ "": "HTTPIngressPath associates a path with a backend. Incoming urls matching the path are forwarded to the backend.", - "path": "Path is matched against the path of an incoming request. Currently it can contain characters disallowed from the conventional \"path\" part of a URL as defined by RFC 3986. Paths must begin with a '/'. When unspecified, all paths from incoming requests are matched.", + "path": "Path is matched against the path of an incoming request. Currently it can contain characters disallowed from the conventional \"path\" part of a URL as defined by RFC 3986. Paths must begin with a '/' and must be present when using PathType with value \"Exact\" or \"Prefix\".", "pathType": "PathType determines the interpretation of the Path matching. PathType can be one of the following values: * Exact: Matches the URL path exactly. * Prefix: Matches based on a URL path prefix split by '/'. Matching is\n done on a path element by element basis. A path element refers to the\n list of labels in the path split by the '/' separator. A request is a\n match for path p if p is an element-wise prefix of the request\n path. Note that if the last element of the path is a substring\n of the last element in request path, it is not a match (e.g. /foo/bar\n matches /foo/bar/baz, but does not match /foo/barbaz).\n* ImplementationSpecific: Interpretation of the Path matching is up to\n the IngressClass. Implementations can treat this as a separate PathType\n or treat it identically to Prefix or Exact path types.\nImplementations are required to support all path types.", "backend": "Backend defines the referenced service endpoint to which the traffic will be forwarded to.", } @@ -103,7 +103,7 @@ var map_IngressClassParametersReference = map[string]string{ "apiGroup": "APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required.", "kind": "Kind is the type of resource being referenced.", "name": "Name is the name of resource being referenced.", - "scope": "Scope represents if this refers to a cluster or namespace scoped resource. This may be set to \"Cluster\" (default) or \"Namespace\". Field can be enabled with IngressClassNamespacedParams feature gate.", + "scope": "Scope represents if this refers to a cluster or namespace scoped resource. This may be set to \"Cluster\" (default) or \"Namespace\".", "namespace": "Namespace is the namespace of the resource being referenced. This field is required when scope is set to \"Namespace\" and must be unset when scope is set to \"Cluster\".", } @@ -244,7 +244,7 @@ var map_NetworkPolicyPort = map[string]string{ "": "NetworkPolicyPort describes a port to allow traffic on", "protocol": "The protocol (TCP, UDP, or SCTP) which traffic must match. If not specified, this field defaults to TCP.", "port": "The port on the given protocol. This can either be a numerical or named port on a pod. If this field is not provided, this matches all port names and numbers. If present, only traffic on the specified protocol AND port will be matched.", - "endPort": "If set, indicates that the range of ports from port to endPort, inclusive, should be allowed by the policy. This field cannot be defined if the port field is not defined or if the port field is defined as a named (string) port. The endPort must be equal or greater than port. 
This feature is in Alpha state and should be enabled using the Feature Gate \"NetworkPolicyEndPort\".", + "endPort": "If set, indicates that the range of ports from port to endPort, inclusive, should be allowed by the policy. This field cannot be defined if the port field is not defined or if the port field is defined as a named (string) port. The endPort must be equal or greater than port. This feature is in Beta state and is enabled by default. It can be disabled using the Feature Gate \"NetworkPolicyEndPort\".", } func (NetworkPolicyPort) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/networking/v1/well_known_annotations.go b/vendor/k8s.io/api/networking/v1/well_known_annotations.go new file mode 100644 index 0000000000000..0e3754d5cd444 --- /dev/null +++ b/vendor/k8s.io/api/networking/v1/well_known_annotations.go @@ -0,0 +1,25 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +const ( + // AnnotationIsDefaultIngressClass can be used to indicate that an + // IngressClass should be considered default. When a single IngressClass + // resource has this annotation set to true, new Ingress resources without a + // class specified will be assigned this default class. + AnnotationIsDefaultIngressClass = "ingressclass.kubernetes.io/is-default-class" +) diff --git a/vendor/k8s.io/api/networking/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/networking/v1/zz_generated.deepcopy.go index 76e339aed9eb2..82677ca8b9ef0 100644 --- a/vendor/k8s.io/api/networking/v1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/networking/v1/zz_generated.deepcopy.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* diff --git a/vendor/k8s.io/api/networking/v1beta1/generated.proto b/vendor/k8s.io/api/networking/v1beta1/generated.proto index acb6e859cfcf5..ef8cceff4372c 100644 --- a/vendor/k8s.io/api/networking/v1beta1/generated.proto +++ b/vendor/k8s.io/api/networking/v1beta1/generated.proto @@ -35,8 +35,8 @@ option go_package = "v1beta1"; message HTTPIngressPath { // Path is matched against the path of an incoming request. Currently it can // contain characters disallowed from the conventional "path" part of a URL - // as defined by RFC 3986. Paths must begin with a '/'. When unspecified, - // all paths from incoming requests are matched. + // as defined by RFC 3986. Paths must begin with a '/' and must be present + // when using PathType with value "Exact" or "Prefix". // +optional optional string path = 1; @@ -154,16 +154,12 @@ message IngressClassParametersReference { // Scope represents if this refers to a cluster or namespace scoped resource. // This may be set to "Cluster" (default) or "Namespace". - // Field can be enabled with IngressClassNamespacedParams feature gate. - // +optional - // +featureGate=IngressClassNamespacedParams optional string scope = 4; // Namespace is the namespace of the resource being referenced. 
This field is // required when scope is set to "Namespace" and must be unset when scope is set to // "Cluster". // +optional - // +featureGate=IngressClassNamespacedParams optional string namespace = 5; } diff --git a/vendor/k8s.io/api/networking/v1beta1/types.go b/vendor/k8s.io/api/networking/v1beta1/types.go index 09279d6938f5f..1bfdcd0915bba 100644 --- a/vendor/k8s.io/api/networking/v1beta1/types.go +++ b/vendor/k8s.io/api/networking/v1beta1/types.go @@ -228,8 +228,8 @@ const ( type HTTPIngressPath struct { // Path is matched against the path of an incoming request. Currently it can // contain characters disallowed from the conventional "path" part of a URL - // as defined by RFC 3986. Paths must begin with a '/'. When unspecified, - // all paths from incoming requests are matched. + // as defined by RFC 3986. Paths must begin with a '/' and must be present + // when using PathType with value "Exact" or "Prefix". // +optional Path string `json:"path,omitempty" protobuf:"bytes,1,opt,name=path"` @@ -337,15 +337,11 @@ type IngressClassParametersReference struct { Name string `json:"name" protobuf:"bytes,3,opt,name=name"` // Scope represents if this refers to a cluster or namespace scoped resource. // This may be set to "Cluster" (default) or "Namespace". - // Field can be enabled with IngressClassNamespacedParams feature gate. - // +optional - // +featureGate=IngressClassNamespacedParams Scope *string `json:"scope" protobuf:"bytes,4,opt,name=scope"` // Namespace is the namespace of the resource being referenced. This field is // required when scope is set to "Namespace" and must be unset when scope is set to // "Cluster". // +optional - // +featureGate=IngressClassNamespacedParams Namespace *string `json:"namespace,omitempty" protobuf:"bytes,5,opt,name=namespace"` } diff --git a/vendor/k8s.io/api/networking/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/networking/v1beta1/types_swagger_doc_generated.go index 84337ad3ea84a..79c42a6ba4b4d 100644 --- a/vendor/k8s.io/api/networking/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/networking/v1beta1/types_swagger_doc_generated.go @@ -29,7 +29,7 @@ package v1beta1 // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_HTTPIngressPath = map[string]string{ "": "HTTPIngressPath associates a path with a backend. Incoming urls matching the path are forwarded to the backend.", - "path": "Path is matched against the path of an incoming request. Currently it can contain characters disallowed from the conventional \"path\" part of a URL as defined by RFC 3986. Paths must begin with a '/'. When unspecified, all paths from incoming requests are matched.", + "path": "Path is matched against the path of an incoming request. Currently it can contain characters disallowed from the conventional \"path\" part of a URL as defined by RFC 3986. Paths must begin with a '/' and must be present when using PathType with value \"Exact\" or \"Prefix\".", "pathType": "PathType determines the interpretation of the Path matching. PathType can be one of the following values: * Exact: Matches the URL path exactly. * Prefix: Matches based on a URL path prefix split by '/'. Matching is\n done on a path element by element basis. A path element refers to the\n list of labels in the path split by the '/' separator. A request is a\n match for path p if p is an element-wise prefix of the\n request path. Note that if the last element of the path is a substring\n of the last element in request path, it is not a match (e.g. 
/foo/bar\n matches /foo/bar/baz, but does not match /foo/barbaz).\n* ImplementationSpecific: Interpretation of the Path matching is up to\n the IngressClass. Implementations can treat this as a separate PathType\n or treat it identically to Prefix or Exact path types.\nImplementations are required to support all path types. Defaults to ImplementationSpecific.", "backend": "Backend defines the referenced service endpoint to which the traffic will be forwarded to.", } @@ -94,7 +94,7 @@ var map_IngressClassParametersReference = map[string]string{ "apiGroup": "APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required.", "kind": "Kind is the type of resource being referenced.", "name": "Name is the name of resource being referenced.", - "scope": "Scope represents if this refers to a cluster or namespace scoped resource. This may be set to \"Cluster\" (default) or \"Namespace\". Field can be enabled with IngressClassNamespacedParams feature gate.", + "scope": "Scope represents if this refers to a cluster or namespace scoped resource. This may be set to \"Cluster\" (default) or \"Namespace\".", "namespace": "Namespace is the namespace of the resource being referenced. This field is required when scope is set to \"Namespace\" and must be unset when scope is set to \"Cluster\".", } diff --git a/vendor/k8s.io/api/networking/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/networking/v1beta1/zz_generated.deepcopy.go index e1b4543d30a72..77259e368d618 100644 --- a/vendor/k8s.io/api/networking/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/networking/v1beta1/zz_generated.deepcopy.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* diff --git a/vendor/k8s.io/api/networking/v1beta1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/networking/v1beta1/zz_generated.prerelease-lifecycle.go index 5e69fd5d9f49b..e8b4c7ec7ff4b 100644 --- a/vendor/k8s.io/api/networking/v1beta1/zz_generated.prerelease-lifecycle.go +++ b/vendor/k8s.io/api/networking/v1beta1/zz_generated.prerelease-lifecycle.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* diff --git a/vendor/k8s.io/api/node/v1/generated.proto b/vendor/k8s.io/api/node/v1/generated.proto index 4a86999f14722..0b98cabb2f6f4 100644 --- a/vendor/k8s.io/api/node/v1/generated.proto +++ b/vendor/k8s.io/api/node/v1/generated.proto @@ -97,6 +97,7 @@ message Scheduling { // with a pod's existing nodeSelector. Any conflicts will cause the pod to // be rejected in admission. // +optional + // +mapType=atomic map nodeSelector = 1; // tolerations are appended (excluding duplicates) to pods running with this diff --git a/vendor/k8s.io/api/node/v1/types.go b/vendor/k8s.io/api/node/v1/types.go index b32cc36c49604..bfe947e1fcd4b 100644 --- a/vendor/k8s.io/api/node/v1/types.go +++ b/vendor/k8s.io/api/node/v1/types.go @@ -82,6 +82,7 @@ type Scheduling struct { // with a pod's existing nodeSelector. Any conflicts will cause the pod to // be rejected in admission. 
// +optional + // +mapType=atomic NodeSelector map[string]string `json:"nodeSelector,omitempty" protobuf:"bytes,1,opt,name=nodeSelector"` // tolerations are appended (excluding duplicates) to pods running with this diff --git a/vendor/k8s.io/api/node/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/node/v1/zz_generated.deepcopy.go index 35084da7e3cd4..c1424f045d59a 100644 --- a/vendor/k8s.io/api/node/v1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/node/v1/zz_generated.deepcopy.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* diff --git a/vendor/k8s.io/api/node/v1alpha1/generated.proto b/vendor/k8s.io/api/node/v1alpha1/generated.proto index 310ad490bf98d..d92e18ff333a9 100644 --- a/vendor/k8s.io/api/node/v1alpha1/generated.proto +++ b/vendor/k8s.io/api/node/v1alpha1/generated.proto @@ -43,7 +43,7 @@ message Overhead { // user or cluster provisioner, and referenced in the PodSpec. The Kubelet is // responsible for resolving the RuntimeClassName reference before running the // pod. For more details, see -// https://git.k8s.io/enhancements/keps/sig-node/runtime-class.md +// https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class message RuntimeClass { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional @@ -84,8 +84,8 @@ message RuntimeClassSpec { // Overhead represents the resource overhead associated with running a pod for a // given RuntimeClass. For more details, see - // https://git.k8s.io/enhancements/keps/sig-node/20190226-pod-overhead.md - // This field is alpha-level as of Kubernetes v1.15, and is only honored by servers that enable the PodOverhead feature. + // https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md + // This field is beta-level as of Kubernetes v1.18, and is only honored by servers that enable the PodOverhead feature. // +optional optional Overhead overhead = 2; @@ -106,6 +106,7 @@ message Scheduling { // with a pod's existing nodeSelector. Any conflicts will cause the pod to // be rejected in admission. // +optional + // +mapType=atomic map nodeSelector = 1; // tolerations are appended (excluding duplicates) to pods running with this diff --git a/vendor/k8s.io/api/node/v1alpha1/types.go b/vendor/k8s.io/api/node/v1alpha1/types.go index 03e7e6f333bcc..f11bcbb10ad93 100644 --- a/vendor/k8s.io/api/node/v1alpha1/types.go +++ b/vendor/k8s.io/api/node/v1alpha1/types.go @@ -31,7 +31,7 @@ import ( // user or cluster provisioner, and referenced in the PodSpec. The Kubelet is // responsible for resolving the RuntimeClassName reference before running the // pod. For more details, see -// https://git.k8s.io/enhancements/keps/sig-node/runtime-class.md +// https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class type RuntimeClass struct { metav1.TypeMeta `json:",inline"` // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata @@ -62,8 +62,8 @@ type RuntimeClassSpec struct { // Overhead represents the resource overhead associated with running a pod for a // given RuntimeClass. For more details, see - // https://git.k8s.io/enhancements/keps/sig-node/20190226-pod-overhead.md - // This field is alpha-level as of Kubernetes v1.15, and is only honored by servers that enable the PodOverhead feature. + // https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md + // This field is beta-level as of Kubernetes v1.18, and is only honored by servers that enable the PodOverhead feature. 
// +optional Overhead *Overhead `json:"overhead,omitempty" protobuf:"bytes,2,opt,name=overhead"` @@ -91,6 +91,7 @@ type Scheduling struct { // with a pod's existing nodeSelector. Any conflicts will cause the pod to // be rejected in admission. // +optional + // +mapType=atomic NodeSelector map[string]string `json:"nodeSelector,omitempty" protobuf:"bytes,1,opt,name=nodeSelector"` // tolerations are appended (excluding duplicates) to pods running with this diff --git a/vendor/k8s.io/api/node/v1alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/api/node/v1alpha1/types_swagger_doc_generated.go index d3011466bb4c3..5a259573c3e19 100644 --- a/vendor/k8s.io/api/node/v1alpha1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/node/v1alpha1/types_swagger_doc_generated.go @@ -37,7 +37,7 @@ func (Overhead) SwaggerDoc() map[string]string { } var map_RuntimeClass = map[string]string{ - "": "RuntimeClass defines a class of container runtime supported in the cluster. The RuntimeClass is used to determine which container runtime is used to run all containers in a pod. RuntimeClasses are (currently) manually defined by a user or cluster provisioner, and referenced in the PodSpec. The Kubelet is responsible for resolving the RuntimeClassName reference before running the pod. For more details, see https://git.k8s.io/enhancements/keps/sig-node/runtime-class.md", + "": "RuntimeClass defines a class of container runtime supported in the cluster. The RuntimeClass is used to determine which container runtime is used to run all containers in a pod. RuntimeClasses are (currently) manually defined by a user or cluster provisioner, and referenced in the PodSpec. The Kubelet is responsible for resolving the RuntimeClassName reference before running the pod. For more details, see https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class", "metadata": "More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "spec": "Specification of the RuntimeClass More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", } @@ -59,7 +59,7 @@ func (RuntimeClassList) SwaggerDoc() map[string]string { var map_RuntimeClassSpec = map[string]string{ "": "RuntimeClassSpec is a specification of a RuntimeClass. It contains parameters that are required to describe the RuntimeClass to the Container Runtime Interface (CRI) implementation, as well as any other components that need to understand how the pod will be run. The RuntimeClassSpec is immutable.", "runtimeHandler": "RuntimeHandler specifies the underlying runtime and configuration that the CRI implementation will use to handle pods of this class. The possible values are specific to the node & CRI configuration. It is assumed that all handlers are available on every node, and handlers of the same name are equivalent on every node. For example, a handler called \"runc\" might specify that the runc OCI runtime (using native Linux containers) will be used to run the containers in a pod. The RuntimeHandler must be lowercase, conform to the DNS Label (RFC 1123) requirements, and is immutable.", - "overhead": "Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. 
For more details, see https://git.k8s.io/enhancements/keps/sig-node/20190226-pod-overhead.md This field is alpha-level as of Kubernetes v1.15, and is only honored by servers that enable the PodOverhead feature.", + "overhead": "Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. For more details, see https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md This field is beta-level as of Kubernetes v1.18, and is only honored by servers that enable the PodOverhead feature.", "scheduling": "Scheduling holds the scheduling constraints to ensure that pods running with this RuntimeClass are scheduled to nodes that support it. If scheduling is nil, this RuntimeClass is assumed to be supported by all nodes.", } diff --git a/vendor/k8s.io/api/node/v1alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/api/node/v1alpha1/zz_generated.deepcopy.go index 20f8051835c93..96693626258d1 100644 --- a/vendor/k8s.io/api/node/v1alpha1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/node/v1alpha1/zz_generated.deepcopy.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* diff --git a/vendor/k8s.io/api/node/v1beta1/generated.proto b/vendor/k8s.io/api/node/v1beta1/generated.proto index 5c2d9d50afa02..3c1e5bbbdcbf3 100644 --- a/vendor/k8s.io/api/node/v1beta1/generated.proto +++ b/vendor/k8s.io/api/node/v1beta1/generated.proto @@ -43,7 +43,7 @@ message Overhead { // user or cluster provisioner, and referenced in the PodSpec. The Kubelet is // responsible for resolving the RuntimeClassName reference before running the // pod. For more details, see -// https://git.k8s.io/enhancements/keps/sig-node/runtime-class.md +// https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class message RuntimeClass { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional @@ -63,8 +63,8 @@ message RuntimeClass { // Overhead represents the resource overhead associated with running a pod for a // given RuntimeClass. For more details, see - // https://git.k8s.io/enhancements/keps/sig-node/20190226-pod-overhead.md - // This field is alpha-level as of Kubernetes v1.15, and is only honored by servers that enable the PodOverhead feature. + // https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md + // This field is beta-level as of Kubernetes v1.18, and is only honored by servers that enable the PodOverhead feature. // +optional optional Overhead overhead = 3; @@ -96,6 +96,7 @@ message Scheduling { // with a pod's existing nodeSelector. Any conflicts will cause the pod to // be rejected in admission. // +optional + // +mapType=atomic map nodeSelector = 1; // tolerations are appended (excluding duplicates) to pods running with this diff --git a/vendor/k8s.io/api/node/v1beta1/types.go b/vendor/k8s.io/api/node/v1beta1/types.go index 89559949ca5e5..c545abf18b02d 100644 --- a/vendor/k8s.io/api/node/v1beta1/types.go +++ b/vendor/k8s.io/api/node/v1beta1/types.go @@ -33,7 +33,7 @@ import ( // user or cluster provisioner, and referenced in the PodSpec. The Kubelet is // responsible for resolving the RuntimeClassName reference before running the // pod. 
For more details, see -// https://git.k8s.io/enhancements/keps/sig-node/runtime-class.md +// https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class type RuntimeClass struct { metav1.TypeMeta `json:",inline"` // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata @@ -54,8 +54,8 @@ type RuntimeClass struct { // Overhead represents the resource overhead associated with running a pod for a // given RuntimeClass. For more details, see - // https://git.k8s.io/enhancements/keps/sig-node/20190226-pod-overhead.md - // This field is alpha-level as of Kubernetes v1.15, and is only honored by servers that enable the PodOverhead feature. + // https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md + // This field is beta-level as of Kubernetes v1.18, and is only honored by servers that enable the PodOverhead feature. // +optional Overhead *Overhead `json:"overhead,omitempty" protobuf:"bytes,3,opt,name=overhead"` @@ -83,6 +83,7 @@ type Scheduling struct { // with a pod's existing nodeSelector. Any conflicts will cause the pod to // be rejected in admission. // +optional + // +mapType=atomic NodeSelector map[string]string `json:"nodeSelector,omitempty" protobuf:"bytes,1,opt,name=nodeSelector"` // tolerations are appended (excluding duplicates) to pods running with this diff --git a/vendor/k8s.io/api/node/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/node/v1beta1/types_swagger_doc_generated.go index a486147f0333f..6d88710340bde 100644 --- a/vendor/k8s.io/api/node/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/node/v1beta1/types_swagger_doc_generated.go @@ -37,10 +37,10 @@ func (Overhead) SwaggerDoc() map[string]string { } var map_RuntimeClass = map[string]string{ - "": "RuntimeClass defines a class of container runtime supported in the cluster. The RuntimeClass is used to determine which container runtime is used to run all containers in a pod. RuntimeClasses are (currently) manually defined by a user or cluster provisioner, and referenced in the PodSpec. The Kubelet is responsible for resolving the RuntimeClassName reference before running the pod. For more details, see https://git.k8s.io/enhancements/keps/sig-node/runtime-class.md", + "": "RuntimeClass defines a class of container runtime supported in the cluster. The RuntimeClass is used to determine which container runtime is used to run all containers in a pod. RuntimeClasses are (currently) manually defined by a user or cluster provisioner, and referenced in the PodSpec. The Kubelet is responsible for resolving the RuntimeClassName reference before running the pod. For more details, see https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class", "metadata": "More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "handler": "Handler specifies the underlying runtime and configuration that the CRI implementation will use to handle pods of this class. The possible values are specific to the node & CRI configuration. It is assumed that all handlers are available on every node, and handlers of the same name are equivalent on every node. For example, a handler called \"runc\" might specify that the runc OCI runtime (using native Linux containers) will be used to run the containers in a pod. 
The Handler must be lowercase, conform to the DNS Label (RFC 1123) requirements, and is immutable.", - "overhead": "Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. For more details, see https://git.k8s.io/enhancements/keps/sig-node/20190226-pod-overhead.md This field is alpha-level as of Kubernetes v1.15, and is only honored by servers that enable the PodOverhead feature.", + "overhead": "Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. For more details, see https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md This field is beta-level as of Kubernetes v1.18, and is only honored by servers that enable the PodOverhead feature.", "scheduling": "Scheduling holds the scheduling constraints to ensure that pods running with this RuntimeClass are scheduled to nodes that support it. If scheduling is nil, this RuntimeClass is assumed to be supported by all nodes.", } diff --git a/vendor/k8s.io/api/node/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/node/v1beta1/zz_generated.deepcopy.go index c3989528b2af6..e5034a587b603 100644 --- a/vendor/k8s.io/api/node/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/node/v1beta1/zz_generated.deepcopy.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* diff --git a/vendor/k8s.io/api/node/v1beta1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/node/v1beta1/zz_generated.prerelease-lifecycle.go index 2fda72ef5e5fe..860923d8ffcd1 100644 --- a/vendor/k8s.io/api/node/v1beta1/zz_generated.prerelease-lifecycle.go +++ b/vendor/k8s.io/api/node/v1beta1/zz_generated.prerelease-lifecycle.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* diff --git a/vendor/k8s.io/api/policy/v1/generated.pb.go b/vendor/k8s.io/api/policy/v1/generated.pb.go index 9a9f73c124ddf..183db076aa580 100644 --- a/vendor/k8s.io/api/policy/v1/generated.pb.go +++ b/vendor/k8s.io/api/policy/v1/generated.pb.go @@ -47,10 +47,38 @@ var _ = math.Inf // proto package needs to be updated. 
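The node/v1alpha1 and node/v1beta1 hunks above move the RuntimeClass docs to the renumbered KEP links (585-runtime-class, 688-pod-overhead), promote the overhead field from alpha (v1.15) to beta (v1.18), and mark the scheduling nodeSelector map as +mapType=atomic. A minimal sketch of how those RuntimeClass fields fit together, built against the vendored node/v1beta1 package; the handler name, label key, and overhead quantities are illustrative assumptions, not values taken from this patch:

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	nodev1beta1 "k8s.io/api/node/v1beta1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// Hypothetical RuntimeClass for a sandboxed runtime handler.
	rc := nodev1beta1.RuntimeClass{
		ObjectMeta: metav1.ObjectMeta{Name: "sandboxed"},
		Handler:    "runsc", // must be a lowercase RFC 1123 label, per the docs above
		// Overhead is the fixed per-pod resource cost charged for this class
		// (the beta-level PodOverhead field documented above).
		Overhead: &nodev1beta1.Overhead{
			PodFixed: corev1.ResourceList{
				corev1.ResourceCPU:    resource.MustParse("250m"),
				corev1.ResourceMemory: resource.MustParse("64Mi"),
			},
		},
		// Scheduling.NodeSelector is merged with a pod's own nodeSelector;
		// conflicts cause the pod to be rejected in admission.
		Scheduling: &nodev1beta1.Scheduling{
			NodeSelector: map[string]string{"example.com/sandboxed": "true"},
		},
	}
	fmt.Println(rc.Name, rc.Handler)
}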
const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package +func (m *Eviction) Reset() { *m = Eviction{} } +func (*Eviction) ProtoMessage() {} +func (*Eviction) Descriptor() ([]byte, []int) { + return fileDescriptor_2d50488813b2d18e, []int{0} +} +func (m *Eviction) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Eviction) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Eviction) XXX_Merge(src proto.Message) { + xxx_messageInfo_Eviction.Merge(m, src) +} +func (m *Eviction) XXX_Size() int { + return m.Size() +} +func (m *Eviction) XXX_DiscardUnknown() { + xxx_messageInfo_Eviction.DiscardUnknown(m) +} + +var xxx_messageInfo_Eviction proto.InternalMessageInfo + func (m *PodDisruptionBudget) Reset() { *m = PodDisruptionBudget{} } func (*PodDisruptionBudget) ProtoMessage() {} func (*PodDisruptionBudget) Descriptor() ([]byte, []int) { - return fileDescriptor_2d50488813b2d18e, []int{0} + return fileDescriptor_2d50488813b2d18e, []int{1} } func (m *PodDisruptionBudget) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -78,7 +106,7 @@ var xxx_messageInfo_PodDisruptionBudget proto.InternalMessageInfo func (m *PodDisruptionBudgetList) Reset() { *m = PodDisruptionBudgetList{} } func (*PodDisruptionBudgetList) ProtoMessage() {} func (*PodDisruptionBudgetList) Descriptor() ([]byte, []int) { - return fileDescriptor_2d50488813b2d18e, []int{1} + return fileDescriptor_2d50488813b2d18e, []int{2} } func (m *PodDisruptionBudgetList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -106,7 +134,7 @@ var xxx_messageInfo_PodDisruptionBudgetList proto.InternalMessageInfo func (m *PodDisruptionBudgetSpec) Reset() { *m = PodDisruptionBudgetSpec{} } func (*PodDisruptionBudgetSpec) ProtoMessage() {} func (*PodDisruptionBudgetSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_2d50488813b2d18e, []int{2} + return fileDescriptor_2d50488813b2d18e, []int{3} } func (m *PodDisruptionBudgetSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -134,7 +162,7 @@ var xxx_messageInfo_PodDisruptionBudgetSpec proto.InternalMessageInfo func (m *PodDisruptionBudgetStatus) Reset() { *m = PodDisruptionBudgetStatus{} } func (*PodDisruptionBudgetStatus) ProtoMessage() {} func (*PodDisruptionBudgetStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_2d50488813b2d18e, []int{3} + return fileDescriptor_2d50488813b2d18e, []int{4} } func (m *PodDisruptionBudgetStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -160,6 +188,7 @@ func (m *PodDisruptionBudgetStatus) XXX_DiscardUnknown() { var xxx_messageInfo_PodDisruptionBudgetStatus proto.InternalMessageInfo func init() { + proto.RegisterType((*Eviction)(nil), "k8s.io.api.policy.v1.Eviction") proto.RegisterType((*PodDisruptionBudget)(nil), "k8s.io.api.policy.v1.PodDisruptionBudget") proto.RegisterType((*PodDisruptionBudgetList)(nil), "k8s.io.api.policy.v1.PodDisruptionBudgetList") proto.RegisterType((*PodDisruptionBudgetSpec)(nil), "k8s.io.api.policy.v1.PodDisruptionBudgetSpec") @@ -172,55 +201,103 @@ func init() { } var fileDescriptor_2d50488813b2d18e = []byte{ - // 766 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x94, 0xdf, 0x6e, 0xe3, 0x44, - 0x14, 0xc6, 0xe3, 0xa4, 0x29, 0x65, 0x36, 0x8d, 0xaa, 0x61, 0x81, 0x90, 0x0b, 0x77, 0xd5, 0xab, - 0x82, 0xb4, 0x63, 0xba, 0x8b, 0x50, 0x85, 0x04, 0x62, 0xbd, 
0x59, 0xc1, 0x22, 0x4a, 0x56, 0x53, - 0x10, 0x12, 0x02, 0x89, 0x89, 0x7d, 0xd6, 0x19, 0x62, 0x7b, 0xac, 0x99, 0xb1, 0xd9, 0x5c, 0xc1, - 0x23, 0xf0, 0x0a, 0x3c, 0x0a, 0x57, 0xf4, 0x0a, 0xed, 0xe5, 0x5e, 0x45, 0xd4, 0xbc, 0x08, 0xf2, - 0xd8, 0xf9, 0xe3, 0x24, 0x55, 0x03, 0x77, 0x99, 0x39, 0xe7, 0xfb, 0x9d, 0x39, 0xdf, 0x39, 0x0e, - 0xfa, 0x78, 0x72, 0xae, 0x08, 0x17, 0xce, 0x24, 0x1d, 0x81, 0x8c, 0x41, 0x83, 0x72, 0x32, 0x88, - 0x7d, 0x21, 0x9d, 0x2a, 0xc0, 0x12, 0xee, 0x24, 0x22, 0xe4, 0xde, 0xd4, 0xc9, 0xce, 0x9c, 0x00, - 0x62, 0x90, 0x4c, 0x83, 0x4f, 0x12, 0x29, 0xb4, 0xc0, 0x77, 0xcb, 0x2c, 0xc2, 0x12, 0x4e, 0xca, - 0x2c, 0x92, 0x9d, 0xf5, 0xef, 0x07, 0x5c, 0x8f, 0xd3, 0x11, 0xf1, 0x44, 0xe4, 0x04, 0x22, 0x10, - 0x8e, 0x49, 0x1e, 0xa5, 0xcf, 0xcd, 0xc9, 0x1c, 0xcc, 0xaf, 0x12, 0xd2, 0xff, 0x60, 0x59, 0x2a, - 0x62, 0xde, 0x98, 0xc7, 0x20, 0xa7, 0x4e, 0x32, 0x09, 0x8a, 0x0b, 0xe5, 0x44, 0xa0, 0xd9, 0x96, - 0xd2, 0x7d, 0xe7, 0x26, 0x95, 0x4c, 0x63, 0xcd, 0x23, 0xd8, 0x10, 0x7c, 0x78, 0x9b, 0x40, 0x79, - 0x63, 0x88, 0xd8, 0x86, 0xee, 0xe1, 0x4d, 0xba, 0x54, 0xf3, 0xd0, 0xe1, 0xb1, 0x56, 0x5a, 0xae, - 0x8b, 0x4e, 0x7e, 0x6f, 0xa2, 0x37, 0x9e, 0x09, 0x7f, 0xc0, 0x95, 0x4c, 0x13, 0xcd, 0x45, 0xec, - 0xa6, 0x7e, 0x00, 0x1a, 0xff, 0x88, 0x0e, 0x8a, 0x86, 0x7c, 0xa6, 0x59, 0xcf, 0xba, 0x67, 0x9d, - 0xde, 0x79, 0xf0, 0x3e, 0x59, 0x7a, 0xb8, 0xe0, 0x93, 0x64, 0x12, 0x14, 0x17, 0x8a, 0x14, 0xd9, - 0x24, 0x3b, 0x23, 0xc3, 0xd1, 0x4f, 0xe0, 0xe9, 0x0b, 0xd0, 0xcc, 0xc5, 0x57, 0xb3, 0xe3, 0x46, - 0x3e, 0x3b, 0x46, 0xcb, 0x3b, 0xba, 0xa0, 0xe2, 0x21, 0xda, 0x53, 0x09, 0x78, 0xbd, 0xa6, 0xa1, - 0xdf, 0x27, 0xdb, 0x26, 0x44, 0xb6, 0x3c, 0xed, 0x32, 0x01, 0xcf, 0xed, 0x54, 0xe8, 0xbd, 0xe2, - 0x44, 0x0d, 0x08, 0x7f, 0x8b, 0xf6, 0x95, 0x66, 0x3a, 0x55, 0xbd, 0x96, 0x41, 0x3a, 0xbb, 0x23, - 0x8d, 0xcc, 0xed, 0x56, 0xd0, 0xfd, 0xf2, 0x4c, 0x2b, 0xdc, 0xc9, 0x9f, 0x16, 0x7a, 0x7b, 0x8b, - 0xea, 0x4b, 0xae, 0x34, 0xfe, 0x7e, 0xc3, 0x27, 0xb2, 0x9b, 0x4f, 0x85, 0xda, 0xb8, 0x74, 0x54, - 0x55, 0x3d, 0x98, 0xdf, 0xac, 0x78, 0xf4, 0x15, 0x6a, 0x73, 0x0d, 0x91, 0xea, 0x35, 0xef, 0xb5, - 0x4e, 0xef, 0x3c, 0x78, 0x77, 0xe7, 0x8e, 0xdc, 0xc3, 0x8a, 0xda, 0x7e, 0x5a, 0xe8, 0x69, 0x89, - 0x39, 0xf9, 0xab, 0xb9, 0xb5, 0x93, 0xc2, 0x44, 0xfc, 0x1c, 0x75, 0x22, 0x1e, 0x3f, 0xca, 0x18, - 0x0f, 0xd9, 0x28, 0x84, 0x5b, 0xa7, 0x5e, 0x6c, 0x15, 0x29, 0xb7, 0x8a, 0x3c, 0x8d, 0xf5, 0x50, - 0x5e, 0x6a, 0xc9, 0xe3, 0xc0, 0x3d, 0xca, 0x67, 0xc7, 0x9d, 0x8b, 0x15, 0x12, 0xad, 0x71, 0xf1, - 0x0f, 0xe8, 0x40, 0x41, 0x08, 0x9e, 0x16, 0xb2, 0x9a, 0xfd, 0xc3, 0x1d, 0x1d, 0x63, 0x23, 0x08, - 0x2f, 0x2b, 0xa9, 0xdb, 0x29, 0x2c, 0x9b, 0x9f, 0xe8, 0x02, 0x89, 0x43, 0xd4, 0x8d, 0xd8, 0x8b, - 0x6f, 0x62, 0xb6, 0x68, 0xa4, 0xf5, 0x3f, 0x1b, 0xc1, 0xf9, 0xec, 0xb8, 0x7b, 0x51, 0x63, 0xd1, - 0x35, 0xf6, 0xc9, 0x1f, 0x6d, 0xf4, 0xce, 0x8d, 0x0b, 0x85, 0xbf, 0x40, 0x58, 0x8c, 0x14, 0xc8, - 0x0c, 0xfc, 0xcf, 0xca, 0xef, 0x8e, 0x8b, 0xd8, 0x18, 0xdb, 0x72, 0xfb, 0xd5, 0x80, 0xf0, 0x70, - 0x23, 0x83, 0x6e, 0x51, 0xe1, 0x5f, 0xd0, 0xa1, 0x5f, 0x56, 0x01, 0xff, 0x99, 0xf0, 0xe7, 0x2b, - 0xe1, 0xfe, 0xc7, 0x25, 0x27, 0x83, 0x55, 0xc8, 0x93, 0x58, 0xcb, 0xa9, 0xfb, 0x66, 0xf5, 0x94, - 0xc3, 0x5a, 0x8c, 0xd6, 0xeb, 0x15, 0xcd, 0xf8, 0x0b, 0xa4, 0x7a, 0x14, 0x86, 0xe2, 0x67, 0xf0, - 0x8d, 0xb9, 0xed, 0x65, 0x33, 0x83, 0x8d, 0x0c, 0xba, 0x45, 0x85, 0x3f, 0x41, 0x5d, 0x2f, 0x95, - 0x12, 0x62, 0xfd, 0x39, 0xb0, 0x50, 0x8f, 0xa7, 0xbd, 0x3d, 0xc3, 0x79, 0xab, 0xe2, 0x74, 0x1f, - 0xd7, 0xa2, 0x74, 0x2d, 0xbb, 0xd0, 0xfb, 0xa0, 0xb8, 0x04, 0x7f, 0xae, 0x6f, 0xd7, 
0xf5, 0x83, - 0x5a, 0x94, 0xae, 0x65, 0xe3, 0x73, 0xd4, 0x81, 0x17, 0x09, 0x78, 0x73, 0x2f, 0xf7, 0x8d, 0xfa, - 0x6e, 0xa5, 0xee, 0x3c, 0x59, 0x89, 0xd1, 0x5a, 0x26, 0xf6, 0x10, 0xf2, 0x44, 0xec, 0x73, 0xd3, - 0x4e, 0xef, 0x35, 0x33, 0x03, 0x67, 0xb7, 0xfd, 0x7d, 0x3c, 0xd7, 0x2d, 0xff, 0x18, 0x17, 0x57, - 0x8a, 0xae, 0x60, 0xfb, 0x21, 0xc2, 0x9b, 0x63, 0xc2, 0x47, 0xa8, 0x35, 0x81, 0xa9, 0x59, 0x9f, - 0xd7, 0x69, 0xf1, 0x13, 0x7f, 0x8a, 0xda, 0x19, 0x0b, 0x53, 0xa8, 0xbe, 0xa3, 0xf7, 0x76, 0x7b, - 0xc7, 0xd7, 0x3c, 0x02, 0x5a, 0x0a, 0x3f, 0x6a, 0x9e, 0x5b, 0xee, 0xe9, 0xd5, 0xb5, 0xdd, 0x78, - 0x79, 0x6d, 0x37, 0x5e, 0x5d, 0xdb, 0x8d, 0x5f, 0x73, 0xdb, 0xba, 0xca, 0x6d, 0xeb, 0x65, 0x6e, - 0x5b, 0xaf, 0x72, 0xdb, 0xfa, 0x3b, 0xb7, 0xad, 0xdf, 0xfe, 0xb1, 0x1b, 0xdf, 0x35, 0xb3, 0xb3, - 0x7f, 0x03, 0x00, 0x00, 0xff, 0xff, 0xca, 0x04, 0x93, 0xc0, 0x85, 0x07, 0x00, 0x00, + // 805 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x94, 0xdf, 0x8e, 0xdb, 0x44, + 0x14, 0xc6, 0xe3, 0x64, 0xb3, 0x2c, 0xd3, 0x24, 0x5a, 0x86, 0x02, 0x4b, 0x2e, 0x1c, 0x94, 0xab, + 0x05, 0xa9, 0x63, 0xb6, 0x45, 0x68, 0x85, 0x04, 0xa2, 0x6e, 0x56, 0x50, 0xd4, 0x25, 0xd5, 0x2c, + 0x08, 0x09, 0x81, 0xc4, 0xc4, 0x3e, 0xcd, 0x0e, 0xb1, 0x3d, 0xd6, 0xcc, 0xd8, 0x34, 0x57, 0xf0, + 0x08, 0xbc, 0x02, 0x8f, 0xc2, 0x15, 0x7b, 0x85, 0x7a, 0x59, 0x71, 0x11, 0xb1, 0xe6, 0x45, 0x90, + 0xc7, 0xce, 0x1f, 0x27, 0x59, 0x35, 0xe5, 0x82, 0x3b, 0xcf, 0x99, 0xf3, 0xfd, 0x8e, 0xcf, 0x37, + 0x67, 0x06, 0x7d, 0x3c, 0x39, 0x55, 0x84, 0x0b, 0x67, 0x92, 0x8c, 0x40, 0x46, 0xa0, 0x41, 0x39, + 0x29, 0x44, 0xbe, 0x90, 0x4e, 0xb9, 0xc1, 0x62, 0xee, 0xc4, 0x22, 0xe0, 0xde, 0xd4, 0x49, 0x4f, + 0x9c, 0x31, 0x44, 0x20, 0x99, 0x06, 0x9f, 0xc4, 0x52, 0x68, 0x81, 0x6f, 0x17, 0x59, 0x84, 0xc5, + 0x9c, 0x14, 0x59, 0x24, 0x3d, 0xe9, 0xde, 0x19, 0x73, 0x7d, 0x99, 0x8c, 0x88, 0x27, 0x42, 0x67, + 0x2c, 0xc6, 0xc2, 0x31, 0xc9, 0xa3, 0xe4, 0x89, 0x59, 0x99, 0x85, 0xf9, 0x2a, 0x20, 0xdd, 0x0f, + 0x96, 0xa5, 0x42, 0xe6, 0x5d, 0xf2, 0x08, 0xe4, 0xd4, 0x89, 0x27, 0xe3, 0x3c, 0xa0, 0x9c, 0x10, + 0x34, 0xdb, 0x52, 0xba, 0xeb, 0xdc, 0xa4, 0x92, 0x49, 0xa4, 0x79, 0x08, 0x1b, 0x82, 0x0f, 0x5f, + 0x24, 0x50, 0xde, 0x25, 0x84, 0x6c, 0x43, 0x77, 0xef, 0x26, 0x5d, 0xa2, 0x79, 0xe0, 0xf0, 0x48, + 0x2b, 0x2d, 0xd7, 0x45, 0xfd, 0xbf, 0x2c, 0x74, 0x70, 0x96, 0x72, 0x4f, 0x73, 0x11, 0xe1, 0x1f, + 0xd0, 0x41, 0xde, 0x85, 0xcf, 0x34, 0x3b, 0xb2, 0xde, 0xb1, 0x8e, 0x6f, 0xdd, 0x7d, 0x9f, 0x2c, + 0x8d, 0x5b, 0x40, 0x49, 0x3c, 0x19, 0xe7, 0x01, 0x45, 0xf2, 0x6c, 0x92, 0x9e, 0x90, 0xe1, 0xe8, + 0x47, 0xf0, 0xf4, 0x39, 0x68, 0xe6, 0xe2, 0xab, 0x59, 0xaf, 0x96, 0xcd, 0x7a, 0x68, 0x19, 0xa3, + 0x0b, 0x2a, 0x0e, 0x50, 0xdb, 0x87, 0x00, 0x34, 0x0c, 0xe3, 0xbc, 0xa2, 0x3a, 0xaa, 0x9b, 0x32, + 0xf7, 0x76, 0x2b, 0x33, 0x58, 0x95, 0xba, 0xaf, 0x65, 0xb3, 0x5e, 0xbb, 0x12, 0xa2, 0x55, 0x78, + 0xff, 0xb7, 0x3a, 0x7a, 0xfd, 0xb1, 0xf0, 0x07, 0x5c, 0xc9, 0xc4, 0x84, 0xdc, 0xc4, 0x1f, 0x83, + 0xfe, 0x1f, 0xfa, 0x1c, 0xa2, 0x3d, 0x15, 0x83, 0x57, 0xb6, 0x77, 0x87, 0x6c, 0x1b, 0x3f, 0xb2, + 0xe5, 0xd7, 0x2e, 0x62, 0xf0, 0xdc, 0x56, 0x89, 0xde, 0xcb, 0x57, 0xd4, 0x80, 0xf0, 0x37, 0x68, + 0x5f, 0x69, 0xa6, 0x13, 0x75, 0xd4, 0x30, 0x48, 0x67, 0x77, 0xa4, 0x91, 0xb9, 0x9d, 0x12, 0xba, + 0x5f, 0xac, 0x69, 0x89, 0xeb, 0xff, 0x61, 0xa1, 0xb7, 0xb6, 0xa8, 0x1e, 0x71, 0xa5, 0xf1, 0x77, + 0x1b, 0x3e, 0x91, 0xdd, 0x7c, 0xca, 0xd5, 0xc6, 0xa5, 0xc3, 0xb2, 0xea, 0xc1, 0x3c, 0xb2, 0xe2, + 0xd1, 0x97, 0xa8, 0xc9, 0x35, 0x84, 0xf9, 0x0c, 0x34, 0x8e, 0x6f, 0xdd, 
0x7d, 0x77, 0xe7, 0x8e, + 0xdc, 0x76, 0x49, 0x6d, 0x3e, 0xcc, 0xf5, 0xb4, 0xc0, 0xf4, 0xff, 0xac, 0x6f, 0xed, 0x24, 0x37, + 0x11, 0x3f, 0x41, 0xad, 0x90, 0x47, 0xf7, 0x53, 0xc6, 0x03, 0x36, 0x0a, 0xe0, 0x85, 0xa7, 0x9e, + 0x5f, 0x19, 0x52, 0x5c, 0x19, 0xf2, 0x30, 0xd2, 0x43, 0x79, 0xa1, 0x25, 0x8f, 0xc6, 0xee, 0x61, + 0x36, 0xeb, 0xb5, 0xce, 0x57, 0x48, 0xb4, 0xc2, 0xc5, 0xdf, 0xa3, 0x03, 0x05, 0x01, 0x78, 0x5a, + 0xc8, 0x97, 0x1b, 0xed, 0x47, 0x6c, 0x04, 0xc1, 0x45, 0x29, 0x75, 0x5b, 0xb9, 0x65, 0xf3, 0x15, + 0x5d, 0x20, 0x71, 0x80, 0x3a, 0x21, 0x7b, 0xfa, 0x75, 0xc4, 0x16, 0x8d, 0x34, 0xfe, 0x63, 0x23, + 0x38, 0x9b, 0xf5, 0x3a, 0xe7, 0x15, 0x16, 0x5d, 0x63, 0xf7, 0x7f, 0x6f, 0xa2, 0xb7, 0x6f, 0x1c, + 0x28, 0xfc, 0x05, 0xc2, 0x62, 0xa4, 0x40, 0xa6, 0xe0, 0x7f, 0x56, 0x3c, 0x2a, 0x5c, 0x44, 0xc6, + 0xd8, 0x86, 0xdb, 0x2d, 0x0f, 0x08, 0x0f, 0x37, 0x32, 0xe8, 0x16, 0x15, 0xfe, 0x19, 0xb5, 0xfd, + 0xa2, 0x0a, 0xf8, 0x8f, 0x85, 0x3f, 0x1f, 0x09, 0xf7, 0x25, 0x87, 0x9c, 0x0c, 0x56, 0x21, 0x67, + 0x91, 0x96, 0x53, 0xf7, 0x8d, 0xf2, 0x57, 0xda, 0x95, 0x3d, 0x5a, 0xad, 0x97, 0x37, 0xe3, 0x2f, + 0x90, 0xea, 0x7e, 0x10, 0x88, 0x9f, 0xc0, 0x37, 0xe6, 0x36, 0x97, 0xcd, 0x0c, 0x36, 0x32, 0xe8, + 0x16, 0x15, 0xfe, 0x04, 0x75, 0xbc, 0x44, 0x4a, 0x88, 0xf4, 0xe7, 0xc0, 0x02, 0x7d, 0x39, 0x3d, + 0xda, 0x33, 0x9c, 0x37, 0x4b, 0x4e, 0xe7, 0x41, 0x65, 0x97, 0xae, 0x65, 0xe7, 0x7a, 0x1f, 0x14, + 0x97, 0xe0, 0xcf, 0xf5, 0xcd, 0xaa, 0x7e, 0x50, 0xd9, 0xa5, 0x6b, 0xd9, 0xf8, 0x14, 0xb5, 0xe0, + 0x69, 0x0c, 0xde, 0xdc, 0xcb, 0x7d, 0xa3, 0xbe, 0x5d, 0xaa, 0x5b, 0x67, 0x2b, 0x7b, 0xb4, 0x92, + 0x89, 0x3d, 0x84, 0x3c, 0x11, 0xf9, 0xbc, 0x78, 0x9a, 0x5f, 0x31, 0x67, 0xe0, 0xec, 0x36, 0xbf, + 0x0f, 0xe6, 0xba, 0xe5, 0xc3, 0xb8, 0x08, 0x29, 0xba, 0x82, 0xed, 0x06, 0x08, 0x6f, 0x1e, 0x13, + 0x3e, 0x44, 0x8d, 0x09, 0x4c, 0xcd, 0xf8, 0xbc, 0x4a, 0xf3, 0x4f, 0xfc, 0x29, 0x6a, 0xa6, 0x2c, + 0x48, 0xa0, 0xbc, 0x47, 0xef, 0xed, 0xf6, 0x1f, 0x5f, 0xf1, 0x10, 0x68, 0x21, 0xfc, 0xa8, 0x7e, + 0x6a, 0xb9, 0xc7, 0x57, 0xd7, 0x76, 0xed, 0xd9, 0xb5, 0x5d, 0x7b, 0x7e, 0x6d, 0xd7, 0x7e, 0xc9, + 0x6c, 0xeb, 0x2a, 0xb3, 0xad, 0x67, 0x99, 0x6d, 0x3d, 0xcf, 0x6c, 0xeb, 0xef, 0xcc, 0xb6, 0x7e, + 0xfd, 0xc7, 0xae, 0x7d, 0x5b, 0x4f, 0x4f, 0xfe, 0x0d, 0x00, 0x00, 0xff, 0xff, 0xce, 0x1b, 0x9d, + 0x9f, 0x62, 0x08, 0x00, 0x00, +} + +func (m *Eviction) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Eviction) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Eviction) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.DeleteOptions != nil { + { + size, err := m.DeleteOptions.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *PodDisruptionBudget) Marshal() (dAtA []byte, err error) { @@ -474,6 +551,21 @@ func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { dAtA[offset] = uint8(v) return base } +func (m *Eviction) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + 
sovGenerated(uint64(l)) + if m.DeleteOptions != nil { + l = m.DeleteOptions.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + func (m *PodDisruptionBudget) Size() (n int) { if m == nil { return 0 @@ -562,6 +654,17 @@ func sovGenerated(x uint64) (n int) { func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) } +func (this *Eviction) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Eviction{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `DeleteOptions:` + strings.Replace(fmt.Sprintf("%v", this.DeleteOptions), "DeleteOptions", "v1.DeleteOptions", 1) + `,`, + `}`, + }, "") + return s +} func (this *PodDisruptionBudget) String() string { if this == nil { return "nil" @@ -641,6 +744,125 @@ func valueToStringGenerated(v interface{}) string { pv := reflect.Indirect(rv).Interface() return fmt.Sprintf("*%v", pv) } +func (m *Eviction) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Eviction: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Eviction: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DeleteOptions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.DeleteOptions == nil { + m.DeleteOptions = &v1.DeleteOptions{} + } + if err := m.DeleteOptions.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *PodDisruptionBudget) Unmarshal(dAtA []byte) error { l := len(dAtA) 
iNdEx := 0 diff --git a/vendor/k8s.io/api/policy/v1/generated.proto b/vendor/k8s.io/api/policy/v1/generated.proto index f57514112a099..5b798423564c2 100644 --- a/vendor/k8s.io/api/policy/v1/generated.proto +++ b/vendor/k8s.io/api/policy/v1/generated.proto @@ -29,6 +29,19 @@ import "k8s.io/apimachinery/pkg/util/intstr/generated.proto"; // Package-wide variables from generator "generated". option go_package = "v1"; +// Eviction evicts a pod from its node subject to certain policies and safety constraints. +// This is a subresource of Pod. A request to cause such an eviction is +// created by POSTing to .../pods/<pod name>/evictions. +message Eviction { + // ObjectMeta describes the pod that is being evicted. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // DeleteOptions may be provided + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.DeleteOptions deleteOptions = 2; +} + // PodDisruptionBudget is an object to define the max disruption that can be caused to a collection of pods message PodDisruptionBudget { // Standard object's metadata. diff --git a/vendor/k8s.io/api/policy/v1/register.go b/vendor/k8s.io/api/policy/v1/register.go index 603c49b9cefc0..7fb9fd3e1f11a 100644 --- a/vendor/k8s.io/api/policy/v1/register.go +++ b/vendor/k8s.io/api/policy/v1/register.go @@ -44,6 +44,7 @@ func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, &PodDisruptionBudget{}, &PodDisruptionBudgetList{}, + &Eviction{}, ) // Add the watch version that applies metav1.AddToGroupVersion(scheme, SchemeGroupVersion) diff --git a/vendor/k8s.io/api/policy/v1/types.go b/vendor/k8s.io/api/policy/v1/types.go index 65bf768d8669e..4a03696f000d2 100644 --- a/vendor/k8s.io/api/policy/v1/types.go +++ b/vendor/k8s.io/api/policy/v1/types.go @@ -21,6 +21,9 @@ import ( "k8s.io/apimachinery/pkg/util/intstr" ) +// DisruptionBudgetCause is the status cause returned for eviction failures caused by PodDisruptionBudget violations. +const DisruptionBudgetCause metav1.CauseType = "DisruptionBudget" + // PodDisruptionBudgetSpec is a description of a PodDisruptionBudget. type PodDisruptionBudgetSpec struct { // An eviction is allowed if at least "minAvailable" pods selected by @@ -148,3 +151,22 @@ type PodDisruptionBudgetList struct { // Items is a list of PodDisruptionBudgets Items []PodDisruptionBudget `json:"items" protobuf:"bytes,2,rep,name=items"` } + +// +genclient +// +genclient:noVerbs +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Eviction evicts a pod from its node subject to certain policies and safety constraints. +// This is a subresource of Pod. A request to cause such an eviction is +// created by POSTing to .../pods/<pod name>/evictions. +type Eviction struct { + metav1.TypeMeta `json:",inline"` + + // ObjectMeta describes the pod that is being evicted. 
+ // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // DeleteOptions may be provided + // +optional + DeleteOptions *metav1.DeleteOptions `json:"deleteOptions,omitempty" protobuf:"bytes,2,opt,name=deleteOptions"` +} diff --git a/vendor/k8s.io/api/policy/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/policy/v1/types_swagger_doc_generated.go index 0b80a1dccf102..3208392e8808c 100644 --- a/vendor/k8s.io/api/policy/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/policy/v1/types_swagger_doc_generated.go @@ -27,6 +27,16 @@ package v1 // Those methods can be generated by using hack/update-generated-swagger-docs.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. +var map_Eviction = map[string]string{ + "": "Eviction evicts a pod from its node subject to certain policies and safety constraints. This is a subresource of Pod. A request to cause such an eviction is created by POSTing to .../pods/<pod name>/evictions.", + "metadata": "ObjectMeta describes the pod that is being evicted.", + "deleteOptions": "DeleteOptions may be provided", +} + +func (Eviction) SwaggerDoc() map[string]string { + return map_Eviction +} + var map_PodDisruptionBudget = map[string]string{ "": "PodDisruptionBudget is an object to define the max disruption that can be caused to a collection of pods", "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", diff --git a/vendor/k8s.io/api/policy/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/policy/v1/zz_generated.deepcopy.go index 78c0adbd705a5..485e1c9380059 100644 --- a/vendor/k8s.io/api/policy/v1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/policy/v1/zz_generated.deepcopy.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* @@ -26,6 +27,37 @@ import ( intstr "k8s.io/apimachinery/pkg/util/intstr" ) +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Eviction) DeepCopyInto(out *Eviction) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.DeleteOptions != nil { + in, out := &in.DeleteOptions, &out.DeleteOptions + *out = new(metav1.DeleteOptions) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Eviction. +func (in *Eviction) DeepCopy() *Eviction { + if in == nil { + return nil + } + out := new(Eviction) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Eviction) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *PodDisruptionBudget) DeepCopyInto(out *PodDisruptionBudget) { *out = *in diff --git a/vendor/k8s.io/api/policy/v1beta1/generated.proto b/vendor/k8s.io/api/policy/v1beta1/generated.proto index a47212142dc77..8a2824b51bf15 100644 --- a/vendor/k8s.io/api/policy/v1beta1/generated.proto +++ b/vendor/k8s.io/api/policy/v1beta1/generated.proto @@ -105,6 +105,8 @@ message IDRange { // PodDisruptionBudget is an object to define the max disruption that can be caused to a collection of pods message PodDisruptionBudget { + // Standard object's metadata. 
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; @@ -119,9 +121,12 @@ message PodDisruptionBudget { // PodDisruptionBudgetList is a collection of PodDisruptionBudgets. message PodDisruptionBudgetList { + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + // items list individual PodDisruptionBudget objects repeated PodDisruptionBudget items = 2; } @@ -139,7 +144,6 @@ message PodDisruptionBudgetSpec { // A null selector selects no pods. // An empty selector ({}) also selects no pods, which differs from standard behavior of selecting all pods. // In policy/v1, an empty selector will select all pods in the namespace. - // +patchStrategy=replace // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2; diff --git a/vendor/k8s.io/api/policy/v1beta1/types.go b/vendor/k8s.io/api/policy/v1beta1/types.go index 2811044518e23..486f93461afb8 100644 --- a/vendor/k8s.io/api/policy/v1beta1/types.go +++ b/vendor/k8s.io/api/policy/v1beta1/types.go @@ -36,9 +36,8 @@ type PodDisruptionBudgetSpec struct { // A null selector selects no pods. // An empty selector ({}) also selects no pods, which differs from standard behavior of selecting all pods. // In policy/v1, an empty selector will select all pods in the namespace. - // +patchStrategy=replace // +optional - Selector *metav1.LabelSelector `json:"selector,omitempty" patchStrategy:"replace" protobuf:"bytes,2,opt,name=selector"` + Selector *metav1.LabelSelector `json:"selector,omitempty" protobuf:"bytes,2,opt,name=selector"` // An eviction is allowed if at most "maxUnavailable" pods selected by // "selector" are unavailable after the eviction, i.e. even in absence of @@ -129,6 +128,9 @@ const ( // PodDisruptionBudget is an object to define the max disruption that can be caused to a collection of pods type PodDisruptionBudget struct { metav1.TypeMeta `json:",inline"` + + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -149,9 +151,13 @@ type PodDisruptionBudget struct { // PodDisruptionBudgetList is a collection of PodDisruptionBudgets. type PodDisruptionBudgetList struct { metav1.TypeMeta `json:",inline"` + + // Standard object's metadata. 
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - Items []PodDisruptionBudget `json:"items" protobuf:"bytes,2,rep,name=items"` + // items list individual PodDisruptionBudget objects + Items []PodDisruptionBudget `json:"items" protobuf:"bytes,2,rep,name=items"` } // +genclient diff --git a/vendor/k8s.io/api/policy/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/policy/v1beta1/types_swagger_doc_generated.go index 0853a5e996692..ef81d43af36df 100644 --- a/vendor/k8s.io/api/policy/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/policy/v1beta1/types_swagger_doc_generated.go @@ -96,9 +96,10 @@ func (IDRange) SwaggerDoc() map[string]string { } var map_PodDisruptionBudget = map[string]string{ - "": "PodDisruptionBudget is an object to define the max disruption that can be caused to a collection of pods", - "spec": "Specification of the desired behavior of the PodDisruptionBudget.", - "status": "Most recently observed status of the PodDisruptionBudget.", + "": "PodDisruptionBudget is an object to define the max disruption that can be caused to a collection of pods", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "Specification of the desired behavior of the PodDisruptionBudget.", + "status": "Most recently observed status of the PodDisruptionBudget.", } func (PodDisruptionBudget) SwaggerDoc() map[string]string { @@ -106,7 +107,9 @@ func (PodDisruptionBudget) SwaggerDoc() map[string]string { } var map_PodDisruptionBudgetList = map[string]string{ - "": "PodDisruptionBudgetList is a collection of PodDisruptionBudgets.", + "": "PodDisruptionBudgetList is a collection of PodDisruptionBudgets.", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "items": "items list individual PodDisruptionBudget objects", } func (PodDisruptionBudgetList) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/policy/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/policy/v1beta1/zz_generated.deepcopy.go index 02d8a85cf7046..0a6239b87e7c8 100644 --- a/vendor/k8s.io/api/policy/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/policy/v1beta1/zz_generated.deepcopy.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* diff --git a/vendor/k8s.io/api/policy/v1beta1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/policy/v1beta1/zz_generated.prerelease-lifecycle.go index 8bda4b00f24f5..612061d6cf71e 100644 --- a/vendor/k8s.io/api/policy/v1beta1/zz_generated.prerelease-lifecycle.go +++ b/vendor/k8s.io/api/policy/v1beta1/zz_generated.prerelease-lifecycle.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* diff --git a/vendor/k8s.io/api/rbac/v1/generated.proto b/vendor/k8s.io/api/rbac/v1/generated.proto index 22c6dae4b0353..2cf427f12215f 100644 --- a/vendor/k8s.io/api/rbac/v1/generated.proto +++ b/vendor/k8s.io/api/rbac/v1/generated.proto @@ -92,7 +92,7 @@ message ClusterRoleList { // PolicyRule holds information that describes a policy rule, but does not contain information // about who the rule applies to or which namespace the rule applies to. 
message PolicyRule { - // Verbs is a list of Verbs that apply to ALL the ResourceKinds and AttributeRestrictions contained in this rule. VerbAll represents all kinds. + // Verbs is a list of Verbs that apply to ALL the ResourceKinds contained in this rule. '*' represents all verbs. repeated string verbs = 1; // APIGroups is the name of the APIGroup that contains the resources. If multiple API groups are specified, any action requested against one of @@ -100,7 +100,7 @@ message PolicyRule { // +optional repeated string apiGroups = 2; - // Resources is a list of resources this rule applies to. ResourceAll represents all resources. + // Resources is a list of resources this rule applies to. '*' represents all resources. // +optional repeated string resources = 3; @@ -164,6 +164,7 @@ message RoleList { } // RoleRef contains information that points to the role being used +// +structType=atomic message RoleRef { // APIGroup is the group for the resource being referenced optional string apiGroup = 1; @@ -177,6 +178,7 @@ message RoleRef { // Subject contains a reference to the object or user identities a role binding applies to. This can either hold a direct API object reference, // or a value for non-objects such as user and group names. +// +structType=atomic message Subject { // Kind of object being referenced. Values defined by this API group are "User", "Group", and "ServiceAccount". // If the Authorizer does not recognize the kind value, the Authorizer should report an error. diff --git a/vendor/k8s.io/api/rbac/v1/types.go b/vendor/k8s.io/api/rbac/v1/types.go index 7ba7d05435e10..067b6f15efbf5 100644 --- a/vendor/k8s.io/api/rbac/v1/types.go +++ b/vendor/k8s.io/api/rbac/v1/types.go @@ -47,14 +47,14 @@ const ( // PolicyRule holds information that describes a policy rule, but does not contain information // about who the rule applies to or which namespace the rule applies to. type PolicyRule struct { - // Verbs is a list of Verbs that apply to ALL the ResourceKinds and AttributeRestrictions contained in this rule. VerbAll represents all kinds. + // Verbs is a list of Verbs that apply to ALL the ResourceKinds contained in this rule. '*' represents all verbs. Verbs []string `json:"verbs" protobuf:"bytes,1,rep,name=verbs"` // APIGroups is the name of the APIGroup that contains the resources. If multiple API groups are specified, any action requested against one of // the enumerated resources in any API group will be allowed. // +optional APIGroups []string `json:"apiGroups,omitempty" protobuf:"bytes,2,rep,name=apiGroups"` - // Resources is a list of resources this rule applies to. ResourceAll represents all resources. + // Resources is a list of resources this rule applies to. '*' represents all resources. // +optional Resources []string `json:"resources,omitempty" protobuf:"bytes,3,rep,name=resources"` // ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed. @@ -70,6 +70,7 @@ type PolicyRule struct { // Subject contains a reference to the object or user identities a role binding applies to. This can either hold a direct API object reference, // or a value for non-objects such as user and group names. +// +structType=atomic type Subject struct { // Kind of object being referenced. Values defined by this API group are "User", "Group", and "ServiceAccount". // If the Authorizer does not recognize the kind value, the Authorizer should report an error. 
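The rbac/v1 hunks above swap the VerbAll/ResourceAll constant names for the literal '*' wildcard in the PolicyRule docs, and tag RoleRef and Subject with +structType=atomic so server-side apply replaces them as whole values instead of merging individual fields. A short sketch of rules using that wildcard, built from the vendored rbac/v1 types; the role name and the read-only first rule are illustrative assumptions:

package main

import (
	"fmt"

	rbacv1 "k8s.io/api/rbac/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	cr := rbacv1.ClusterRole{
		ObjectMeta: metav1.ObjectMeta{Name: "example-reader"},
		Rules: []rbacv1.PolicyRule{
			{
				// A narrow rule: read-only access to pods in the core API group.
				APIGroups: []string{""},
				Resources: []string{"pods"},
				Verbs:     []string{"get", "list", "watch"},
			},
			{
				// The wildcard form the updated comments describe: '*' stands
				// for every verb and every resource in every API group.
				APIGroups: []string{"*"},
				Resources: []string{"*"},
				Verbs:     []string{"*"},
			},
		},
	}
	fmt.Println(cr.Name, len(cr.Rules))
}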
@@ -88,6 +89,7 @@ type Subject struct { } // RoleRef contains information that points to the role being used +// +structType=atomic type RoleRef struct { // APIGroup is the group for the resource being referenced APIGroup string `json:"apiGroup" protobuf:"bytes,1,opt,name=apiGroup"` diff --git a/vendor/k8s.io/api/rbac/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/rbac/v1/types_swagger_doc_generated.go index 83ce310e6fa36..228ee54c086c4 100644 --- a/vendor/k8s.io/api/rbac/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/rbac/v1/types_swagger_doc_generated.go @@ -80,9 +80,9 @@ func (ClusterRoleList) SwaggerDoc() map[string]string { var map_PolicyRule = map[string]string{ "": "PolicyRule holds information that describes a policy rule, but does not contain information about who the rule applies to or which namespace the rule applies to.", - "verbs": "Verbs is a list of Verbs that apply to ALL the ResourceKinds and AttributeRestrictions contained in this rule. VerbAll represents all kinds.", + "verbs": "Verbs is a list of Verbs that apply to ALL the ResourceKinds contained in this rule. '*' represents all verbs.", "apiGroups": "APIGroups is the name of the APIGroup that contains the resources. If multiple API groups are specified, any action requested against one of the enumerated resources in any API group will be allowed.", - "resources": "Resources is a list of resources this rule applies to. ResourceAll represents all resources.", + "resources": "Resources is a list of resources this rule applies to. '*' represents all resources.", "resourceNames": "ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed.", "nonResourceURLs": "NonResourceURLs is a set of partial urls that a user should have access to. *s are allowed, but only as the full, final step in the path Since non-resource URLs are not namespaced, this field is only applicable for ClusterRoles referenced from a ClusterRoleBinding. Rules can either apply to API resources (such as \"pods\" or \"secrets\") or non-resource URL paths (such as \"/api\"), but not both.", } diff --git a/vendor/k8s.io/api/rbac/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/rbac/v1/zz_generated.deepcopy.go index 095a5e9c28a57..eab086899d142 100644 --- a/vendor/k8s.io/api/rbac/v1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/rbac/v1/zz_generated.deepcopy.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* diff --git a/vendor/k8s.io/api/rbac/v1alpha1/generated.proto b/vendor/k8s.io/api/rbac/v1alpha1/generated.proto index caed7ec3069d5..9795cffd9e479 100644 --- a/vendor/k8s.io/api/rbac/v1alpha1/generated.proto +++ b/vendor/k8s.io/api/rbac/v1alpha1/generated.proto @@ -96,7 +96,7 @@ message ClusterRoleList { // PolicyRule holds information that describes a policy rule, but does not contain information // about who the rule applies to or which namespace the rule applies to. message PolicyRule { - // Verbs is a list of Verbs that apply to ALL the ResourceKinds and AttributeRestrictions contained in this rule. VerbAll represents all kinds. + // Verbs is a list of Verbs that apply to ALL the ResourceKinds contained in this rule. '*' represents all verbs. repeated string verbs = 1; // APIGroups is the name of the APIGroup that contains the resources. 
If multiple API groups are specified, any action requested against one of @@ -104,7 +104,7 @@ message PolicyRule { // +optional repeated string apiGroups = 3; - // Resources is a list of resources this rule applies to. ResourceAll represents all resources. + // Resources is a list of resources this rule applies to. '*' represents all resources. // +optional repeated string resources = 4; diff --git a/vendor/k8s.io/api/rbac/v1alpha1/types.go b/vendor/k8s.io/api/rbac/v1alpha1/types.go index 538ae4c9b0aac..13a0a1f0a3de8 100644 --- a/vendor/k8s.io/api/rbac/v1alpha1/types.go +++ b/vendor/k8s.io/api/rbac/v1alpha1/types.go @@ -47,14 +47,14 @@ const ( // PolicyRule holds information that describes a policy rule, but does not contain information // about who the rule applies to or which namespace the rule applies to. type PolicyRule struct { - // Verbs is a list of Verbs that apply to ALL the ResourceKinds and AttributeRestrictions contained in this rule. VerbAll represents all kinds. + // Verbs is a list of Verbs that apply to ALL the ResourceKinds contained in this rule. '*' represents all verbs. Verbs []string `json:"verbs" protobuf:"bytes,1,rep,name=verbs"` // APIGroups is the name of the APIGroup that contains the resources. If multiple API groups are specified, any action requested against one of // the enumerated resources in any API group will be allowed. // +optional APIGroups []string `json:"apiGroups,omitempty" protobuf:"bytes,3,rep,name=apiGroups"` - // Resources is a list of resources this rule applies to. ResourceAll represents all resources. + // Resources is a list of resources this rule applies to. '*' represents all resources. // +optional Resources []string `json:"resources,omitempty" protobuf:"bytes,4,rep,name=resources"` // ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed. diff --git a/vendor/k8s.io/api/rbac/v1alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/api/rbac/v1alpha1/types_swagger_doc_generated.go index acb84478253b7..46b8b9ee6f34e 100644 --- a/vendor/k8s.io/api/rbac/v1alpha1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/rbac/v1alpha1/types_swagger_doc_generated.go @@ -80,9 +80,9 @@ func (ClusterRoleList) SwaggerDoc() map[string]string { var map_PolicyRule = map[string]string{ "": "PolicyRule holds information that describes a policy rule, but does not contain information about who the rule applies to or which namespace the rule applies to.", - "verbs": "Verbs is a list of Verbs that apply to ALL the ResourceKinds and AttributeRestrictions contained in this rule. VerbAll represents all kinds.", + "verbs": "Verbs is a list of Verbs that apply to ALL the ResourceKinds contained in this rule. '*' represents all verbs.", "apiGroups": "APIGroups is the name of the APIGroup that contains the resources. If multiple API groups are specified, any action requested against one of the enumerated resources in any API group will be allowed.", - "resources": "Resources is a list of resources this rule applies to. ResourceAll represents all resources.", + "resources": "Resources is a list of resources this rule applies to. '*' represents all resources.", "resourceNames": "ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed.", "nonResourceURLs": "NonResourceURLs is a set of partial urls that a user should have access to. 
*s are allowed, but only as the full, final step in the path Since non-resource URLs are not namespaced, this field is only applicable for ClusterRoles referenced from a ClusterRoleBinding. Rules can either apply to API resources (such as \"pods\" or \"secrets\") or non-resource URL paths (such as \"/api\"), but not both.", } diff --git a/vendor/k8s.io/api/rbac/v1alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/api/rbac/v1alpha1/zz_generated.deepcopy.go index 0358227fabc81..9288bd0179f4d 100644 --- a/vendor/k8s.io/api/rbac/v1alpha1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/rbac/v1alpha1/zz_generated.deepcopy.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* diff --git a/vendor/k8s.io/api/rbac/v1beta1/generated.proto b/vendor/k8s.io/api/rbac/v1beta1/generated.proto index 17d3741f0c809..53c252554db17 100644 --- a/vendor/k8s.io/api/rbac/v1beta1/generated.proto +++ b/vendor/k8s.io/api/rbac/v1beta1/generated.proto @@ -96,7 +96,7 @@ message ClusterRoleList { // PolicyRule holds information that describes a policy rule, but does not contain information // about who the rule applies to or which namespace the rule applies to. message PolicyRule { - // Verbs is a list of Verbs that apply to ALL the ResourceKinds and AttributeRestrictions contained in this rule. VerbAll represents all kinds. + // Verbs is a list of Verbs that apply to ALL the ResourceKinds contained in this rule. '*' represents all verbs. repeated string verbs = 1; // APIGroups is the name of the APIGroup that contains the resources. If multiple API groups are specified, any action requested against one of diff --git a/vendor/k8s.io/api/rbac/v1beta1/types.go b/vendor/k8s.io/api/rbac/v1beta1/types.go index f167888119518..96e6b18f542bf 100644 --- a/vendor/k8s.io/api/rbac/v1beta1/types.go +++ b/vendor/k8s.io/api/rbac/v1beta1/types.go @@ -47,7 +47,7 @@ const ( // PolicyRule holds information that describes a policy rule, but does not contain information // about who the rule applies to or which namespace the rule applies to. type PolicyRule struct { - // Verbs is a list of Verbs that apply to ALL the ResourceKinds and AttributeRestrictions contained in this rule. VerbAll represents all kinds. + // Verbs is a list of Verbs that apply to ALL the ResourceKinds contained in this rule. '*' represents all verbs. Verbs []string `json:"verbs" protobuf:"bytes,1,rep,name=verbs"` // APIGroups is the name of the APIGroup that contains the resources. If multiple API groups are specified, any action requested against one of diff --git a/vendor/k8s.io/api/rbac/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/rbac/v1beta1/types_swagger_doc_generated.go index 0512301f5af54..5d57cb348f063 100644 --- a/vendor/k8s.io/api/rbac/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/rbac/v1beta1/types_swagger_doc_generated.go @@ -80,7 +80,7 @@ func (ClusterRoleList) SwaggerDoc() map[string]string { var map_PolicyRule = map[string]string{ "": "PolicyRule holds information that describes a policy rule, but does not contain information about who the rule applies to or which namespace the rule applies to.", - "verbs": "Verbs is a list of Verbs that apply to ALL the ResourceKinds and AttributeRestrictions contained in this rule. VerbAll represents all kinds.", + "verbs": "Verbs is a list of Verbs that apply to ALL the ResourceKinds contained in this rule. '*' represents all verbs.", "apiGroups": "APIGroups is the name of the APIGroup that contains the resources. 
If multiple API groups are specified, any action requested against one of the enumerated resources in any API group will be allowed.", "resources": "Resources is a list of resources this rule applies to. '*' represents all resources in the specified apiGroups. '*/foo' represents the subresource 'foo' for all resources in the specified apiGroups.", "resourceNames": "ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed.", diff --git a/vendor/k8s.io/api/rbac/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/rbac/v1beta1/zz_generated.deepcopy.go index 7ffe58106572e..8abaa90c10a73 100644 --- a/vendor/k8s.io/api/rbac/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/rbac/v1beta1/zz_generated.deepcopy.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* diff --git a/vendor/k8s.io/api/rbac/v1beta1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/rbac/v1beta1/zz_generated.prerelease-lifecycle.go index a4d99b35ac345..3e38fff084c6e 100644 --- a/vendor/k8s.io/api/rbac/v1beta1/zz_generated.prerelease-lifecycle.go +++ b/vendor/k8s.io/api/rbac/v1beta1/zz_generated.prerelease-lifecycle.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* diff --git a/vendor/k8s.io/api/scheduling/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/scheduling/v1/zz_generated.deepcopy.go index 63bfe64042518..e7ff186306a4b 100644 --- a/vendor/k8s.io/api/scheduling/v1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/scheduling/v1/zz_generated.deepcopy.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* diff --git a/vendor/k8s.io/api/scheduling/v1alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/api/scheduling/v1alpha1/zz_generated.deepcopy.go index 0392823975fc3..b130c990e48c2 100644 --- a/vendor/k8s.io/api/scheduling/v1alpha1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/scheduling/v1alpha1/zz_generated.deepcopy.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* diff --git a/vendor/k8s.io/api/scheduling/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/scheduling/v1beta1/zz_generated.deepcopy.go index 6e2008578eea4..ff38c9854f077 100644 --- a/vendor/k8s.io/api/scheduling/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/scheduling/v1beta1/zz_generated.deepcopy.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* diff --git a/vendor/k8s.io/api/scheduling/v1beta1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/scheduling/v1beta1/zz_generated.prerelease-lifecycle.go index 45969d15c0b80..15aefb10851cf 100644 --- a/vendor/k8s.io/api/scheduling/v1beta1/zz_generated.prerelease-lifecycle.go +++ b/vendor/k8s.io/api/scheduling/v1beta1/zz_generated.prerelease-lifecycle.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* diff --git a/vendor/k8s.io/api/storage/v1/generated.proto b/vendor/k8s.io/api/storage/v1/generated.proto index 0e9a2e1da3f92..de639354d3d0c 100644 --- a/vendor/k8s.io/api/storage/v1/generated.proto +++ b/vendor/k8s.io/api/storage/v1/generated.proto @@ -142,7 +142,7 @@ message CSIDriverSpec { // unset or false and it can be flipped later when storage // capacity information has been published. // - // This field is immutable. + // This field was immutable in Kubernetes <= 1.22 and now is mutable. // // This is a beta field and only available when the CSIStorageCapacity // feature is enabled. 
The default is false. @@ -154,11 +154,13 @@ message CSIDriverSpec { // Defines if the underlying volume supports changing ownership and // permission of the volume before being mounted. // Refer to the specific FSGroupPolicy values for additional details. - // This field is alpha-level, and is only honored by servers - // that enable the CSIVolumeFSGroupPolicy feature gate. // // This field is immutable. // + // Defaults to ReadWriteOnceWithFSType, which will examine each volume + // to determine if Kubernetes should modify ownership and permissions of the volume. + // With the default policy the defined fsGroup will only be applied + // if a fstype is defined and the volume's access mode contains ReadWriteOnce. // +optional optional string fsGroupPolicy = 5; @@ -178,9 +180,6 @@ message CSIDriverSpec { // most one token is empty string. To receive a new token after expiry, // RequiresRepublish can be used to trigger NodePublishVolume periodically. // - // This is a beta feature and only available when the - // CSIServiceAccountToken feature is enabled. - // // +optional // +listType=atomic repeated TokenRequest tokenRequests = 6; @@ -193,9 +192,6 @@ message CSIDriverSpec { // to NodePublishVolume should only update the contents of the volume. New // mount points will not be seen by a running container. // - // This is a beta feature and only available when the - // CSIServiceAccountToken feature is enabled. - // // +optional optional bool requiresRepublish = 7; } @@ -319,6 +315,7 @@ message StorageClass { // An empty TopologySelectorTerm list means there is no topology restriction. // This field is only honored by servers that enable the VolumeScheduling feature. // +optional + // +listType=atomic repeated k8s.io.api.core.v1.TopologySelectorTerm allowedTopologies = 8; } diff --git a/vendor/k8s.io/api/storage/v1/types.go b/vendor/k8s.io/api/storage/v1/types.go index 6a7bf492920ae..6da0657ecc235 100644 --- a/vendor/k8s.io/api/storage/v1/types.go +++ b/vendor/k8s.io/api/storage/v1/types.go @@ -71,6 +71,7 @@ type StorageClass struct { // An empty TopologySelectorTerm list means there is no topology restriction. // This field is only honored by servers that enable the VolumeScheduling feature. // +optional + // +listType=atomic AllowedTopologies []v1.TopologySelectorTerm `json:"allowedTopologies,omitempty" protobuf:"bytes,8,rep,name=allowedTopologies"` } @@ -89,6 +90,7 @@ type StorageClassList struct { } // VolumeBindingMode indicates how PersistentVolumeClaims should be bound. +// +enum type VolumeBindingMode string const ( @@ -340,7 +342,7 @@ type CSIDriverSpec struct { // unset or false and it can be flipped later when storage // capacity information has been published. // - // This field is immutable. + // This field was immutable in Kubernetes <= 1.22 and now is mutable. // // This is a beta field and only available when the CSIStorageCapacity // feature is enabled. The default is false. @@ -352,11 +354,13 @@ type CSIDriverSpec struct { // Defines if the underlying volume supports changing ownership and // permission of the volume before being mounted. // Refer to the specific FSGroupPolicy values for additional details. - // This field is alpha-level, and is only honored by servers - // that enable the CSIVolumeFSGroupPolicy feature gate. // // This field is immutable. // + // Defaults to ReadWriteOnceWithFSType, which will examine each volume + // to determine if Kubernetes should modify ownership and permissions of the volume. 
+ // With the default policy the defined fsGroup will only be applied + // if a fstype is defined and the volume's access mode contains ReadWriteOnce. // +optional FSGroupPolicy *FSGroupPolicy `json:"fsGroupPolicy,omitempty" protobuf:"bytes,5,opt,name=fsGroupPolicy"` @@ -376,9 +380,6 @@ type CSIDriverSpec struct { // most one token is empty string. To receive a new token after expiry, // RequiresRepublish can be used to trigger NodePublishVolume periodically. // - // This is a beta feature and only available when the - // CSIServiceAccountToken feature is enabled. - // // +optional // +listType=atomic TokenRequests []TokenRequest `json:"tokenRequests,omitempty" protobuf:"bytes,6,opt,name=tokenRequests"` @@ -391,9 +392,6 @@ type CSIDriverSpec struct { // to NodePublishVolume should only update the contents of the volume. New // mount points will not be seen by a running container. // - // This is a beta feature and only available when the - // CSIServiceAccountToken feature is enabled. - // // +optional RequiresRepublish *bool `json:"requiresRepublish,omitempty" protobuf:"varint,7,opt,name=requiresRepublish"` } @@ -414,10 +412,11 @@ const ( ReadWriteOnceWithFSTypeFSGroupPolicy FSGroupPolicy = "ReadWriteOnceWithFSType" // FileFSGroupPolicy indicates that CSI driver supports volume ownership - // and permission change via fsGroup, and Kubernetes may use fsGroup - // to change permissions and ownership of the volume to match user requested fsGroup in + // and permission change via fsGroup, and Kubernetes will change the permissions + // and ownership of every file in the volume to match the user requested fsGroup in // the pod's SecurityPolicy regardless of fstype or access mode. - // This mode should be defined if the fsGroup is expected to always change on mount + // Use this mode if Kubernetes should modify the permissions and ownership + // of the volume. FileFSGroupPolicy FSGroupPolicy = "File" // NoneFSGroupPolicy indicates that volumes will be mounted without performing diff --git a/vendor/k8s.io/api/storage/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/storage/v1/types_swagger_doc_generated.go index a9c7cc9afddb9..ed5b18cb7a8ed 100644 --- a/vendor/k8s.io/api/storage/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/storage/v1/types_swagger_doc_generated.go @@ -52,10 +52,10 @@ var map_CSIDriverSpec = map[string]string{ "attachRequired": "attachRequired indicates this CSI volume driver requires an attach operation (because it implements the CSI ControllerPublishVolume() method), and that the Kubernetes attach detach controller should call the attach volume interface which checks the volumeattachment status and waits until the volume is attached before proceeding to mounting. The CSI external-attacher coordinates with CSI volume driver and updates the volumeattachment status when the attach operation is complete. If the CSIDriverRegistry feature gate is enabled and the value is specified to false, the attach operation will be skipped. Otherwise the attach operation will be called.\n\nThis field is immutable.", "podInfoOnMount": "If set to true, podInfoOnMount indicates this CSI volume driver requires additional pod information (like podName, podUID, etc.) during mount operations. If set to false, pod information will not be passed on mount. Default is false. The CSI driver specifies podInfoOnMount as part of driver deployment. If true, Kubelet will pass pod information as VolumeContext in the CSI NodePublishVolume() calls. 
The CSI driver is responsible for parsing and validating the information passed in as VolumeContext. The following VolumeConext will be passed if podInfoOnMount is set to true. This list might grow, but the prefix will be used. \"csi.storage.k8s.io/pod.name\": pod.Name \"csi.storage.k8s.io/pod.namespace\": pod.Namespace \"csi.storage.k8s.io/pod.uid\": string(pod.UID) \"csi.storage.k8s.io/ephemeral\": \"true\" if the volume is an ephemeral inline volume\n defined by a CSIVolumeSource, otherwise \"false\"\n\n\"csi.storage.k8s.io/ephemeral\" is a new feature in Kubernetes 1.16. It is only required for drivers which support both the \"Persistent\" and \"Ephemeral\" VolumeLifecycleMode. Other drivers can leave pod info disabled and/or ignore this field. As Kubernetes 1.15 doesn't support this field, drivers can only support one mode when deployed on such a cluster and the deployment determines which mode that is, for example via a command line parameter of the driver.\n\nThis field is immutable.", "volumeLifecycleModes": "volumeLifecycleModes defines what kind of volumes this CSI volume driver supports. The default if the list is empty is \"Persistent\", which is the usage defined by the CSI specification and implemented in Kubernetes via the usual PV/PVC mechanism. The other mode is \"Ephemeral\". In this mode, volumes are defined inline inside the pod spec with CSIVolumeSource and their lifecycle is tied to the lifecycle of that pod. A driver has to be aware of this because it is only going to get a NodePublishVolume call for such a volume. For more information about implementing this mode, see https://kubernetes-csi.github.io/docs/ephemeral-local-volumes.html A driver can support one or more of these modes and more modes may be added in the future. This field is beta.\n\nThis field is immutable.", - "storageCapacity": "If set to true, storageCapacity indicates that the CSI volume driver wants pod scheduling to consider the storage capacity that the driver deployment will report by creating CSIStorageCapacity objects with capacity information.\n\nThe check can be enabled immediately when deploying a driver. In that case, provisioning new volumes with late binding will pause until the driver deployment has published some suitable CSIStorageCapacity object.\n\nAlternatively, the driver can be deployed with the field unset or false and it can be flipped later when storage capacity information has been published.\n\nThis field is immutable.\n\nThis is a beta field and only available when the CSIStorageCapacity feature is enabled. The default is false.", - "fsGroupPolicy": "Defines if the underlying volume supports changing ownership and permission of the volume before being mounted. Refer to the specific FSGroupPolicy values for additional details. This field is alpha-level, and is only honored by servers that enable the CSIVolumeFSGroupPolicy feature gate.\n\nThis field is immutable.", - "tokenRequests": "TokenRequests indicates the CSI driver needs pods' service account tokens it is mounting volume for to do necessary authentication. Kubelet will pass the tokens in VolumeContext in the CSI NodePublishVolume calls. The CSI driver should parse and validate the following VolumeContext: \"csi.storage.k8s.io/serviceAccount.tokens\": {\n \"\": {\n \"token\": ,\n \"expirationTimestamp\": ,\n },\n ...\n}\n\nNote: Audience in each TokenRequest should be different and at most one token is empty string. 
To receive a new token after expiry, RequiresRepublish can be used to trigger NodePublishVolume periodically.\n\nThis is a beta feature and only available when the CSIServiceAccountToken feature is enabled.", - "requiresRepublish": "RequiresRepublish indicates the CSI driver wants `NodePublishVolume` being periodically called to reflect any possible change in the mounted volume. This field defaults to false.\n\nNote: After a successful initial NodePublishVolume call, subsequent calls to NodePublishVolume should only update the contents of the volume. New mount points will not be seen by a running container.\n\nThis is a beta feature and only available when the CSIServiceAccountToken feature is enabled.", + "storageCapacity": "If set to true, storageCapacity indicates that the CSI volume driver wants pod scheduling to consider the storage capacity that the driver deployment will report by creating CSIStorageCapacity objects with capacity information.\n\nThe check can be enabled immediately when deploying a driver. In that case, provisioning new volumes with late binding will pause until the driver deployment has published some suitable CSIStorageCapacity object.\n\nAlternatively, the driver can be deployed with the field unset or false and it can be flipped later when storage capacity information has been published.\n\nThis field was immutable in Kubernetes <= 1.22 and now is mutable.\n\nThis is a beta field and only available when the CSIStorageCapacity feature is enabled. The default is false.", + "fsGroupPolicy": "Defines if the underlying volume supports changing ownership and permission of the volume before being mounted. Refer to the specific FSGroupPolicy values for additional details.\n\nThis field is immutable.\n\nDefaults to ReadWriteOnceWithFSType, which will examine each volume to determine if Kubernetes should modify ownership and permissions of the volume. With the default policy the defined fsGroup will only be applied if a fstype is defined and the volume's access mode contains ReadWriteOnce.", + "tokenRequests": "TokenRequests indicates the CSI driver needs pods' service account tokens it is mounting volume for to do necessary authentication. Kubelet will pass the tokens in VolumeContext in the CSI NodePublishVolume calls. The CSI driver should parse and validate the following VolumeContext: \"csi.storage.k8s.io/serviceAccount.tokens\": {\n \"\": {\n \"token\": ,\n \"expirationTimestamp\": ,\n },\n ...\n}\n\nNote: Audience in each TokenRequest should be different and at most one token is empty string. To receive a new token after expiry, RequiresRepublish can be used to trigger NodePublishVolume periodically.", + "requiresRepublish": "RequiresRepublish indicates the CSI driver wants `NodePublishVolume` being periodically called to reflect any possible change in the mounted volume. This field defaults to false.\n\nNote: After a successful initial NodePublishVolume call, subsequent calls to NodePublishVolume should only update the contents of the volume. 
New mount points will not be seen by a running container.", } func (CSIDriverSpec) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/storage/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/storage/v1/zz_generated.deepcopy.go index f4de942161045..300f42cbba4be 100644 --- a/vendor/k8s.io/api/storage/v1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/storage/v1/zz_generated.deepcopy.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* diff --git a/vendor/k8s.io/api/storage/v1alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/api/storage/v1alpha1/zz_generated.deepcopy.go index 64a34670b37e6..d9bc94b250057 100644 --- a/vendor/k8s.io/api/storage/v1alpha1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/storage/v1alpha1/zz_generated.deepcopy.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* diff --git a/vendor/k8s.io/api/storage/v1alpha1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/storage/v1alpha1/zz_generated.prerelease-lifecycle.go index 44311b4bbf660..41114c3c68f2e 100644 --- a/vendor/k8s.io/api/storage/v1alpha1/zz_generated.prerelease-lifecycle.go +++ b/vendor/k8s.io/api/storage/v1alpha1/zz_generated.prerelease-lifecycle.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* diff --git a/vendor/k8s.io/api/storage/v1beta1/generated.proto b/vendor/k8s.io/api/storage/v1beta1/generated.proto index 5ed6413ec0e0a..bfe8280d53c35 100644 --- a/vendor/k8s.io/api/storage/v1beta1/generated.proto +++ b/vendor/k8s.io/api/storage/v1beta1/generated.proto @@ -144,7 +144,7 @@ message CSIDriverSpec { // unset or false and it can be flipped later when storage // capacity information has been published. // - // This field is immutable. + // This field was immutable in Kubernetes <= 1.22 and now is mutable. // // This is a beta field and only available when the CSIStorageCapacity // feature is enabled. The default is false. @@ -156,11 +156,13 @@ message CSIDriverSpec { // Defines if the underlying volume supports changing ownership and // permission of the volume before being mounted. // Refer to the specific FSGroupPolicy values for additional details. - // This field is alpha-level, and is only honored by servers - // that enable the CSIVolumeFSGroupPolicy feature gate. // // This field is immutable. // + // Defaults to ReadWriteOnceWithFSType, which will examine each volume + // to determine if Kubernetes should modify ownership and permissions of the volume. + // With the default policy the defined fsGroup will only be applied + // if a fstype is defined and the volume's access mode contains ReadWriteOnce. // +optional optional string fsGroupPolicy = 5; @@ -180,9 +182,6 @@ message CSIDriverSpec { // most one token is empty string. To receive a new token after expiry, // RequiresRepublish can be used to trigger NodePublishVolume periodically. // - // This is a beta feature and only available when the - // CSIServiceAccountToken feature is enabled. - // // +optional // +listType=atomic repeated TokenRequest tokenRequests = 6; @@ -195,9 +194,6 @@ message CSIDriverSpec { // to NodePublishVolume should only update the contents of the volume. New // mount points will not be seen by a running container. // - // This is a beta feature and only available when the - // CSIServiceAccountToken feature is enabled. - // // +optional optional bool requiresRepublish = 7; } @@ -412,6 +408,7 @@ message StorageClass { // An empty TopologySelectorTerm list means there is no topology restriction. 
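The "csi.storage.k8s.io/serviceAccount.tokens" VolumeContext entry documented in the tokenRequests comments above is plain JSON keyed by audience. A minimal driver-side sketch of parsing it, assuming only the standard library; the struct shape follows the documented fields, and the package and helper names are illustrative:

package csiexample

import "encoding/json"

// serviceAccountToken mirrors the per-audience payload documented above.
type serviceAccountToken struct {
	Token               string `json:"token"`
	ExpirationTimestamp string `json:"expirationTimestamp"`
}

// parseServiceAccountTokens extracts the tokens a CSI driver receives in
// VolumeContext during NodePublishVolume. The map key is the audience;
// at most one audience may be the empty string.
func parseServiceAccountTokens(volumeContext map[string]string) (map[string]serviceAccountToken, error) {
	raw, ok := volumeContext["csi.storage.k8s.io/serviceAccount.tokens"]
	if !ok {
		// tokenRequests was not configured for this driver.
		return nil, nil
	}
	tokens := map[string]serviceAccountToken{}
	if err := json.Unmarshal([]byte(raw), &tokens); err != nil {
		return nil, err
	}
	return tokens, nil
}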
// This field is only honored by servers that enable the VolumeScheduling feature. // +optional + // +listType=atomic repeated k8s.io.api.core.v1.TopologySelectorTerm allowedTopologies = 8; } diff --git a/vendor/k8s.io/api/storage/v1beta1/types.go b/vendor/k8s.io/api/storage/v1beta1/types.go index d1ffe7a0ec59a..524d8b53414bb 100644 --- a/vendor/k8s.io/api/storage/v1beta1/types.go +++ b/vendor/k8s.io/api/storage/v1beta1/types.go @@ -75,6 +75,7 @@ type StorageClass struct { // An empty TopologySelectorTerm list means there is no topology restriction. // This field is only honored by servers that enable the VolumeScheduling feature. // +optional + // +listType=atomic AllowedTopologies []v1.TopologySelectorTerm `json:"allowedTopologies,omitempty" protobuf:"bytes,8,rep,name=allowedTopologies"` } @@ -361,7 +362,7 @@ type CSIDriverSpec struct { // unset or false and it can be flipped later when storage // capacity information has been published. // - // This field is immutable. + // This field was immutable in Kubernetes <= 1.22 and now is mutable. // // This is a beta field and only available when the CSIStorageCapacity // feature is enabled. The default is false. @@ -373,11 +374,13 @@ type CSIDriverSpec struct { // Defines if the underlying volume supports changing ownership and // permission of the volume before being mounted. // Refer to the specific FSGroupPolicy values for additional details. - // This field is alpha-level, and is only honored by servers - // that enable the CSIVolumeFSGroupPolicy feature gate. // // This field is immutable. // + // Defaults to ReadWriteOnceWithFSType, which will examine each volume + // to determine if Kubernetes should modify ownership and permissions of the volume. + // With the default policy the defined fsGroup will only be applied + // if a fstype is defined and the volume's access mode contains ReadWriteOnce. // +optional FSGroupPolicy *FSGroupPolicy `json:"fsGroupPolicy,omitempty" protobuf:"bytes,5,opt,name=fsGroupPolicy"` @@ -397,9 +400,6 @@ type CSIDriverSpec struct { // most one token is empty string. To receive a new token after expiry, // RequiresRepublish can be used to trigger NodePublishVolume periodically. // - // This is a beta feature and only available when the - // CSIServiceAccountToken feature is enabled. - // // +optional // +listType=atomic TokenRequests []TokenRequest `json:"tokenRequests,omitempty" protobuf:"bytes,6,opt,name=tokenRequests"` @@ -412,9 +412,6 @@ type CSIDriverSpec struct { // to NodePublishVolume should only update the contents of the volume. New // mount points will not be seen by a running container. // - // This is a beta feature and only available when the - // CSIServiceAccountToken feature is enabled. - // // +optional RequiresRepublish *bool `json:"requiresRepublish,omitempty" protobuf:"varint,7,opt,name=requiresRepublish"` } @@ -433,9 +430,11 @@ const ( ReadWriteOnceWithFSTypeFSGroupPolicy FSGroupPolicy = "ReadWriteOnceWithFSType" // FileFSGroupPolicy indicates that CSI driver supports volume ownership - // and permission change via fsGroup, and Kubernetes may use fsGroup - // to change permissions and ownership of the volume to match user requested fsGroup in + // and permission change via fsGroup, and Kubernetes will change the permissions + // and ownership of every file in the volume to match the user requested fsGroup in // the pod's SecurityPolicy regardless of fstype or access mode. + // Use this mode if Kubernetes should modify the permissions and ownership + // of the volume. 
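Concretely, a driver that always wants the pod's fsGroup applied opts in on its CSIDriver object. A minimal sketch, assuming the storage v1 types vendored here; the package, helper name, and driver name are illustrative:

package csiexample

import (
	storagev1 "k8s.io/api/storage/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// newFileFSGroupCSIDriver builds a CSIDriver that opts into the File policy,
// so kubelet changes ownership and permissions to the pod's fsGroup
// regardless of fstype or access mode. Leaving FSGroupPolicy nil keeps the
// default, ReadWriteOnceWithFSType.
func newFileFSGroupCSIDriver(name string) *storagev1.CSIDriver {
	policy := storagev1.FileFSGroupPolicy
	return &storagev1.CSIDriver{
		ObjectMeta: metav1.ObjectMeta{Name: name},
		Spec:       storagev1.CSIDriverSpec{FSGroupPolicy: &policy},
	}
}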
FileFSGroupPolicy FSGroupPolicy = "File" // None indicates that volumes will be mounted without performing diff --git a/vendor/k8s.io/api/storage/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/storage/v1beta1/types_swagger_doc_generated.go index 0d03a30b4f710..f4dbf0fefa748 100644 --- a/vendor/k8s.io/api/storage/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/storage/v1beta1/types_swagger_doc_generated.go @@ -52,10 +52,10 @@ var map_CSIDriverSpec = map[string]string{ "attachRequired": "attachRequired indicates this CSI volume driver requires an attach operation (because it implements the CSI ControllerPublishVolume() method), and that the Kubernetes attach detach controller should call the attach volume interface which checks the volumeattachment status and waits until the volume is attached before proceeding to mounting. The CSI external-attacher coordinates with CSI volume driver and updates the volumeattachment status when the attach operation is complete. If the CSIDriverRegistry feature gate is enabled and the value is specified to false, the attach operation will be skipped. Otherwise the attach operation will be called.\n\nThis field is immutable.", "podInfoOnMount": "If set to true, podInfoOnMount indicates this CSI volume driver requires additional pod information (like podName, podUID, etc.) during mount operations. If set to false, pod information will not be passed on mount. Default is false. The CSI driver specifies podInfoOnMount as part of driver deployment. If true, Kubelet will pass pod information as VolumeContext in the CSI NodePublishVolume() calls. The CSI driver is responsible for parsing and validating the information passed in as VolumeContext. The following VolumeConext will be passed if podInfoOnMount is set to true. This list might grow, but the prefix will be used. \"csi.storage.k8s.io/pod.name\": pod.Name \"csi.storage.k8s.io/pod.namespace\": pod.Namespace \"csi.storage.k8s.io/pod.uid\": string(pod.UID) \"csi.storage.k8s.io/ephemeral\": \"true\" if the volume is an ephemeral inline volume\n defined by a CSIVolumeSource, otherwise \"false\"\n\n\"csi.storage.k8s.io/ephemeral\" is a new feature in Kubernetes 1.16. It is only required for drivers which support both the \"Persistent\" and \"Ephemeral\" VolumeLifecycleMode. Other drivers can leave pod info disabled and/or ignore this field. As Kubernetes 1.15 doesn't support this field, drivers can only support one mode when deployed on such a cluster and the deployment determines which mode that is, for example via a command line parameter of the driver.\n\nThis field is immutable.", "volumeLifecycleModes": "VolumeLifecycleModes defines what kind of volumes this CSI volume driver supports. The default if the list is empty is \"Persistent\", which is the usage defined by the CSI specification and implemented in Kubernetes via the usual PV/PVC mechanism. The other mode is \"Ephemeral\". In this mode, volumes are defined inline inside the pod spec with CSIVolumeSource and their lifecycle is tied to the lifecycle of that pod. A driver has to be aware of this because it is only going to get a NodePublishVolume call for such a volume. 
For more information about implementing this mode, see https://kubernetes-csi.github.io/docs/ephemeral-local-volumes.html A driver can support one or more of these modes and more modes may be added in the future.\n\nThis field is immutable.", - "storageCapacity": "If set to true, storageCapacity indicates that the CSI volume driver wants pod scheduling to consider the storage capacity that the driver deployment will report by creating CSIStorageCapacity objects with capacity information.\n\nThe check can be enabled immediately when deploying a driver. In that case, provisioning new volumes with late binding will pause until the driver deployment has published some suitable CSIStorageCapacity object.\n\nAlternatively, the driver can be deployed with the field unset or false and it can be flipped later when storage capacity information has been published.\n\nThis field is immutable.\n\nThis is a beta field and only available when the CSIStorageCapacity feature is enabled. The default is false.", - "fsGroupPolicy": "Defines if the underlying volume supports changing ownership and permission of the volume before being mounted. Refer to the specific FSGroupPolicy values for additional details. This field is alpha-level, and is only honored by servers that enable the CSIVolumeFSGroupPolicy feature gate.\n\nThis field is immutable.", - "tokenRequests": "TokenRequests indicates the CSI driver needs pods' service account tokens it is mounting volume for to do necessary authentication. Kubelet will pass the tokens in VolumeContext in the CSI NodePublishVolume calls. The CSI driver should parse and validate the following VolumeContext: \"csi.storage.k8s.io/serviceAccount.tokens\": {\n \"\": {\n \"token\": ,\n \"expirationTimestamp\": ,\n },\n ...\n}\n\nNote: Audience in each TokenRequest should be different and at most one token is empty string. To receive a new token after expiry, RequiresRepublish can be used to trigger NodePublishVolume periodically.\n\nThis is a beta feature and only available when the CSIServiceAccountToken feature is enabled.", - "requiresRepublish": "RequiresRepublish indicates the CSI driver wants `NodePublishVolume` being periodically called to reflect any possible change in the mounted volume. This field defaults to false.\n\nNote: After a successful initial NodePublishVolume call, subsequent calls to NodePublishVolume should only update the contents of the volume. New mount points will not be seen by a running container.\n\nThis is a beta feature and only available when the CSIServiceAccountToken feature is enabled.", + "storageCapacity": "If set to true, storageCapacity indicates that the CSI volume driver wants pod scheduling to consider the storage capacity that the driver deployment will report by creating CSIStorageCapacity objects with capacity information.\n\nThe check can be enabled immediately when deploying a driver. In that case, provisioning new volumes with late binding will pause until the driver deployment has published some suitable CSIStorageCapacity object.\n\nAlternatively, the driver can be deployed with the field unset or false and it can be flipped later when storage capacity information has been published.\n\nThis field was immutable in Kubernetes <= 1.22 and now is mutable.\n\nThis is a beta field and only available when the CSIStorageCapacity feature is enabled. The default is false.", + "fsGroupPolicy": "Defines if the underlying volume supports changing ownership and permission of the volume before being mounted. 
Refer to the specific FSGroupPolicy values for additional details.\n\nThis field is immutable.\n\nDefaults to ReadWriteOnceWithFSType, which will examine each volume to determine if Kubernetes should modify ownership and permissions of the volume. With the default policy the defined fsGroup will only be applied if a fstype is defined and the volume's access mode contains ReadWriteOnce.", + "tokenRequests": "TokenRequests indicates the CSI driver needs pods' service account tokens it is mounting volume for to do necessary authentication. Kubelet will pass the tokens in VolumeContext in the CSI NodePublishVolume calls. The CSI driver should parse and validate the following VolumeContext: \"csi.storage.k8s.io/serviceAccount.tokens\": {\n \"\": {\n \"token\": ,\n \"expirationTimestamp\": ,\n },\n ...\n}\n\nNote: Audience in each TokenRequest should be different and at most one token is empty string. To receive a new token after expiry, RequiresRepublish can be used to trigger NodePublishVolume periodically.", + "requiresRepublish": "RequiresRepublish indicates the CSI driver wants `NodePublishVolume` being periodically called to reflect any possible change in the mounted volume. This field defaults to false.\n\nNote: After a successful initial NodePublishVolume call, subsequent calls to NodePublishVolume should only update the contents of the volume. New mount points will not be seen by a running container.", } func (CSIDriverSpec) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/storage/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/storage/v1beta1/zz_generated.deepcopy.go index 9b7e675ee6ffe..5411ed8c00ee4 100644 --- a/vendor/k8s.io/api/storage/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/storage/v1beta1/zz_generated.deepcopy.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* diff --git a/vendor/k8s.io/api/storage/v1beta1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/storage/v1beta1/zz_generated.prerelease-lifecycle.go index 275b5d5dbed36..7b69b2cbca339 100644 --- a/vendor/k8s.io/api/storage/v1beta1/zz_generated.prerelease-lifecycle.go +++ b/vendor/k8s.io/api/storage/v1beta1/zz_generated.prerelease-lifecycle.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* diff --git a/vendor/k8s.io/apimachinery/pkg/api/errors/OWNERS b/vendor/k8s.io/apimachinery/pkg/api/errors/OWNERS index 4ba4022b6ea5e..d4c73022fe883 100644 --- a/vendor/k8s.io/apimachinery/pkg/api/errors/OWNERS +++ b/vendor/k8s.io/apimachinery/pkg/api/errors/OWNERS @@ -11,7 +11,6 @@ reviewers: - caesarxuchao - mikedanese - liggitt -- erictune - saad-ali - janetkuo - tallclair diff --git a/vendor/k8s.io/apimachinery/pkg/api/errors/errors.go b/vendor/k8s.io/apimachinery/pkg/api/errors/errors.go index d3927d8173825..97e17be3941a9 100644 --- a/vendor/k8s.io/apimachinery/pkg/api/errors/errors.go +++ b/vendor/k8s.io/apimachinery/pkg/api/errors/errors.go @@ -44,6 +44,28 @@ type APIStatus interface { var _ error = &StatusError{} +var knownReasons = map[metav1.StatusReason]struct{}{ + // metav1.StatusReasonUnknown : {} + metav1.StatusReasonUnauthorized: {}, + metav1.StatusReasonForbidden: {}, + metav1.StatusReasonNotFound: {}, + metav1.StatusReasonAlreadyExists: {}, + metav1.StatusReasonConflict: {}, + metav1.StatusReasonGone: {}, + metav1.StatusReasonInvalid: {}, + metav1.StatusReasonServerTimeout: {}, + metav1.StatusReasonTimeout: {}, + metav1.StatusReasonTooManyRequests: {}, + metav1.StatusReasonBadRequest: {}, + 
metav1.StatusReasonMethodNotAllowed: {}, + metav1.StatusReasonNotAcceptable: {}, + metav1.StatusReasonRequestEntityTooLarge: {}, + metav1.StatusReasonUnsupportedMediaType: {}, + metav1.StatusReasonInternalError: {}, + metav1.StatusReasonExpired: {}, + metav1.StatusReasonServiceUnavailable: {}, +} + // Error implements the Error interface. func (e *StatusError) Error() string { return e.ErrStatus.Message @@ -148,6 +170,25 @@ func NewAlreadyExists(qualifiedResource schema.GroupResource, name string) *Stat }} } +// NewGenerateNameConflict returns an error indicating the server +// was not able to generate a valid name for a resource. +func NewGenerateNameConflict(qualifiedResource schema.GroupResource, name string, retryAfterSeconds int) *StatusError { + return &StatusError{metav1.Status{ + Status: metav1.StatusFailure, + Code: http.StatusConflict, + Reason: metav1.StatusReasonAlreadyExists, + Details: &metav1.StatusDetails{ + Group: qualifiedResource.Group, + Kind: qualifiedResource.Resource, + Name: name, + RetryAfterSeconds: int32(retryAfterSeconds), + }, + Message: fmt.Sprintf( + "%s %q already exists, the server was not able to generate a unique name for the object", + qualifiedResource.String(), name), + }} +} + // NewUnauthorized returns an error indicating the client is not authorized to perform the requested // action. func NewUnauthorized(reason string) *StatusError { @@ -248,7 +289,7 @@ func NewInvalid(qualifiedKind schema.GroupKind, name string, errs field.ErrorLis Field: err.Field, }) } - return &StatusError{metav1.Status{ + err := &StatusError{metav1.Status{ Status: metav1.StatusFailure, Code: http.StatusUnprocessableEntity, Reason: metav1.StatusReasonInvalid, @@ -258,8 +299,14 @@ func NewInvalid(qualifiedKind schema.GroupKind, name string, errs field.ErrorLis Name: name, Causes: causes, }, - Message: fmt.Sprintf("%s %q is invalid: %v", qualifiedKind.String(), name, errs.ToAggregate()), }} + aggregatedErrs := errs.ToAggregate() + if aggregatedErrs == nil { + err.ErrStatus.Message = fmt.Sprintf("%s %q is invalid", qualifiedKind.String(), name) + } else { + err.ErrStatus.Message = fmt.Sprintf("%s %q is invalid: %v", qualifiedKind.String(), name, aggregatedErrs) + } + return err } // NewBadRequest creates an error that indicates that the request is invalid and can not be processed. @@ -274,7 +321,7 @@ func NewBadRequest(reason string) *StatusError { // NewTooManyRequests creates an error that indicates that the client must try again later because // the specified endpoint is not accepting requests. More specific details should be provided -// if client should know why the failure was limited4. +// if client should know why the failure was limited. func NewTooManyRequests(message string, retryAfterSeconds int) *StatusError { return &StatusError{metav1.Status{ Status: metav1.StatusFailure, @@ -478,7 +525,14 @@ func NewGenericServerResponse(code int, verb string, qualifiedResource schema.Gr // IsNotFound returns true if the specified error was created by NewNotFound. // It supports wrapped errors. func IsNotFound(err error) bool { - return ReasonForError(err) == metav1.StatusReasonNotFound + reason, code := reasonAndCodeForError(err) + if reason == metav1.StatusReasonNotFound { + return true + } + if _, ok := knownReasons[reason]; !ok && code == http.StatusNotFound { + return true + } + return false } // IsAlreadyExists determines if the err is an error which indicates that a specified resource already exists. 
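The effect of the knownReasons table is easiest to see with a reasonless error. A minimal sketch using only the public helpers in this package: a StatusError that carries a bare HTTP code but no StatusReason (as some proxies and aggregated servers return) now matches the corresponding Is* helper, while an error carrying a different, known reason still does not:

package errorsexample

import (
	"net/http"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func checkNotFound() (bool, bool) {
	// A bare 404 with no machine-readable reason (StatusReasonUnknown).
	bare404 := &apierrors.StatusError{ErrStatus: metav1.Status{
		Status: metav1.StatusFailure,
		Code:   http.StatusNotFound,
	}}
	// A 404 that explicitly carries a different, known reason.
	expired := &apierrors.StatusError{ErrStatus: metav1.Status{
		Status: metav1.StatusFailure,
		Code:   http.StatusNotFound,
		Reason: metav1.StatusReasonExpired,
	}}
	// true: the reason is unknown, so the HTTP-code fallback applies.
	// false: StatusReasonExpired is in knownReasons, so no fallback.
	return apierrors.IsNotFound(bare404), apierrors.IsNotFound(expired)
}

Errors built by the new NewGenerateNameConflict constructor carry reason AlreadyExists, so existing IsAlreadyExists checks keep matching them, and callers can read RetryAfterSeconds from the status details before retrying name generation.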
@@ -490,19 +544,40 @@ func IsAlreadyExists(err error) bool { // IsConflict determines if the err is an error which indicates the provided update conflicts. // It supports wrapped errors. func IsConflict(err error) bool { - return ReasonForError(err) == metav1.StatusReasonConflict + reason, code := reasonAndCodeForError(err) + if reason == metav1.StatusReasonConflict { + return true + } + if _, ok := knownReasons[reason]; !ok && code == http.StatusConflict { + return true + } + return false } // IsInvalid determines if the err is an error which indicates the provided resource is not valid. // It supports wrapped errors. func IsInvalid(err error) bool { - return ReasonForError(err) == metav1.StatusReasonInvalid + reason, code := reasonAndCodeForError(err) + if reason == metav1.StatusReasonInvalid { + return true + } + if _, ok := knownReasons[reason]; !ok && code == http.StatusUnprocessableEntity { + return true + } + return false } // IsGone is true if the error indicates the requested resource is no longer available. // It supports wrapped errors. func IsGone(err error) bool { - return ReasonForError(err) == metav1.StatusReasonGone + reason, code := reasonAndCodeForError(err) + if reason == metav1.StatusReasonGone { + return true + } + if _, ok := knownReasons[reason]; !ok && code == http.StatusGone { + return true + } + return false } // IsResourceExpired is true if the error indicates the resource has expired and the current action is @@ -515,77 +590,147 @@ func IsResourceExpired(err error) bool { // IsNotAcceptable determines if err is an error which indicates that the request failed due to an invalid Accept header // It supports wrapped errors. func IsNotAcceptable(err error) bool { - return ReasonForError(err) == metav1.StatusReasonNotAcceptable + reason, code := reasonAndCodeForError(err) + if reason == metav1.StatusReasonNotAcceptable { + return true + } + if _, ok := knownReasons[reason]; !ok && code == http.StatusNotAcceptable { + return true + } + return false } // IsUnsupportedMediaType determines if err is an error which indicates that the request failed due to an invalid Content-Type header // It supports wrapped errors. func IsUnsupportedMediaType(err error) bool { - return ReasonForError(err) == metav1.StatusReasonUnsupportedMediaType + reason, code := reasonAndCodeForError(err) + if reason == metav1.StatusReasonUnsupportedMediaType { + return true + } + if _, ok := knownReasons[reason]; !ok && code == http.StatusUnsupportedMediaType { + return true + } + return false } // IsMethodNotSupported determines if the err is an error which indicates the provided action could not // be performed because it is not supported by the server. // It supports wrapped errors. func IsMethodNotSupported(err error) bool { - return ReasonForError(err) == metav1.StatusReasonMethodNotAllowed + reason, code := reasonAndCodeForError(err) + if reason == metav1.StatusReasonMethodNotAllowed { + return true + } + if _, ok := knownReasons[reason]; !ok && code == http.StatusMethodNotAllowed { + return true + } + return false } // IsServiceUnavailable is true if the error indicates the underlying service is no longer available. // It supports wrapped errors. 
func IsServiceUnavailable(err error) bool { - return ReasonForError(err) == metav1.StatusReasonServiceUnavailable + reason, code := reasonAndCodeForError(err) + if reason == metav1.StatusReasonServiceUnavailable { + return true + } + if _, ok := knownReasons[reason]; !ok && code == http.StatusServiceUnavailable { + return true + } + return false } // IsBadRequest determines if err is an error which indicates that the request is invalid. // It supports wrapped errors. func IsBadRequest(err error) bool { - return ReasonForError(err) == metav1.StatusReasonBadRequest + reason, code := reasonAndCodeForError(err) + if reason == metav1.StatusReasonBadRequest { + return true + } + if _, ok := knownReasons[reason]; !ok && code == http.StatusBadRequest { + return true + } + return false } // IsUnauthorized determines if err is an error which indicates that the request is unauthorized and // requires authentication by the user. // It supports wrapped errors. func IsUnauthorized(err error) bool { - return ReasonForError(err) == metav1.StatusReasonUnauthorized + reason, code := reasonAndCodeForError(err) + if reason == metav1.StatusReasonUnauthorized { + return true + } + if _, ok := knownReasons[reason]; !ok && code == http.StatusUnauthorized { + return true + } + return false } // IsForbidden determines if err is an error which indicates that the request is forbidden and cannot // be completed as requested. // It supports wrapped errors. func IsForbidden(err error) bool { - return ReasonForError(err) == metav1.StatusReasonForbidden + reason, code := reasonAndCodeForError(err) + if reason == metav1.StatusReasonForbidden { + return true + } + if _, ok := knownReasons[reason]; !ok && code == http.StatusForbidden { + return true + } + return false } // IsTimeout determines if err is an error which indicates that request times out due to long // processing. // It supports wrapped errors. func IsTimeout(err error) bool { - return ReasonForError(err) == metav1.StatusReasonTimeout + reason, code := reasonAndCodeForError(err) + if reason == metav1.StatusReasonTimeout { + return true + } + if _, ok := knownReasons[reason]; !ok && code == http.StatusGatewayTimeout { + return true + } + return false } // IsServerTimeout determines if err is an error which indicates that the request needs to be retried // by the client. // It supports wrapped errors. func IsServerTimeout(err error) bool { + // do not check the status code, because no http status code exists that can + // be scoped to retryable timeouts. return ReasonForError(err) == metav1.StatusReasonServerTimeout } // IsInternalError determines if err is an error which indicates an internal server error. // It supports wrapped errors. func IsInternalError(err error) bool { - return ReasonForError(err) == metav1.StatusReasonInternalError + reason, code := reasonAndCodeForError(err) + if reason == metav1.StatusReasonInternalError { + return true + } + if _, ok := knownReasons[reason]; !ok && code == http.StatusInternalServerError { + return true + } + return false } // IsTooManyRequests determines if err is an error which indicates that there are too many requests // that the server cannot handle. // It supports wrapped errors.
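Each helper above also advertises wrapped-error support: the reason and code are recovered with errors.As, so a StatusError survives any number of %w wrappings. A small sketch, assuming nothing beyond the standard library and this package; the function name is illustrative:

package errorsexample

import (
	"fmt"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
)

func checkWrapped() bool {
	base := apierrors.NewTooManyRequests("slow down", 5)
	wrapped := fmt.Errorf("refreshing cache: %w", base)
	// errors.As unwraps down to the embedded APIStatus, so this is true.
	return apierrors.IsTooManyRequests(wrapped)
}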
func IsTooManyRequests(err error) bool { - if ReasonForError(err) == metav1.StatusReasonTooManyRequests { + reason, code := reasonAndCodeForError(err) + if reason == metav1.StatusReasonTooManyRequests { return true } - if status := APIStatus(nil); errors.As(err, &status) { - return status.Status().Code == http.StatusTooManyRequests + + // IsTooManyRequests' checking of code predates the checking of the code in + // the other Is* functions. In order to maintain backward compatibility, this + // does not check that the reason is unknown. + if code == http.StatusTooManyRequests { + return true } return false } @@ -594,11 +739,16 @@ func IsTooManyRequests(err error) bool { // the request entity is too large. // It supports wrapped errors. func IsRequestEntityTooLargeError(err error) bool { - if ReasonForError(err) == metav1.StatusReasonRequestEntityTooLarge { + reason, code := reasonAndCodeForError(err) + if reason == metav1.StatusReasonRequestEntityTooLarge { return true } - if status := APIStatus(nil); errors.As(err, &status) { - return status.Status().Code == http.StatusRequestEntityTooLarge + + // IsRequestEntityTooLargeError's checking of code predates the checking of + // the code in the other Is* functions. In order to maintain backward + // compatibility, this does not check that the reason is unknown. + if code == http.StatusRequestEntityTooLarge { + return true } return false } @@ -653,6 +803,13 @@ func ReasonForError(err error) metav1.StatusReason { return metav1.StatusReasonUnknown } +func reasonAndCodeForError(err error) (metav1.StatusReason, int32) { + if status := APIStatus(nil); errors.As(err, &status) { + return status.Status().Reason, status.Status().Code + } + return metav1.StatusReasonUnknown, 0 +} + // ErrorReporter converts generic errors into runtime.Object errors without // requiring the caller to take a dependency on meta/v1 (where Status lives). // This prevents circular dependencies in core watch code. diff --git a/vendor/k8s.io/apimachinery/pkg/api/meta/firsthit_restmapper.go b/vendor/k8s.io/apimachinery/pkg/api/meta/firsthit_restmapper.go index fd22100229062..1bc816fe3f3b2 100644 --- a/vendor/k8s.io/apimachinery/pkg/api/meta/firsthit_restmapper.go +++ b/vendor/k8s.io/apimachinery/pkg/api/meta/firsthit_restmapper.go @@ -23,6 +23,10 @@ import ( utilerrors "k8s.io/apimachinery/pkg/util/errors" ) +var ( + _ ResettableRESTMapper = &FirstHitRESTMapper{} +) + // FirstHitRESTMapper is a wrapper for multiple RESTMappers which returns the // first successful result for the singular requests type FirstHitRESTMapper struct { @@ -75,6 +79,10 @@ func (m FirstHitRESTMapper) RESTMapping(gk schema.GroupKind, versions ...string) return nil, collapseAggregateErrors(errors) } +func (m FirstHitRESTMapper) Reset() { + m.MultiRESTMapper.Reset() +} + // collapseAggregateErrors returns the minimal errors. 
it handles empty as nil, handles one item in a list // by returning the item, and collapses all NoMatchErrors to a single one (since they should all be the same) func collapseAggregateErrors(errors []error) error { diff --git a/vendor/k8s.io/apimachinery/pkg/api/meta/interfaces.go b/vendor/k8s.io/apimachinery/pkg/api/meta/interfaces.go index 42eac3af00146..a35ce3bd0aad1 100644 --- a/vendor/k8s.io/apimachinery/pkg/api/meta/interfaces.go +++ b/vendor/k8s.io/apimachinery/pkg/api/meta/interfaces.go @@ -132,3 +132,12 @@ type RESTMapper interface { ResourceSingularizer(resource string) (singular string, err error) } + +// ResettableRESTMapper is a RESTMapper which is capable of resetting itself +// from discovery. +// All rest mappers that delegate to other rest mappers must implement this interface and dynamically +// check if the delegate mapper supports the Reset() operation. +type ResettableRESTMapper interface { + RESTMapper + Reset() +} diff --git a/vendor/k8s.io/apimachinery/pkg/api/meta/lazy.go b/vendor/k8s.io/apimachinery/pkg/api/meta/lazy.go index 431a0a6353247..a4298114b6dcb 100644 --- a/vendor/k8s.io/apimachinery/pkg/api/meta/lazy.go +++ b/vendor/k8s.io/apimachinery/pkg/api/meta/lazy.go @@ -32,7 +32,7 @@ type lazyObject struct { mapper RESTMapper } -// NewLazyObjectLoader handles unrecoverable errors when creating a RESTMapper / ObjectTyper by +// NewLazyRESTMapperLoader handles unrecoverable errors when creating a RESTMapper / ObjectTyper by // returning those initialization errors when the interface methods are invoked. This defers the // initialization and any server calls until a client actually needs to perform the action. func NewLazyRESTMapperLoader(fn func() (RESTMapper, error)) RESTMapper { @@ -52,7 +52,7 @@ func (o *lazyObject) init() error { return o.err } -var _ RESTMapper = &lazyObject{} +var _ ResettableRESTMapper = &lazyObject{} func (o *lazyObject) KindFor(resource schema.GroupVersionResource) (schema.GroupVersionKind, error) { if err := o.init(); err != nil { @@ -102,3 +102,11 @@ func (o *lazyObject) ResourceSingularizer(resource string) (singular string, err } return o.mapper.ResourceSingularizer(resource) } + +func (o *lazyObject) Reset() { + o.lock.Lock() + defer o.lock.Unlock() + if o.loaded && o.err == nil { + MaybeResetRESTMapper(o.mapper) + } +} diff --git a/vendor/k8s.io/apimachinery/pkg/api/meta/multirestmapper.go b/vendor/k8s.io/apimachinery/pkg/api/meta/multirestmapper.go index 6b01bf197fa3d..b7e97125052f0 100644 --- a/vendor/k8s.io/apimachinery/pkg/api/meta/multirestmapper.go +++ b/vendor/k8s.io/apimachinery/pkg/api/meta/multirestmapper.go @@ -24,11 +24,15 @@ import ( utilerrors "k8s.io/apimachinery/pkg/util/errors" ) +var ( + _ ResettableRESTMapper = MultiRESTMapper{} +) + // MultiRESTMapper is a wrapper for multiple RESTMappers. 
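The interfaces.go contract above says a delegating mapper should not assume its delegate is resettable; it implements Reset itself and probes the delegate dynamically, which is what the MaybeResetRESTMapper helper added in restmapper.go below encapsulates. A minimal sketch of a hypothetical wrapper:

package metaexample

import "k8s.io/apimachinery/pkg/api/meta"

// delegatingRESTMapper is a hypothetical mapper that wraps another mapper.
type delegatingRESTMapper struct {
	meta.RESTMapper // the delegate; may or may not support Reset
}

var _ meta.ResettableRESTMapper = &delegatingRESTMapper{}

// Reset forwards to the delegate only if it is resettable;
// MaybeResetRESTMapper performs the type assertion and reports the result.
func (d *delegatingRESTMapper) Reset() {
	meta.MaybeResetRESTMapper(d.RESTMapper)
}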
type MultiRESTMapper []RESTMapper func (m MultiRESTMapper) String() string { - nested := []string{} + nested := make([]string, 0, len(m)) for _, t := range m { currString := fmt.Sprintf("%v", t) splitStrings := strings.Split(currString, "\n") @@ -208,3 +212,9 @@ func (m MultiRESTMapper) RESTMappings(gk schema.GroupKind, versions ...string) ( } return allMappings, nil } + +func (m MultiRESTMapper) Reset() { + for _, t := range m { + MaybeResetRESTMapper(t) + } +} diff --git a/vendor/k8s.io/apimachinery/pkg/api/meta/priority.go b/vendor/k8s.io/apimachinery/pkg/api/meta/priority.go index fa11c580f7003..4f097c9c90d7e 100644 --- a/vendor/k8s.io/apimachinery/pkg/api/meta/priority.go +++ b/vendor/k8s.io/apimachinery/pkg/api/meta/priority.go @@ -29,6 +29,10 @@ const ( AnyKind = "*" ) +var ( + _ ResettableRESTMapper = PriorityRESTMapper{} +) + // PriorityRESTMapper is a wrapper for automatically choosing a particular Resource or Kind // when multiple matches are possible type PriorityRESTMapper struct { @@ -220,3 +224,7 @@ func (m PriorityRESTMapper) ResourcesFor(partiallySpecifiedResource schema.Group func (m PriorityRESTMapper) KindsFor(partiallySpecifiedResource schema.GroupVersionResource) (gvk []schema.GroupVersionKind, err error) { return m.Delegate.KindsFor(partiallySpecifiedResource) } + +func (m PriorityRESTMapper) Reset() { + MaybeResetRESTMapper(m.Delegate) +} diff --git a/vendor/k8s.io/apimachinery/pkg/api/meta/restmapper.go b/vendor/k8s.io/apimachinery/pkg/api/meta/restmapper.go index 00bd86f51a7e3..f41b9bf78cec9 100644 --- a/vendor/k8s.io/apimachinery/pkg/api/meta/restmapper.go +++ b/vendor/k8s.io/apimachinery/pkg/api/meta/restmapper.go @@ -519,3 +519,12 @@ func (m *DefaultRESTMapper) RESTMappings(gk schema.GroupKind, versions ...string } return mappings, nil } + +// MaybeResetRESTMapper calls Reset() on the mapper if it is a ResettableRESTMapper. 
+func MaybeResetRESTMapper(mapper RESTMapper) bool { + m, ok := mapper.(ResettableRESTMapper) + if ok { + m.Reset() + } + return ok +} diff --git a/vendor/k8s.io/apimachinery/pkg/api/resource/generated.pb.go b/vendor/k8s.io/apimachinery/pkg/api/resource/generated.pb.go index 2e09f4face77a..172db57fac16f 100644 --- a/vendor/k8s.io/apimachinery/pkg/api/resource/generated.pb.go +++ b/vendor/k8s.io/apimachinery/pkg/api/resource/generated.pb.go @@ -61,8 +61,32 @@ func (m *Quantity) XXX_DiscardUnknown() { var xxx_messageInfo_Quantity proto.InternalMessageInfo +func (m *QuantityValue) Reset() { *m = QuantityValue{} } +func (*QuantityValue) ProtoMessage() {} +func (*QuantityValue) Descriptor() ([]byte, []int) { + return fileDescriptor_612bba87bd70906c, []int{1} +} +func (m *QuantityValue) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_QuantityValue.Unmarshal(m, b) +} +func (m *QuantityValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_QuantityValue.Marshal(b, m, deterministic) +} +func (m *QuantityValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_QuantityValue.Merge(m, src) +} +func (m *QuantityValue) XXX_Size() int { + return xxx_messageInfo_QuantityValue.Size(m) +} +func (m *QuantityValue) XXX_DiscardUnknown() { + xxx_messageInfo_QuantityValue.DiscardUnknown(m) +} + +var xxx_messageInfo_QuantityValue proto.InternalMessageInfo + func init() { proto.RegisterType((*Quantity)(nil), "k8s.io.apimachinery.pkg.api.resource.Quantity") + proto.RegisterType((*QuantityValue)(nil), "k8s.io.apimachinery.pkg.api.resource.QuantityValue") } func init() { @@ -70,20 +94,21 @@ func init() { } var fileDescriptor_612bba87bd70906c = []byte{ - // 237 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x4c, 0x8e, 0xb1, 0x4e, 0xc3, 0x30, - 0x10, 0x40, 0xcf, 0x0b, 0x2a, 0x19, 0x2b, 0x84, 0x10, 0xc3, 0xa5, 0x42, 0x0c, 0x2c, 0xd8, 0x6b, - 0xc5, 0xc8, 0xce, 0x00, 0x23, 0x5b, 0x92, 0x1e, 0xae, 0x15, 0xd5, 0x8e, 0x2e, 0x36, 0x52, 0xb7, - 0x8e, 0x8c, 0x1d, 0x19, 0x9b, 0xbf, 0xe9, 0xd8, 0xb1, 0x03, 0x03, 0x31, 0x3f, 0x82, 0xea, 0x36, - 0x52, 0xb7, 0x7b, 0xef, 0xf4, 0x4e, 0x97, 0xbd, 0xd4, 0xd3, 0x56, 0x1a, 0xa7, 0xea, 0x50, 0x12, - 0x5b, 0xf2, 0xd4, 0xaa, 0x4f, 0xb2, 0x33, 0xc7, 0xea, 0xb4, 0x28, 0x1a, 0xb3, 0x28, 0xaa, 0xb9, - 0xb1, 0xc4, 0x4b, 0xd5, 0xd4, 0xfa, 0x20, 0x14, 0x53, 0xeb, 0x02, 0x57, 0xa4, 0x34, 0x59, 0xe2, - 0xc2, 0xd3, 0x4c, 0x36, 0xec, 0xbc, 0x1b, 0xdf, 0x1f, 0x2b, 0x79, 0x5e, 0xc9, 0xa6, 0xd6, 0x07, - 0x21, 0x87, 0xea, 0xf6, 0x51, 0x1b, 0x3f, 0x0f, 0xa5, 0xac, 0xdc, 0x42, 0x69, 0xa7, 0x9d, 0x4a, - 0x71, 0x19, 0x3e, 0x12, 0x25, 0x48, 0xd3, 0xf1, 0xe8, 0xdd, 0x34, 0x1b, 0xbd, 0x86, 0xc2, 0x7a, - 0xe3, 0x97, 0xe3, 0xeb, 0xec, 0xa2, 0xf5, 0x6c, 0xac, 0xbe, 0x11, 0x13, 0xf1, 0x70, 0xf9, 0x76, - 0xa2, 0xa7, 0xab, 0xef, 0x4d, 0x0e, 0x5f, 0x5d, 0x0e, 0xeb, 0x2e, 0x87, 0x4d, 0x97, 0xc3, 0xea, - 0x67, 0x02, 0xcf, 0x72, 0xdb, 0x23, 0xec, 0x7a, 0x84, 0x7d, 0x8f, 0xb0, 0x8a, 0x28, 0xb6, 0x11, - 0xc5, 0x2e, 0xa2, 0xd8, 0x47, 0x14, 0xbf, 0x11, 0xc5, 0xfa, 0x0f, 0xe1, 0x7d, 0x34, 0x3c, 0xf6, - 0x1f, 0x00, 0x00, 0xff, 0xff, 0x3c, 0x08, 0x88, 0x49, 0x0e, 0x01, 0x00, 0x00, + // 254 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xf2, 0xcd, 0xb6, 0x28, 0xd6, + 0xcb, 0xcc, 0xd7, 0xcf, 0x2e, 0x4d, 0x4a, 0x2d, 0xca, 0x4b, 0x2d, 0x49, 0x2d, 0xd6, 0x2f, 0x4b, + 0xcd, 0x4b, 0xc9, 0x2f, 0xd2, 0x87, 0x4a, 0x24, 0x16, 0x64, 0xe6, 0x26, 0x26, 0x67, 0x64, 0xe6, + 0xa5, 0x16, 
0x55, 0xea, 0x17, 0x64, 0xa7, 0x83, 0x04, 0xf4, 0x8b, 0x52, 0x8b, 0xf3, 0x4b, 0x8b, + 0x92, 0x53, 0xf5, 0xd3, 0x53, 0xf3, 0x52, 0x8b, 0x12, 0x4b, 0x52, 0x53, 0xf4, 0x0a, 0x8a, 0xf2, + 0x4b, 0xf2, 0x85, 0x54, 0x20, 0xba, 0xf4, 0x90, 0x75, 0xe9, 0x15, 0x64, 0xa7, 0x83, 0x04, 0xf4, + 0x60, 0xba, 0xa4, 0x74, 0xd3, 0x33, 0x4b, 0x32, 0x4a, 0x93, 0xf4, 0x92, 0xf3, 0x73, 0xf5, 0xd3, + 0xf3, 0xd3, 0xf3, 0xf5, 0xc1, 0x9a, 0x93, 0x4a, 0xd3, 0xc0, 0x3c, 0x30, 0x07, 0xcc, 0x82, 0x18, + 0xaa, 0x64, 0xc1, 0xc5, 0x11, 0x58, 0x9a, 0x98, 0x57, 0x92, 0x59, 0x52, 0x29, 0x24, 0xc6, 0xc5, + 0x56, 0x5c, 0x52, 0x94, 0x99, 0x97, 0x2e, 0xc1, 0xa8, 0xc0, 0xa8, 0xc1, 0x19, 0x04, 0xe5, 0x59, + 0x89, 0xcc, 0x58, 0x20, 0xcf, 0xd0, 0xb1, 0x50, 0x9e, 0x61, 0xc2, 0x42, 0x79, 0x86, 0x05, 0x0b, + 0xe5, 0x19, 0x1a, 0xee, 0x28, 0x30, 0x28, 0xd9, 0x72, 0xf1, 0xc2, 0x74, 0x86, 0x25, 0xe6, 0x94, + 0xa6, 0x92, 0xa6, 0xdd, 0x49, 0xef, 0xc4, 0x43, 0x39, 0x86, 0x0b, 0x0f, 0xe5, 0x18, 0x6e, 0x3c, + 0x94, 0x63, 0x68, 0x78, 0x24, 0xc7, 0x78, 0xe2, 0x91, 0x1c, 0xe3, 0x85, 0x47, 0x72, 0x8c, 0x37, + 0x1e, 0xc9, 0x31, 0x3e, 0x78, 0x24, 0xc7, 0x38, 0xe1, 0xb1, 0x1c, 0x43, 0x14, 0x07, 0xcc, 0x5f, + 0x80, 0x00, 0x00, 0x00, 0xff, 0xff, 0x21, 0x76, 0x9f, 0x66, 0x4d, 0x01, 0x00, 0x00, } diff --git a/vendor/k8s.io/apimachinery/pkg/api/resource/generated.proto b/vendor/k8s.io/apimachinery/pkg/api/resource/generated.proto index 472104d542952..54240b7b5f2ee 100644 --- a/vendor/k8s.io/apimachinery/pkg/api/resource/generated.proto +++ b/vendor/k8s.io/apimachinery/pkg/api/resource/generated.proto @@ -86,3 +86,15 @@ message Quantity { optional string string = 1; } +// QuantityValue makes it possible to use a Quantity as value for a command +// line parameter. +// +// +protobuf=true +// +protobuf.embed=string +// +protobuf.options.marshal=false +// +protobuf.options.(gogoproto.goproto_stringer)=false +// +k8s:deepcopy-gen=true +message QuantityValue { + optional string string = 1; +} + diff --git a/vendor/k8s.io/apimachinery/pkg/api/resource/quantity.go b/vendor/k8s.io/apimachinery/pkg/api/resource/quantity.go index 8d718945d0661..6d43868ba800b 100644 --- a/vendor/k8s.io/apimachinery/pkg/api/resource/quantity.go +++ b/vendor/k8s.io/apimachinery/pkg/api/resource/quantity.go @@ -460,17 +460,7 @@ func (q *Quantity) AsApproximateFloat64() float64 { return base } - // multiply by the appropriate exponential scale - switch q.Format { - case DecimalExponent, DecimalSI: - return base * math.Pow10(exponent) - default: - // fast path for exponents that can fit in 64 bits - if exponent > 0 && exponent < 7 { - return base * float64(int64(1)<<(exponent*10)) - } - return base * math.Pow(2, float64(exponent*10)) - } + return base * math.Pow10(exponent) } // AsInt64 returns a representation of the current value as an int64 if a fast conversion @@ -696,6 +686,15 @@ func (q *Quantity) UnmarshalJSON(value []byte) error { return nil } +// NewDecimalQuantity returns a new Quantity representing the given +// value in the given format. +func NewDecimalQuantity(b inf.Dec, format Format) *Quantity { + return &Quantity{ + d: infDecAmount{&b}, + Format: format, + } +} + // NewQuantity returns a new Quantity representing the given // value in the given format. func NewQuantity(value int64, format Format) *Quantity { @@ -765,3 +764,30 @@ func (q *Quantity) SetScaled(value int64, scale Scale) { q.d.Dec = nil q.i = int64Amount{value: value, scale: scale} } + +// QuantityValue makes it possible to use a Quantity as value for a command +// line parameter. 
+// +// +protobuf=true +// +protobuf.embed=string +// +protobuf.options.marshal=false +// +protobuf.options.(gogoproto.goproto_stringer)=false +// +k8s:deepcopy-gen=true +type QuantityValue struct { + Quantity +} + +// Set implements pflag.Value.Set and Go flag.Value.Set. +func (q *QuantityValue) Set(s string) error { + quantity, err := ParseQuantity(s) + if err != nil { + return err + } + q.Quantity = quantity + return nil +} + +// Type implements pflag.Value.Type. +func (q QuantityValue) Type() string { + return "quantity" +} diff --git a/vendor/k8s.io/apimachinery/pkg/api/resource/quantity_proto.go b/vendor/k8s.io/apimachinery/pkg/api/resource/quantity_proto.go index f89ca163cd39a..3e0cdb10d4090 100644 --- a/vendor/k8s.io/apimachinery/pkg/api/resource/quantity_proto.go +++ b/vendor/k8s.io/apimachinery/pkg/api/resource/quantity_proto.go @@ -166,7 +166,7 @@ func (m *Quantity) Unmarshal(data []byte) error { if err != nil { return err } - if skippy < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { diff --git a/vendor/k8s.io/apimachinery/pkg/api/resource/zz_generated.deepcopy.go b/vendor/k8s.io/apimachinery/pkg/api/resource/zz_generated.deepcopy.go index ab47407900cf0..abb00f38e228e 100644 --- a/vendor/k8s.io/apimachinery/pkg/api/resource/zz_generated.deepcopy.go +++ b/vendor/k8s.io/apimachinery/pkg/api/resource/zz_generated.deepcopy.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* @@ -25,3 +26,20 @@ func (in *Quantity) DeepCopyInto(out *Quantity) { *out = in.DeepCopy() return } + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *QuantityValue) DeepCopyInto(out *QuantityValue) { + *out = *in + out.Quantity = in.Quantity.DeepCopy() + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QuantityValue. 
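Because QuantityValue implements Set, Type, and (via the embedded Quantity) String, it satisfies both Go's flag.Value and pflag.Value and plugs straight into flag parsing. A minimal sketch using only the standard library; the flag name is illustrative:

package main

import (
	"flag"
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	// Defaults to 1Gi unless overridden, e.g. -memory=512Mi.
	memory := resource.QuantityValue{Quantity: resource.MustParse("1Gi")}
	flag.Var(&memory, "memory", "memory limit as a resource.Quantity")
	flag.Parse()
	fmt.Println("parsed quantity:", memory.String())
}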
+func (in *QuantityValue) DeepCopy() *QuantityValue { + if in == nil { + return nil + } + out := new(QuantityValue) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/zz_generated.conversion.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/zz_generated.conversion.go index a9b28f24426ba..6d212b846a4ed 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/zz_generated.conversion.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/zz_generated.conversion.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/zz_generated.deepcopy.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/zz_generated.deepcopy.go index d5e4fc680d4ec..6e1eac5c75a72 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/zz_generated.deepcopy.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/zz_generated.deepcopy.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/OWNERS b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/OWNERS index 579af62ba76a8..5731b9ee20a25 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/OWNERS +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/OWNERS @@ -8,7 +8,6 @@ reviewers: - brendandburns - caesarxuchao - liggitt -- erictune - davidopp - sttts - quinton-hoole @@ -20,6 +19,5 @@ reviewers: - dims - hongchaodeng - krousey -- mml - therc - kevin-wangzefeng diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.pb.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.pb.go index 55945badab474..9e7924c12f9ea 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.pb.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.pb.go @@ -1326,13 +1326,13 @@ func init() { } var fileDescriptor_cf52fa777ced5367 = []byte{ - // 2813 bytes of a gzipped FileDescriptorProto + // 2859 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x3a, 0xcb, 0x6f, 0x24, 0x47, 0xf9, 0xee, 0x19, 0x8f, 0x3d, 0xf3, 0x8d, 0xc7, 0x8f, 0x5a, 0xef, 0xef, 0x37, 0x6b, 0x84, 0xc7, 0xe9, 0xa0, 0x68, 0x03, 0xc9, 0x38, 0x5e, 0x42, 0xb4, 0xd9, 0x90, 0x80, 0xc7, 0xb3, 0xde, 0x98, 0xac, 0x63, 0xab, 0xbc, 0xbb, 0x40, 0x88, 0x50, 0xda, 0xdd, 0xe5, 0x71, 0xe3, 0x9e, 0xee, 0x49, - 0x55, 0x8f, 0x37, 0x03, 0x07, 0x72, 0x00, 0x01, 0x12, 0x44, 0xe1, 0x86, 0x38, 0xa0, 0x44, 0xf0, - 0x17, 0x70, 0x81, 0x3f, 0x00, 0x89, 0x1c, 0x23, 0x71, 0x89, 0x04, 0x1a, 0x25, 0xe6, 0xc0, 0x11, + 0x55, 0x8f, 0x37, 0x03, 0x07, 0x72, 0x00, 0x01, 0x12, 0x8a, 0xc2, 0x8d, 0x13, 0x4a, 0x04, 0x7f, + 0x00, 0xe2, 0x02, 0x7f, 0x00, 0x12, 0x39, 0x06, 0x71, 0x89, 0x04, 0x1a, 0x25, 0xe6, 0xc0, 0x11, 0x71, 0xf5, 0x05, 0x54, 0x8f, 0xee, 0xae, 0x9e, 0xc7, 0xba, 0x27, 0xbb, 0x44, 0xdc, 0xa6, 0xbf, 0x77, 0x55, 0x7d, 0xf5, 0xbd, 0x6a, 0x60, 0xf7, 0xe4, 0x3a, 0xab, 0xbb, 0xc1, 0xfa, 0x49, 0xf7, 0x90, 0x50, 0x9f, 0x84, 0x84, 0xad, 0x9f, 0x12, 0xdf, 0x09, 0xe8, 0xba, 0x42, 0x58, 0x1d, 0xb7, @@ -1342,14 +1342,14 @@ var fileDescriptor_cf52fa777ced5367 = []byte{ 0x8d, 0x95, 0xa7, 0x5b, 0x6e, 0x78, 0xdc, 0x3d, 0xac, 0xdb, 0x41, 0x7b, 0xbd, 0x15, 0xb4, 0x82, 0x75, 0xc1, 0x7c, 0xd8, 0x3d, 0x12, 0x5f, 0xe2, 0x43, 0xfc, 0x92, 0x42, 0x57, 0xc6, 0x9a, 0x42, 0xbb, 0x7e, 0xe8, 0xb6, 0xc9, 0xa0, 0x15, 0x2b, 0xcf, 0x5d, 0xc4, 0xc0, 0xec, 0x63, 0xd2, 0xb6, - 0x06, 0xf9, 0xcc, 0x3f, 
0xe7, 0xa1, 0xb8, 0xb9, 0xbf, 0x73, 0x8b, 0x06, 0xdd, 0x0e, 0x5a, 0x83, + 0x06, 0xf9, 0xcc, 0x3f, 0xe5, 0xa1, 0xb8, 0xb9, 0xbf, 0x73, 0x8b, 0x06, 0xdd, 0x0e, 0x5a, 0x83, 0x69, 0xdf, 0x6a, 0x93, 0xaa, 0xb1, 0x66, 0x5c, 0x2d, 0x35, 0xe6, 0x3e, 0xe8, 0xd7, 0xa6, 0xce, 0xfa, 0xb5, 0xe9, 0x57, 0xad, 0x36, 0xc1, 0x02, 0x83, 0x3c, 0x28, 0x9e, 0x12, 0xca, 0xdc, 0xc0, 0x67, 0xd5, 0xdc, 0x5a, 0xfe, 0x6a, 0xf9, 0xda, 0x4b, 0xf5, 0x2c, 0xeb, 0xaf, 0x0b, 0x05, 0xf7, 0x24, 0xeb, 0x76, 0x40, 0x9b, 0x2e, 0xb3, 0x83, 0x53, 0x42, 0x7b, 0x8d, 0x45, 0xa5, 0xa5, 0xa8, 0x90, 0x0c, 0xc7, 0x1a, 0xd0, 0x8f, 0x0c, 0x58, 0xec, 0x50, 0x72, 0x44, 0x28, 0x25, 0x8e, 0xc2, 0x57, 0xf3, 0x6b, 0xc6, 0x23, 0x50, 0x5b, 0x55, 0x6a, 0x17, 0xf7, 0x07, 0xe4, 0xe3, 0x21, 0x8d, - 0xe8, 0xb7, 0x06, 0xac, 0x30, 0x42, 0x4f, 0x09, 0xdd, 0x74, 0x1c, 0x4a, 0x18, 0x6b, 0xf4, 0xb6, + 0xe8, 0xd7, 0x06, 0xac, 0x30, 0x42, 0x4f, 0x09, 0xdd, 0x74, 0x1c, 0x4a, 0x18, 0x6b, 0xf4, 0xb6, 0x3c, 0x97, 0xf8, 0xe1, 0xd6, 0x4e, 0x13, 0xb3, 0xea, 0xb4, 0xd8, 0x87, 0xaf, 0x65, 0x33, 0xe8, 0x60, 0x9c, 0x9c, 0x86, 0xa9, 0x2c, 0x5a, 0x19, 0x4b, 0xc2, 0xf0, 0x03, 0xcc, 0x30, 0x8f, 0x60, 0x2e, 0x3a, 0xc8, 0xdb, 0x2e, 0x0b, 0xd1, 0x3d, 0x98, 0x69, 0xf1, 0x0f, 0x56, 0x35, 0x84, 0x81, @@ -1366,12 +1366,12 @@ var fileDescriptor_cf52fa777ced5367 = []byte{ 0xbf, 0x5a, 0x6a, 0xcc, 0xf3, 0xf5, 0x1e, 0xc4, 0x50, 0xac, 0x51, 0x70, 0x7a, 0xdb, 0x0a, 0x49, 0x2b, 0xa0, 0x2e, 0x61, 0xd5, 0xd9, 0x84, 0x7e, 0x2b, 0x86, 0x62, 0x8d, 0x02, 0x7d, 0x03, 0x10, 0x0b, 0x03, 0x6a, 0xb5, 0x88, 0x5a, 0xea, 0xcb, 0x16, 0x3b, 0xae, 0x82, 0x58, 0xdd, 0x8a, 0x5a, - 0x1d, 0x3a, 0x18, 0xa2, 0xc0, 0x23, 0xb8, 0xcc, 0xdf, 0x1b, 0xb0, 0xa0, 0xf9, 0x82, 0xf0, 0xbb, + 0x1d, 0x3a, 0x18, 0xa2, 0xc0, 0x23, 0xb8, 0xcc, 0xdf, 0x19, 0xb0, 0xa0, 0xf9, 0x82, 0xf0, 0xbb, 0xeb, 0x30, 0xd7, 0xd2, 0x6e, 0x9d, 0xf2, 0x8b, 0xf8, 0xb4, 0xf5, 0x1b, 0x89, 0x53, 0x94, 0x88, 0x40, 0x89, 0x2a, 0x49, 0x51, 0x74, 0xd9, 0xc8, 0xec, 0xb4, 0x91, 0x0d, 0x89, 0x26, 0x0d, 0xc8, 0x70, 0x22, 0xd9, 0xfc, 0x87, 0x21, 0x1c, 0x38, 0x8a, 0x37, 0xe8, 0xaa, 0x16, 0xd3, 0x0c, 0xb1, - 0x7d, 0x73, 0x63, 0xe2, 0xd1, 0x05, 0x81, 0x20, 0xf7, 0x3f, 0x11, 0x08, 0x6e, 0x14, 0x7f, 0xf5, - 0x5e, 0x6d, 0xea, 0xed, 0xbf, 0xad, 0x4d, 0x99, 0xbf, 0x34, 0x60, 0x6e, 0xb3, 0xd3, 0xf1, 0x7a, + 0x7d, 0x73, 0x63, 0xe2, 0xd1, 0x05, 0x81, 0x20, 0xf7, 0x3f, 0x11, 0x08, 0x6e, 0x14, 0x7f, 0xf9, + 0x5e, 0x6d, 0xea, 0xed, 0xbf, 0xad, 0x4d, 0x99, 0xbf, 0x30, 0x60, 0x6e, 0xb3, 0xd3, 0xf1, 0x7a, 0x7b, 0x9d, 0x50, 0x2c, 0xc0, 0x84, 0x19, 0x87, 0xf6, 0x70, 0xd7, 0x57, 0x0b, 0x05, 0x7e, 0xbf, 0x9b, 0x02, 0x82, 0x15, 0x86, 0xdf, 0x9f, 0xa3, 0x80, 0xda, 0x44, 0x5d, 0xb7, 0xf8, 0xfe, 0x6c, 0x73, 0x20, 0x96, 0x38, 0x7e, 0xc8, 0x47, 0x2e, 0xf1, 0x9c, 0x5d, 0xcb, 0xb7, 0x5a, 0x84, 0xaa, @@ -1384,125 +1384,128 @@ var fileDescriptor_cf52fa777ced5367 = []byte{ 0xbf, 0x98, 0xed, 0xc4, 0x39, 0x47, 0xa2, 0xf7, 0xf6, 0x90, 0x34, 0x3c, 0x42, 0x03, 0x7a, 0x02, 0x66, 0x28, 0xb1, 0x58, 0xe0, 0x57, 0x0b, 0x62, 0xf9, 0x71, 0x54, 0xc6, 0x02, 0x8a, 0x15, 0x96, 0x07, 0xb4, 0x36, 0x61, 0xcc, 0x6a, 0x45, 0xe1, 0x35, 0x0e, 0x68, 0xbb, 0x12, 0x8c, 0x23, 0xbc, - 0xd9, 0x86, 0xca, 0x16, 0x25, 0x56, 0x48, 0x26, 0xf1, 0x8a, 0x4f, 0x7f, 0xe0, 0x3f, 0xc9, 0x43, - 0xa5, 0x49, 0x3c, 0x92, 0xe8, 0xdb, 0x06, 0xd4, 0xa2, 0x96, 0x4d, 0xf6, 0x09, 0x75, 0x03, 0xe7, - 0x80, 0xd8, 0x81, 0xef, 0x30, 0xe1, 0x02, 0xf9, 0xc6, 0xff, 0xf1, 0xbd, 0xb9, 0x35, 0x84, 0xc5, - 0x23, 0x38, 0x90, 0x07, 0x95, 0x0e, 0x15, 0xbf, 0xc5, 0x7e, 0x49, 0x0f, 0x29, 0x5f, 0xfb, 0x72, - 0xb6, 0xe3, 0xd8, 0xd7, 0x59, 0x1b, 0x4b, 0x67, 
0xfd, 0x5a, 0x25, 0x05, 0xc2, 0x69, 0xe1, 0xe8, - 0xeb, 0xb0, 0x18, 0xd0, 0xce, 0xb1, 0xe5, 0x37, 0x49, 0x87, 0xf8, 0x0e, 0xf1, 0x43, 0x26, 0x76, - 0xa1, 0xd8, 0x58, 0xe6, 0x75, 0xc4, 0xde, 0x00, 0x0e, 0x0f, 0x51, 0xa3, 0xd7, 0x60, 0xa9, 0x43, - 0x83, 0x8e, 0xd5, 0x12, 0x2e, 0xb5, 0x1f, 0x78, 0xae, 0xdd, 0x13, 0x2e, 0x54, 0x6a, 0x3c, 0x75, - 0xd6, 0xaf, 0x2d, 0xed, 0x0f, 0x22, 0xcf, 0xfb, 0xb5, 0x4b, 0x62, 0xeb, 0x38, 0x24, 0x41, 0xe2, - 0x61, 0x31, 0xda, 0x19, 0x16, 0xc6, 0x9d, 0xa1, 0xb9, 0x03, 0xc5, 0x66, 0x57, 0xf9, 0xf3, 0x8b, - 0x50, 0x74, 0xd4, 0x6f, 0xb5, 0xf3, 0xd1, 0xc5, 0x8a, 0x69, 0xce, 0xfb, 0xb5, 0x0a, 0x2f, 0x1d, - 0xeb, 0x11, 0x00, 0xc7, 0x2c, 0xe6, 0x13, 0x50, 0x14, 0x47, 0xce, 0xee, 0x6d, 0xa0, 0x45, 0xc8, - 0x63, 0xeb, 0xbe, 0x90, 0x32, 0x87, 0xf9, 0x4f, 0x2d, 0x02, 0xed, 0x01, 0xdc, 0x22, 0x61, 0x74, - 0xf0, 0x9b, 0xb0, 0x10, 0x85, 0xe1, 0x74, 0x76, 0xf8, 0x7f, 0xa5, 0x7b, 0x01, 0xa7, 0xd1, 0x78, - 0x90, 0xde, 0x7c, 0x1d, 0x4a, 0x22, 0x83, 0xf0, 0xf4, 0x9b, 0xa4, 0x7a, 0xe3, 0x01, 0xa9, 0x3e, - 0xca, 0xdf, 0xb9, 0x71, 0xf9, 0x5b, 0x33, 0xd7, 0x83, 0x8a, 0xe4, 0x8d, 0x8a, 0x9b, 0x4c, 0x1a, - 0x9e, 0x82, 0x62, 0x64, 0xa6, 0xd2, 0x12, 0x17, 0xb5, 0x91, 0x20, 0x1c, 0x53, 0x68, 0xda, 0x8e, - 0x21, 0x95, 0x0d, 0xb3, 0x29, 0xd3, 0x2a, 0x97, 0xdc, 0x83, 0x2b, 0x17, 0x4d, 0xd3, 0x0f, 0xa1, - 0x3a, 0xae, 0x12, 0x7e, 0x88, 0x7c, 0x9d, 0xdd, 0x14, 0xf3, 0x1d, 0x03, 0x16, 0x75, 0x49, 0xd9, - 0x8f, 0x2f, 0xbb, 0x92, 0x8b, 0x2b, 0x35, 0x6d, 0x47, 0x7e, 0x63, 0xc0, 0x72, 0x6a, 0x69, 0x13, - 0x9d, 0xf8, 0x04, 0x46, 0xe9, 0xce, 0x91, 0x9f, 0xc0, 0x39, 0xfe, 0x92, 0x83, 0xca, 0x6d, 0xeb, - 0x90, 0x78, 0x07, 0xc4, 0x23, 0x76, 0x18, 0x50, 0xf4, 0x03, 0x28, 0xb7, 0xad, 0xd0, 0x3e, 0x16, - 0xd0, 0xa8, 0xaa, 0x6f, 0x66, 0x0b, 0x76, 0x29, 0x49, 0xf5, 0xdd, 0x44, 0xcc, 0x4d, 0x3f, 0xa4, - 0xbd, 0xc6, 0x25, 0x65, 0x52, 0x59, 0xc3, 0x60, 0x5d, 0x9b, 0x68, 0xc5, 0xc4, 0xf7, 0xcd, 0xb7, - 0x3a, 0xbc, 0xe4, 0x98, 0xbc, 0x03, 0x4c, 0x99, 0x80, 0xc9, 0x9b, 0x5d, 0x97, 0x92, 0x36, 0xf1, - 0xc3, 0xa4, 0x15, 0xdb, 0x1d, 0x90, 0x8f, 0x87, 0x34, 0xae, 0xbc, 0x04, 0x8b, 0x83, 0xc6, 0xf3, - 0xf8, 0x73, 0x42, 0x7a, 0xf2, 0xbc, 0x30, 0xff, 0x89, 0x96, 0xa1, 0x70, 0x6a, 0x79, 0x5d, 0x75, - 0x1b, 0xb1, 0xfc, 0xb8, 0x91, 0xbb, 0x6e, 0x98, 0xbf, 0x33, 0xa0, 0x3a, 0xce, 0x10, 0xf4, 0x79, - 0x4d, 0x50, 0xa3, 0xac, 0xac, 0xca, 0xbf, 0x42, 0x7a, 0x52, 0xea, 0x4d, 0x28, 0x06, 0x1d, 0x5e, - 0x0f, 0x04, 0x54, 0x9d, 0xfa, 0x93, 0xd1, 0x49, 0xee, 0x29, 0xf8, 0x79, 0xbf, 0x76, 0x39, 0x25, - 0x3e, 0x42, 0xe0, 0x98, 0x95, 0x47, 0x6a, 0x61, 0x0f, 0xcf, 0x1e, 0x71, 0xa4, 0xbe, 0x27, 0x20, - 0x58, 0x61, 0xcc, 0x3f, 0x1a, 0x30, 0x2d, 0x8a, 0xe9, 0xd7, 0xa1, 0xc8, 0xf7, 0xcf, 0xb1, 0x42, - 0x4b, 0xd8, 0x95, 0xb9, 0x8d, 0xe3, 0xdc, 0xbb, 0x24, 0xb4, 0x12, 0x6f, 0x8b, 0x20, 0x38, 0x96, - 0x88, 0x30, 0x14, 0xdc, 0x90, 0xb4, 0xa3, 0x83, 0x7c, 0x7a, 0xac, 0x68, 0x35, 0x44, 0xa8, 0x63, - 0xeb, 0xfe, 0xcd, 0xb7, 0x42, 0xe2, 0xf3, 0xc3, 0x48, 0xae, 0xc6, 0x0e, 0x97, 0x81, 0xa5, 0x28, - 0xf3, 0x5f, 0x06, 0xc4, 0xaa, 0xb8, 0xf3, 0x33, 0xe2, 0x1d, 0xdd, 0x76, 0xfd, 0x13, 0xb5, 0xad, - 0xb1, 0x39, 0x07, 0x0a, 0x8e, 0x63, 0x8a, 0x51, 0xe9, 0x21, 0x37, 0x59, 0x7a, 0xe0, 0x0a, 0xed, - 0xc0, 0x0f, 0x5d, 0xbf, 0x3b, 0x74, 0xdb, 0xb6, 0x14, 0x1c, 0xc7, 0x14, 0xbc, 0x10, 0xa1, 0xa4, - 0x6d, 0xb9, 0xbe, 0xeb, 0xb7, 0xf8, 0x22, 0xb6, 0x82, 0xae, 0x1f, 0x8a, 0x8c, 0xac, 0x0a, 0x11, - 0x3c, 0x84, 0xc5, 0x23, 0x38, 0xcc, 0x3f, 0x4c, 0x43, 0x99, 0xaf, 0x39, 0xca, 0x73, 0x2f, 0x40, - 0xc5, 0xd3, 0xbd, 0x40, 0xad, 0xfd, 0xb2, 0x32, 0x25, 0x7d, 0xaf, 0x71, 
0x9a, 0x96, 0x33, 0x8b, - 0xfa, 0x29, 0x66, 0xce, 0xa5, 0x99, 0xb7, 0x75, 0x24, 0x4e, 0xd3, 0xf2, 0xe8, 0x75, 0x9f, 0xdf, - 0x0f, 0x55, 0x99, 0xc4, 0x47, 0xf4, 0x4d, 0x0e, 0xc4, 0x12, 0x87, 0x76, 0xe1, 0x92, 0xe5, 0x79, - 0xc1, 0x7d, 0x01, 0x6c, 0x04, 0xc1, 0x49, 0xdb, 0xa2, 0x27, 0x4c, 0x34, 0xc2, 0xc5, 0xc6, 0xe7, - 0x14, 0xcb, 0xa5, 0xcd, 0x61, 0x12, 0x3c, 0x8a, 0x6f, 0xd4, 0xb1, 0x4d, 0x4f, 0x78, 0x6c, 0xc7, - 0xb0, 0x3c, 0x00, 0x12, 0xb7, 0x5c, 0x75, 0xa5, 0xcf, 0x2a, 0x39, 0xcb, 0x78, 0x04, 0xcd, 0xf9, - 0x18, 0x38, 0x1e, 0x29, 0x11, 0xdd, 0x80, 0x79, 0xee, 0xc9, 0x41, 0x37, 0x8c, 0xea, 0xce, 0x82, - 0x38, 0x6e, 0x74, 0xd6, 0xaf, 0xcd, 0xdf, 0x49, 0x61, 0xf0, 0x00, 0x25, 0xdf, 0x5c, 0xcf, 0x6d, - 0xbb, 0x61, 0x75, 0x56, 0xb0, 0xc4, 0x9b, 0x7b, 0x9b, 0x03, 0xb1, 0xc4, 0xa5, 0x3c, 0xb0, 0x78, - 0x91, 0x07, 0x9a, 0xbf, 0xce, 0x03, 0x92, 0x85, 0xb2, 0x23, 0xeb, 0x29, 0x19, 0xd2, 0x78, 0x35, - 0xaf, 0x0a, 0x6d, 0x63, 0xa0, 0x9a, 0x57, 0x35, 0x76, 0x84, 0x47, 0xbb, 0x50, 0x92, 0xa1, 0x25, - 0xb9, 0x2e, 0xeb, 0x8a, 0xb8, 0xb4, 0x17, 0x21, 0xce, 0xfb, 0xb5, 0x95, 0x94, 0x9a, 0x18, 0x23, - 0x3a, 0xad, 0x44, 0x02, 0xba, 0x06, 0x60, 0x75, 0x5c, 0x7d, 0xd6, 0x56, 0x4a, 0x26, 0x2e, 0x49, - 0xd7, 0x8c, 0x35, 0x2a, 0xf4, 0x32, 0x4c, 0x87, 0x9f, 0xae, 0x1b, 0x2a, 0x8a, 0x66, 0x8f, 0xf7, - 0x3e, 0x42, 0x02, 0xd7, 0x2e, 0xfc, 0x99, 0x71, 0xb3, 0x54, 0x23, 0x13, 0x6b, 0xdf, 0x8e, 0x31, - 0x58, 0xa3, 0x42, 0xdf, 0x82, 0xe2, 0x91, 0x2a, 0x45, 0xc5, 0xc1, 0x64, 0x0e, 0x91, 0x51, 0x01, - 0x2b, 0xdb, 0xfd, 0xe8, 0x0b, 0xc7, 0xd2, 0xcc, 0x37, 0xa1, 0xb4, 0xeb, 0xda, 0x34, 0x10, 0x8d, - 0xd8, 0x93, 0x30, 0xcb, 0x52, 0x9d, 0x4a, 0x7c, 0x24, 0x91, 0xbb, 0x44, 0x78, 0xee, 0x27, 0xbe, - 0xe5, 0x07, 0xb2, 0x1f, 0x29, 0x24, 0x7e, 0xf2, 0x2a, 0x07, 0x62, 0x89, 0xbb, 0xb1, 0xcc, 0x33, - 0xfd, 0x4f, 0xdf, 0xaf, 0x4d, 0xbd, 0xfb, 0x7e, 0x6d, 0xea, 0xbd, 0xf7, 0x55, 0xd6, 0x3f, 0x07, - 0x80, 0xbd, 0xc3, 0xef, 0x11, 0x5b, 0xc6, 0xcf, 0x4c, 0xb3, 0xb5, 0x68, 0xa4, 0x2b, 0x66, 0x6b, - 0xb9, 0x81, 0xea, 0x4d, 0xc3, 0xe1, 0x14, 0x25, 0x5a, 0x87, 0x52, 0x3c, 0x35, 0x53, 0x07, 0xbd, - 0x14, 0x39, 0x4e, 0x3c, 0x5a, 0xc3, 0x09, 0x4d, 0x2a, 0x98, 0x4f, 0x5f, 0x18, 0xcc, 0x1b, 0x90, - 0xef, 0xba, 0x8e, 0xea, 0x5a, 0x9f, 0x89, 0x92, 0xe9, 0xdd, 0x9d, 0xe6, 0x79, 0xbf, 0xf6, 0xd8, - 0xb8, 0x61, 0x35, 0xef, 0xf8, 0x59, 0xfd, 0xee, 0x4e, 0x13, 0x73, 0xe6, 0x51, 0x91, 0x65, 0x66, - 0xc2, 0xc8, 0x72, 0x0d, 0xa0, 0x95, 0xf4, 0xfe, 0xf2, 0xe2, 0xc6, 0x1e, 0xa5, 0xf5, 0xfc, 0x1a, - 0x15, 0x62, 0xb0, 0x64, 0xf3, 0x06, 0x59, 0xf5, 0xe0, 0x2c, 0xb4, 0xda, 0x72, 0x9a, 0x38, 0x99, - 0x73, 0x5f, 0x51, 0x6a, 0x96, 0xb6, 0x06, 0x85, 0xe1, 0x61, 0xf9, 0x28, 0x80, 0x25, 0x47, 0xb5, - 0x7a, 0x89, 0xd2, 0xd2, 0xc4, 0x4a, 0x2f, 0x73, 0x85, 0xcd, 0x41, 0x41, 0x78, 0x58, 0x36, 0xfa, - 0x2e, 0xac, 0x44, 0xc0, 0xe1, 0x7e, 0x5b, 0x44, 0xde, 0x7c, 0x63, 0xf5, 0xac, 0x5f, 0x5b, 0x69, - 0x8e, 0xa5, 0xc2, 0x0f, 0x90, 0x80, 0x1c, 0x98, 0xf1, 0x64, 0xa5, 0x5a, 0x16, 0xd5, 0xc5, 0x57, - 0xb3, 0xad, 0x22, 0xf1, 0xfe, 0xba, 0x5e, 0xa1, 0xc6, 0x73, 0x0f, 0x55, 0x9c, 0x2a, 0xd9, 0xe8, - 0x2d, 0x28, 0x5b, 0xbe, 0x1f, 0x84, 0x96, 0x9c, 0x00, 0xcc, 0x09, 0x55, 0x9b, 0x13, 0xab, 0xda, - 0x4c, 0x64, 0x0c, 0x54, 0xc4, 0x1a, 0x06, 0xeb, 0xaa, 0xd0, 0x7d, 0x58, 0x08, 0xee, 0xfb, 0x84, - 0x62, 0x72, 0x44, 0x28, 0xf1, 0x6d, 0xc2, 0xaa, 0x15, 0xa1, 0xfd, 0xd9, 0x8c, 0xda, 0x53, 0xcc, - 0x89, 0x4b, 0xa7, 0xe1, 0x0c, 0x0f, 0x6a, 0x41, 0x75, 0x1e, 0x24, 0x7d, 0xcb, 0x73, 0xbf, 0x4f, - 0x28, 0xab, 0xce, 0x27, 0x03, 0xdf, 0xed, 0x18, 0x8a, 0x35, 0x0a, 0xf4, 0x15, 0x28, 0xdb, 0x5e, - 
0x97, 0x85, 0x44, 0x4e, 0xdf, 0x17, 0xc4, 0x0d, 0x8a, 0xd7, 0xb7, 0x95, 0xa0, 0xb0, 0x4e, 0x87, - 0xba, 0x50, 0x69, 0xeb, 0x29, 0xa3, 0xba, 0x24, 0x56, 0x77, 0x3d, 0xdb, 0xea, 0x86, 0x93, 0x5a, - 0x52, 0xc1, 0xa4, 0x70, 0x38, 0xad, 0x65, 0xe5, 0x79, 0x28, 0x7f, 0xca, 0xe2, 0x9e, 0x37, 0x07, - 0x83, 0xe7, 0x38, 0x51, 0x73, 0xf0, 0xa7, 0x1c, 0xcc, 0xa7, 0x77, 0x7f, 0x20, 0x1d, 0x16, 0x32, - 0xa5, 0xc3, 0xa8, 0x0d, 0x35, 0xc6, 0x3e, 0x18, 0x44, 0x61, 0x3d, 0x3f, 0x36, 0xac, 0xab, 0xe8, - 0x39, 0xfd, 0x30, 0xd1, 0xb3, 0x0e, 0xc0, 0xeb, 0x0c, 0x1a, 0x78, 0x1e, 0xa1, 0x22, 0x70, 0x16, - 0xd5, 0xc3, 0x40, 0x0c, 0xc5, 0x1a, 0x05, 0xaf, 0x86, 0x0f, 0xbd, 0xc0, 0x3e, 0x11, 0x5b, 0x10, - 0x5d, 0x7a, 0x11, 0x32, 0x8b, 0xb2, 0x1a, 0x6e, 0x0c, 0x61, 0xf1, 0x08, 0x0e, 0xb3, 0x07, 0x97, - 0xf7, 0x2d, 0x1a, 0xba, 0x96, 0x97, 0x5c, 0x30, 0xd1, 0x6e, 0xbc, 0x31, 0xd4, 0xcc, 0x3c, 0x33, - 0xe9, 0x45, 0x4d, 0x36, 0x3f, 0x81, 0x25, 0x0d, 0x8d, 0xf9, 0x57, 0x03, 0xae, 0x8c, 0xd4, 0xfd, - 0x19, 0x34, 0x53, 0x6f, 0xa4, 0x9b, 0xa9, 0x17, 0x32, 0x4e, 0x21, 0x47, 0x59, 0x3b, 0xa6, 0xb5, - 0x9a, 0x85, 0xc2, 0x3e, 0x2f, 0x62, 0xcd, 0x5f, 0x18, 0x30, 0x27, 0x7e, 0x4d, 0x32, 0xc1, 0xad, - 0xa5, 0xe7, 0xfa, 0xa5, 0x47, 0x38, 0xd3, 0x7f, 0xc7, 0x80, 0xf4, 0xec, 0x14, 0xbd, 0x24, 0xfd, - 0xd7, 0x88, 0x87, 0x9b, 0x13, 0xfa, 0xee, 0x8b, 0xe3, 0x5a, 0xc1, 0x4b, 0x99, 0xa6, 0x84, 0x4f, - 0x41, 0x09, 0x07, 0x41, 0xb8, 0x6f, 0x85, 0xc7, 0x8c, 0x2f, 0xbc, 0xc3, 0x7f, 0xa8, 0xbd, 0x11, - 0x0b, 0x17, 0x18, 0x2c, 0xe1, 0xe6, 0xcf, 0x0d, 0xb8, 0x32, 0xf6, 0xad, 0x85, 0x87, 0x00, 0x3b, - 0xfe, 0x52, 0x2b, 0x8a, 0xbd, 0x30, 0xa1, 0xc3, 0x1a, 0x15, 0xef, 0xe1, 0x52, 0x0f, 0x34, 0x83, - 0x3d, 0x5c, 0x4a, 0x1b, 0x4e, 0xd3, 0x9a, 0xff, 0xcc, 0x81, 0x7a, 0xdc, 0xf8, 0x2f, 0x7b, 0xec, - 0x13, 0x03, 0x4f, 0x2b, 0xf3, 0xe9, 0xa7, 0x95, 0xf8, 0x1d, 0x45, 0x7b, 0x5b, 0xc8, 0x3f, 0xf8, - 0x6d, 0x01, 0x3d, 0x17, 0x3f, 0x57, 0xc8, 0xd0, 0xb5, 0x9a, 0x7e, 0xae, 0x38, 0xef, 0xd7, 0xe6, - 0x94, 0xf0, 0xf4, 0xf3, 0xc5, 0x6b, 0x30, 0xeb, 0x90, 0xd0, 0x72, 0x3d, 0xd9, 0x8f, 0x65, 0x1e, - 0xe2, 0x4b, 0x61, 0x4d, 0xc9, 0xda, 0x28, 0x73, 0x9b, 0xd4, 0x07, 0x8e, 0x04, 0xf2, 0x68, 0x6b, - 0x07, 0x8e, 0x6c, 0x27, 0x0a, 0x49, 0xb4, 0xdd, 0x0a, 0x1c, 0x82, 0x05, 0xc6, 0x7c, 0xd7, 0x80, - 0xb2, 0x94, 0xb4, 0x65, 0x75, 0x19, 0x41, 0x1b, 0xf1, 0x2a, 0xe4, 0x71, 0x5f, 0xd1, 0xdf, 0xa5, - 0xce, 0xfb, 0xb5, 0x92, 0x20, 0x13, 0x9d, 0xc8, 0x88, 0xf7, 0x97, 0xdc, 0x05, 0x7b, 0xf4, 0x38, - 0x14, 0xc4, 0xed, 0x51, 0x9b, 0x99, 0x3c, 0xb0, 0x71, 0x20, 0x96, 0x38, 0xf3, 0xe3, 0x1c, 0x54, - 0x52, 0x8b, 0xcb, 0xd0, 0x0b, 0xc4, 0xa3, 0xcb, 0x5c, 0x86, 0x71, 0xf8, 0xf8, 0xe7, 0x6c, 0x95, - 0x7b, 0x66, 0x1e, 0x26, 0xf7, 0x7c, 0x1b, 0x66, 0x6c, 0xbe, 0x47, 0xd1, 0xbf, 0x23, 0x36, 0x26, - 0x39, 0x4e, 0xb1, 0xbb, 0x89, 0x37, 0x8a, 0x4f, 0x86, 0x95, 0x40, 0x74, 0x0b, 0x96, 0x28, 0x09, - 0x69, 0x6f, 0xf3, 0x28, 0x24, 0x54, 0x6f, 0xe2, 0x0b, 0x49, 0xc5, 0x8d, 0x07, 0x09, 0xf0, 0x30, - 0x8f, 0x79, 0x08, 0x73, 0x77, 0xac, 0x43, 0x2f, 0x7e, 0x96, 0xc2, 0x50, 0x71, 0x7d, 0xdb, 0xeb, - 0x3a, 0x44, 0x46, 0xe3, 0x28, 0x7a, 0x45, 0x97, 0x76, 0x47, 0x47, 0x9e, 0xf7, 0x6b, 0x97, 0x52, - 0x00, 0xf9, 0x0e, 0x83, 0xd3, 0x22, 0x4c, 0x0f, 0xa6, 0x3f, 0xc3, 0xee, 0xf1, 0x3b, 0x50, 0x4a, - 0xea, 0xfb, 0x47, 0xac, 0xd2, 0x7c, 0x03, 0x8a, 0xdc, 0xe3, 0xa3, 0xbe, 0xf4, 0x82, 0x12, 0x27, - 0x5d, 0x38, 0xe5, 0xb2, 0x14, 0x4e, 0x66, 0x1b, 0x2a, 0x77, 0x3b, 0xce, 0x43, 0x3e, 0x4c, 0xe6, - 0x32, 0x67, 0xad, 0x6b, 0x20, 0xff, 0x78, 0xc1, 0x13, 0x84, 0xcc, 0xdc, 0x5a, 0x82, 0xd0, 0x13, - 0xaf, 0x36, 0x95, 0xff, 
0xb1, 0x01, 0x20, 0xc6, 0x5f, 0x37, 0x4f, 0x89, 0x1f, 0x66, 0x78, 0xbe, - 0xbe, 0x0b, 0x33, 0x81, 0xf4, 0x26, 0xf9, 0x38, 0x39, 0xe1, 0x8c, 0x35, 0xbe, 0x04, 0xd2, 0x9f, - 0xb0, 0x12, 0xd6, 0xb8, 0xfa, 0xc1, 0x27, 0xab, 0x53, 0x1f, 0x7e, 0xb2, 0x3a, 0xf5, 0xd1, 0x27, - 0xab, 0x53, 0x6f, 0x9f, 0xad, 0x1a, 0x1f, 0x9c, 0xad, 0x1a, 0x1f, 0x9e, 0xad, 0x1a, 0x1f, 0x9d, - 0xad, 0x1a, 0x1f, 0x9f, 0xad, 0x1a, 0xef, 0xfe, 0x7d, 0x75, 0xea, 0xb5, 0xdc, 0xe9, 0xc6, 0x7f, - 0x02, 0x00, 0x00, 0xff, 0xff, 0xf8, 0xee, 0x35, 0x7b, 0xee, 0x26, 0x00, 0x00, + 0xf9, 0x5b, 0x03, 0x2a, 0x5b, 0x94, 0x58, 0x21, 0x99, 0xc4, 0x2d, 0x3e, 0xf5, 0x89, 0xa3, 0x4d, + 0x58, 0x10, 0xdf, 0xf7, 0x2c, 0xcf, 0x75, 0xe4, 0x19, 0x4c, 0x0b, 0xe6, 0xff, 0x57, 0xcc, 0x0b, + 0xdb, 0x69, 0x34, 0x1e, 0xa4, 0x37, 0x7f, 0x92, 0x87, 0x4a, 0x93, 0x78, 0x24, 0x31, 0x79, 0x1b, + 0x50, 0x8b, 0x5a, 0x36, 0xd9, 0x27, 0xd4, 0x0d, 0x9c, 0x03, 0x62, 0x07, 0xbe, 0xc3, 0x84, 0x1b, + 0xe5, 0x1b, 0xff, 0xc7, 0xf7, 0xf7, 0xd6, 0x10, 0x16, 0x8f, 0xe0, 0x40, 0x1e, 0x54, 0x3a, 0x54, + 0xfc, 0x16, 0x7b, 0x2e, 0xbd, 0xac, 0x7c, 0xed, 0xcb, 0xd9, 0x8e, 0x74, 0x5f, 0x67, 0x6d, 0x2c, + 0x9d, 0xf5, 0x6b, 0x95, 0x14, 0x08, 0xa7, 0x85, 0xa3, 0xaf, 0xc3, 0x62, 0x40, 0x3b, 0xc7, 0x96, + 0xdf, 0x24, 0x1d, 0xe2, 0x3b, 0xc4, 0x0f, 0x99, 0xd8, 0xc8, 0x62, 0x63, 0x99, 0xd7, 0x22, 0x7b, + 0x03, 0x38, 0x3c, 0x44, 0x8d, 0x5e, 0x83, 0xa5, 0x0e, 0x0d, 0x3a, 0x56, 0x4b, 0x6c, 0xcc, 0x7e, + 0xe0, 0xb9, 0x76, 0x4f, 0x6d, 0xe7, 0x53, 0x67, 0xfd, 0xda, 0xd2, 0xfe, 0x20, 0xf2, 0xbc, 0x5f, + 0xbb, 0x24, 0xb6, 0x8e, 0x43, 0x12, 0x24, 0x1e, 0x16, 0xa3, 0xb9, 0x41, 0x61, 0x9c, 0x1b, 0x98, + 0x3b, 0x50, 0x6c, 0x76, 0xd5, 0x9d, 0x78, 0x11, 0x8a, 0x8e, 0xfa, 0xad, 0x76, 0x3e, 0xba, 0x9c, + 0x31, 0xcd, 0x79, 0xbf, 0x56, 0xe1, 0xe5, 0x67, 0x3d, 0x02, 0xe0, 0x98, 0xc5, 0x7c, 0x02, 0x8a, + 0xe2, 0xe0, 0xd9, 0xbd, 0x0d, 0xb4, 0x08, 0x79, 0x6c, 0xdd, 0x17, 0x52, 0xe6, 0x30, 0xff, 0xa9, + 0x45, 0xb1, 0x3d, 0x80, 0x5b, 0x24, 0x8c, 0x0e, 0x7e, 0x13, 0x16, 0xa2, 0x50, 0x9e, 0xce, 0x30, + 0xb1, 0x37, 0xe1, 0x34, 0x1a, 0x0f, 0xd2, 0x9b, 0xaf, 0x43, 0x49, 0x64, 0x21, 0x9e, 0xc2, 0x93, + 0x72, 0xc1, 0x78, 0x40, 0xb9, 0x10, 0xd5, 0x00, 0xb9, 0x71, 0x35, 0x80, 0x66, 0xae, 0x07, 0x15, + 0xc9, 0x1b, 0x15, 0x48, 0x99, 0x34, 0x3c, 0x05, 0xc5, 0xc8, 0x4c, 0xa5, 0x25, 0x2e, 0x8c, 0x23, + 0x41, 0x38, 0xa6, 0xd0, 0xb4, 0x1d, 0x43, 0x2a, 0xa3, 0x66, 0x53, 0xa6, 0x55, 0x3f, 0xb9, 0x07, + 0x57, 0x3f, 0x9a, 0xa6, 0x1f, 0x42, 0x75, 0x5c, 0x35, 0xfd, 0x10, 0x39, 0x3f, 0xbb, 0x29, 0xe6, + 0x3b, 0x06, 0x2c, 0xea, 0x92, 0xb2, 0x1f, 0x5f, 0x76, 0x25, 0x17, 0x57, 0x7b, 0xda, 0x8e, 0xfc, + 0xca, 0x80, 0xe5, 0xd4, 0xd2, 0x26, 0x3a, 0xf1, 0x09, 0x8c, 0xd2, 0x9d, 0x23, 0x3f, 0x81, 0x73, + 0xfc, 0x25, 0x07, 0x95, 0xdb, 0xd6, 0x21, 0xf1, 0x0e, 0x88, 0x47, 0xec, 0x30, 0xa0, 0xe8, 0x07, + 0x50, 0x6e, 0x5b, 0xa1, 0x7d, 0x2c, 0xa0, 0x51, 0x67, 0xd0, 0xcc, 0x16, 0xec, 0x52, 0x92, 0xea, + 0xbb, 0x89, 0x98, 0x9b, 0x7e, 0x48, 0x7b, 0x8d, 0x4b, 0xca, 0xa4, 0xb2, 0x86, 0xc1, 0xba, 0x36, + 0xd1, 0xce, 0x89, 0xef, 0x9b, 0x6f, 0x75, 0x78, 0xd9, 0x32, 0x79, 0x17, 0x99, 0x32, 0x01, 0x93, + 0x37, 0xbb, 0x2e, 0x25, 0x6d, 0xe2, 0x87, 0x49, 0x3b, 0xb7, 0x3b, 0x20, 0x1f, 0x0f, 0x69, 0x5c, + 0x79, 0x09, 0x16, 0x07, 0x8d, 0xe7, 0xf1, 0xe7, 0x84, 0xf4, 0xe4, 0x79, 0x61, 0xfe, 0x13, 0x2d, + 0x43, 0xe1, 0xd4, 0xf2, 0xba, 0xea, 0x36, 0x62, 0xf9, 0x71, 0x23, 0x77, 0xdd, 0x30, 0x7f, 0x63, + 0x40, 0x75, 0x9c, 0x21, 0xe8, 0xf3, 0x9a, 0xa0, 0x46, 0x59, 0x59, 0x95, 0x7f, 0x85, 0xf4, 0xa4, + 0xd4, 0x9b, 0x50, 0x0c, 0x3a, 0xbc, 0xa6, 0x08, 0xa8, 0x3a, 0xf5, 
0x27, 0xa3, 0x93, 0xdc, 0x53, + 0xf0, 0xf3, 0x7e, 0xed, 0x72, 0x4a, 0x7c, 0x84, 0xc0, 0x31, 0x2b, 0x8f, 0xd4, 0xc2, 0x1e, 0x9e, + 0x3d, 0xe2, 0x48, 0x7d, 0x4f, 0x40, 0xb0, 0xc2, 0x98, 0x7f, 0x30, 0x60, 0x5a, 0x14, 0xe4, 0xaf, + 0x43, 0x91, 0xef, 0x9f, 0x63, 0x85, 0x96, 0xb0, 0x2b, 0x73, 0x2b, 0xc8, 0xb9, 0x77, 0x49, 0x68, + 0x25, 0xde, 0x16, 0x41, 0x70, 0x2c, 0x11, 0x61, 0x28, 0xb8, 0x21, 0x69, 0x47, 0x07, 0xf9, 0xf4, + 0x58, 0xd1, 0x6a, 0x10, 0x51, 0xc7, 0xd6, 0xfd, 0x9b, 0x6f, 0x85, 0xc4, 0xe7, 0x87, 0x91, 0x5c, + 0x8d, 0x1d, 0x2e, 0x03, 0x4b, 0x51, 0xe6, 0xbf, 0x0c, 0x88, 0x55, 0x71, 0xe7, 0x67, 0xc4, 0x3b, + 0xba, 0xed, 0xfa, 0x27, 0x6a, 0x5b, 0x63, 0x73, 0x0e, 0x14, 0x1c, 0xc7, 0x14, 0xa3, 0xd2, 0x43, + 0x6e, 0xb2, 0xf4, 0xc0, 0x15, 0xda, 0x81, 0x1f, 0xba, 0x7e, 0x77, 0xe8, 0xb6, 0x6d, 0x29, 0x38, + 0x8e, 0x29, 0x78, 0x21, 0x42, 0x49, 0xdb, 0x72, 0x7d, 0xd7, 0x6f, 0xf1, 0x45, 0x6c, 0x05, 0x5d, + 0x3f, 0x14, 0x19, 0x59, 0x15, 0x22, 0x78, 0x08, 0x8b, 0x47, 0x70, 0x98, 0xbf, 0x9f, 0x86, 0x32, + 0x5f, 0x73, 0x94, 0xe7, 0x5e, 0x80, 0x8a, 0xa7, 0x7b, 0x81, 0x5a, 0xfb, 0x65, 0x65, 0x4a, 0xfa, + 0x5e, 0xe3, 0x34, 0x2d, 0x67, 0x16, 0x25, 0x54, 0xcc, 0x9c, 0x4b, 0x33, 0x6f, 0xeb, 0x48, 0x9c, + 0xa6, 0xe5, 0xd1, 0xeb, 0x3e, 0xbf, 0x1f, 0xaa, 0x32, 0x89, 0x8f, 0xe8, 0x9b, 0x1c, 0x88, 0x25, + 0x0e, 0xed, 0xc2, 0x25, 0xcb, 0xf3, 0x82, 0xfb, 0x02, 0xd8, 0x08, 0x82, 0x93, 0xb6, 0x45, 0x4f, + 0x98, 0x68, 0xa6, 0x8b, 0x8d, 0xcf, 0x29, 0x96, 0x4b, 0x9b, 0xc3, 0x24, 0x78, 0x14, 0xdf, 0xa8, + 0x63, 0x9b, 0x9e, 0xf0, 0xd8, 0x8e, 0x61, 0x79, 0x00, 0x24, 0x6e, 0xb9, 0xea, 0x6c, 0x9f, 0x55, + 0x72, 0x96, 0xf1, 0x08, 0x9a, 0xf3, 0x31, 0x70, 0x3c, 0x52, 0x22, 0xba, 0x01, 0xf3, 0xdc, 0x93, + 0x83, 0x6e, 0x18, 0xd5, 0x9d, 0x05, 0x71, 0xdc, 0xe8, 0xac, 0x5f, 0x9b, 0xbf, 0x93, 0xc2, 0xe0, + 0x01, 0x4a, 0xbe, 0xb9, 0x9e, 0xdb, 0x76, 0xc3, 0xea, 0xac, 0x60, 0x89, 0x37, 0xf7, 0x36, 0x07, + 0x62, 0x89, 0x4b, 0x79, 0x60, 0xf1, 0x22, 0x0f, 0x34, 0xff, 0x9c, 0x07, 0x24, 0x6b, 0x6d, 0x47, + 0xd6, 0x53, 0x32, 0xa4, 0xf1, 0x8e, 0x40, 0xd5, 0xea, 0xc6, 0x40, 0x47, 0xa0, 0xca, 0xf4, 0x08, + 0x8f, 0x76, 0xa1, 0x24, 0x43, 0x4b, 0x72, 0x5d, 0xd6, 0x15, 0x71, 0x69, 0x2f, 0x42, 0x9c, 0xf7, + 0x6b, 0x2b, 0x29, 0x35, 0x31, 0x46, 0x74, 0x6b, 0x89, 0x04, 0x74, 0x0d, 0xc0, 0xea, 0xb8, 0xfa, + 0xbc, 0xae, 0x94, 0x4c, 0x6d, 0x92, 0xce, 0x1b, 0x6b, 0x54, 0xe8, 0x65, 0x98, 0x0e, 0x3f, 0x5d, + 0x47, 0x55, 0x14, 0x0d, 0x23, 0xef, 0x9f, 0x84, 0x04, 0xae, 0x5d, 0xf8, 0x33, 0xe3, 0x66, 0xa9, + 0x66, 0x28, 0xd6, 0xbe, 0x1d, 0x63, 0xb0, 0x46, 0x85, 0xbe, 0x05, 0xc5, 0x23, 0x55, 0x8a, 0x8a, + 0x83, 0xc9, 0x1c, 0x22, 0xa3, 0x02, 0x56, 0x8e, 0x0c, 0xa2, 0x2f, 0x1c, 0x4b, 0x43, 0x5f, 0x81, + 0x32, 0xeb, 0x1e, 0xc6, 0xd9, 0x5b, 0x9e, 0x66, 0x9c, 0x2a, 0x0f, 0x12, 0x14, 0xd6, 0xe9, 0xcc, + 0x37, 0xa1, 0xb4, 0xeb, 0xda, 0x34, 0x10, 0x3d, 0xe0, 0x93, 0x30, 0xcb, 0x52, 0x0d, 0x4e, 0x7c, + 0x92, 0x91, 0x97, 0x45, 0x78, 0xee, 0x5e, 0xbe, 0xe5, 0x07, 0xb2, 0x8d, 0x29, 0x24, 0xee, 0xf5, + 0x2a, 0x07, 0x62, 0x89, 0xbb, 0xb1, 0xcc, 0x0b, 0x84, 0x9f, 0xbe, 0x5f, 0x9b, 0x7a, 0xf7, 0xfd, + 0xda, 0xd4, 0x7b, 0xef, 0xab, 0x62, 0xe1, 0x1c, 0x00, 0xf6, 0x0e, 0xbf, 0x47, 0x6c, 0x19, 0x76, + 0x33, 0x8d, 0xf5, 0xa2, 0x69, 0xb2, 0x18, 0xeb, 0xe5, 0x06, 0x8a, 0x3e, 0x0d, 0x87, 0x53, 0x94, + 0x68, 0x1d, 0x4a, 0xf1, 0xc0, 0x4e, 0xf9, 0xc7, 0x52, 0xe4, 0x6f, 0xf1, 0x54, 0x0f, 0x27, 0x34, + 0xa9, 0x1c, 0x30, 0x7d, 0x61, 0x0e, 0x68, 0x40, 0xbe, 0xeb, 0x3a, 0xaa, 0x61, 0x7e, 0x26, 0xca, + 0xc1, 0x77, 0x77, 0x9a, 0xe7, 0xfd, 0xda, 0x63, 0xe3, 0xe6, 0xe4, 0x61, 0xaf, 0x43, 0x58, 
0xfd, + 0xee, 0x4e, 0x13, 0x73, 0xe6, 0x51, 0x01, 0x69, 0x66, 0xc2, 0x80, 0x74, 0x0d, 0xa0, 0x95, 0x8c, + 0x1d, 0xe4, 0x7d, 0x8f, 0x1d, 0x51, 0x1b, 0x37, 0x68, 0x54, 0x88, 0xc1, 0x92, 0xcd, 0x5b, 0x73, + 0xd5, 0xfe, 0xb3, 0xd0, 0x6a, 0xcb, 0x41, 0xe6, 0x64, 0x77, 0xe2, 0x8a, 0x52, 0xb3, 0xb4, 0x35, + 0x28, 0x0c, 0x0f, 0xcb, 0x47, 0x01, 0x2c, 0x39, 0xaa, 0x43, 0x4c, 0x94, 0x96, 0x26, 0x56, 0x7a, + 0x99, 0x2b, 0x6c, 0x0e, 0x0a, 0xc2, 0xc3, 0xb2, 0xd1, 0x77, 0x61, 0x25, 0x02, 0x0e, 0xb7, 0xe9, + 0x22, 0x60, 0xe7, 0x1b, 0xab, 0x67, 0xfd, 0xda, 0x4a, 0x73, 0x2c, 0x15, 0x7e, 0x80, 0x04, 0xe4, + 0xc0, 0x8c, 0x27, 0x0b, 0xdc, 0xb2, 0x28, 0x4a, 0xbe, 0x9a, 0x6d, 0x15, 0x89, 0xf7, 0xd7, 0xf5, + 0xc2, 0x36, 0x1e, 0xb9, 0xa8, 0x9a, 0x56, 0xc9, 0x46, 0x6f, 0x41, 0xd9, 0xf2, 0xfd, 0x20, 0xb4, + 0xe4, 0xe0, 0x60, 0x4e, 0xa8, 0xda, 0x9c, 0x58, 0xd5, 0x66, 0x22, 0x63, 0xa0, 0x90, 0xd6, 0x30, + 0x58, 0x57, 0x85, 0xee, 0xc3, 0x42, 0x70, 0xdf, 0x27, 0x14, 0x93, 0x23, 0x42, 0x89, 0x6f, 0x13, + 0x56, 0xad, 0x08, 0xed, 0xcf, 0x66, 0xd4, 0x9e, 0x62, 0x4e, 0x5c, 0x3a, 0x0d, 0x67, 0x78, 0x50, + 0x0b, 0xaa, 0xf3, 0xd8, 0xea, 0x5b, 0x9e, 0xfb, 0x7d, 0x42, 0x59, 0x75, 0x3e, 0x99, 0x35, 0x6f, + 0xc7, 0x50, 0xac, 0x51, 0xf0, 0xe8, 0x67, 0x7b, 0x5d, 0x16, 0x12, 0x39, 0xf8, 0x5f, 0x48, 0x47, + 0xbf, 0xad, 0x04, 0x85, 0x75, 0x3a, 0xd4, 0x85, 0x4a, 0x5b, 0xcf, 0x34, 0xd5, 0x25, 0xb1, 0xba, + 0xeb, 0xd9, 0x56, 0x37, 0x9c, 0x0b, 0x93, 0xc2, 0x27, 0x85, 0xc3, 0x69, 0x2d, 0x2b, 0xcf, 0x43, + 0xf9, 0x53, 0xf6, 0x04, 0xbc, 0xa7, 0x18, 0x3c, 0xc7, 0x89, 0x7a, 0x8a, 0x3f, 0xe6, 0x60, 0x3e, + 0xbd, 0xfb, 0x03, 0x59, 0xb4, 0x90, 0x29, 0x8b, 0x46, 0xdd, 0xab, 0x31, 0xf6, 0xad, 0x22, 0x0a, + 0xeb, 0xf9, 0xb1, 0x61, 0x5d, 0x45, 0xcf, 0xe9, 0x87, 0x89, 0x9e, 0x75, 0x00, 0x5e, 0x9e, 0xd0, + 0xc0, 0xf3, 0x08, 0x15, 0x81, 0xb3, 0xa8, 0xde, 0x24, 0x62, 0x28, 0xd6, 0x28, 0x78, 0x11, 0x7d, + 0xe8, 0x05, 0xf6, 0x89, 0xd8, 0x82, 0xe8, 0xd2, 0x8b, 0x90, 0x59, 0x94, 0x45, 0x74, 0x63, 0x08, + 0x8b, 0x47, 0x70, 0x98, 0x3d, 0xb8, 0xbc, 0x6f, 0xd1, 0xd0, 0xb5, 0xbc, 0xe4, 0x82, 0x89, 0x2e, + 0xe5, 0x8d, 0xa1, 0x1e, 0xe8, 0x99, 0x49, 0x2f, 0x6a, 0xb2, 0xf9, 0x09, 0x2c, 0xe9, 0x83, 0xcc, + 0xbf, 0x1a, 0x70, 0x65, 0xa4, 0xee, 0xcf, 0xa0, 0x07, 0x7b, 0x23, 0xdd, 0x83, 0xbd, 0x90, 0x71, + 0x78, 0x39, 0xca, 0xda, 0x31, 0x1d, 0xd9, 0x2c, 0x14, 0xf6, 0x79, 0xed, 0x6b, 0x7e, 0x68, 0xc0, + 0x9c, 0xf8, 0x35, 0xc9, 0xec, 0xb8, 0x96, 0x7e, 0x52, 0x28, 0x3d, 0xba, 0xe7, 0x84, 0x47, 0x31, + 0x5c, 0x7e, 0xc7, 0x80, 0xf4, 0xd4, 0x16, 0xbd, 0x24, 0xaf, 0x80, 0x11, 0x8f, 0x55, 0x27, 0x74, + 0xff, 0x17, 0xc7, 0x35, 0xa1, 0x97, 0x32, 0xcd, 0x27, 0x9f, 0x82, 0x12, 0x0e, 0x82, 0x70, 0xdf, + 0x0a, 0x8f, 0x19, 0xdf, 0xbb, 0x0e, 0xff, 0xa1, 0xb6, 0x57, 0xec, 0x9d, 0xc0, 0x60, 0x09, 0x37, + 0x7f, 0x6e, 0xc0, 0x95, 0xb1, 0x2f, 0x45, 0x3c, 0x8a, 0xd8, 0xf1, 0x97, 0x5a, 0x51, 0xec, 0xc8, + 0x09, 0x1d, 0xd6, 0xa8, 0x78, 0xf7, 0x98, 0x7a, 0x5e, 0x1a, 0xec, 0x1e, 0x53, 0xda, 0x70, 0x9a, + 0xd6, 0xfc, 0x67, 0x0e, 0xd4, 0xd3, 0xcc, 0x7f, 0xd9, 0xe9, 0x9f, 0x18, 0x78, 0x18, 0x9a, 0x4f, + 0x3f, 0x0c, 0xc5, 0xaf, 0x40, 0xda, 0xcb, 0x48, 0xfe, 0xc1, 0x2f, 0x23, 0xe8, 0xb9, 0xf8, 0xb1, + 0x45, 0xfa, 0xd0, 0x6a, 0xfa, 0xb1, 0xe5, 0xbc, 0x5f, 0x9b, 0x53, 0xc2, 0xd3, 0x8f, 0x2f, 0xaf, + 0xc1, 0xac, 0x43, 0x42, 0xcb, 0xf5, 0x64, 0x27, 0x98, 0xf9, 0xf9, 0x40, 0x0a, 0x6b, 0x4a, 0xd6, + 0x46, 0x99, 0xdb, 0xa4, 0x3e, 0x70, 0x24, 0x90, 0x07, 0x6c, 0x3b, 0x70, 0x64, 0x23, 0x53, 0x48, + 0x02, 0xf6, 0x56, 0xe0, 0x10, 0x2c, 0x30, 0xe6, 0xbb, 0x06, 0x94, 0xa5, 0xa4, 0x2d, 0xab, 0xcb, + 0x08, 0xda, 0x88, 
0x57, 0x21, 0x8f, 0xfb, 0x8a, 0xfe, 0xaa, 0x76, 0xde, 0xaf, 0x95, 0x04, 0x99, + 0xe8, 0x81, 0x46, 0xbc, 0x1e, 0xe5, 0x2e, 0xd8, 0xa3, 0xc7, 0xa1, 0x20, 0x2e, 0x90, 0xda, 0xcc, + 0xe4, 0x79, 0x90, 0x03, 0xb1, 0xc4, 0x99, 0x1f, 0xe7, 0xa0, 0x92, 0x5a, 0x5c, 0x86, 0x76, 0x22, + 0x1e, 0x9a, 0xe6, 0x32, 0x0c, 0xe2, 0xc7, 0x3f, 0xc6, 0xab, 0xf4, 0x35, 0xf3, 0x30, 0xe9, 0xeb, + 0xdb, 0x30, 0x63, 0xf3, 0x3d, 0x8a, 0xfe, 0xdb, 0xb1, 0x31, 0xc9, 0x71, 0x8a, 0xdd, 0x4d, 0xbc, + 0x51, 0x7c, 0x32, 0xac, 0x04, 0xa2, 0x5b, 0xb0, 0x44, 0x49, 0x48, 0x7b, 0x9b, 0x47, 0x21, 0xa1, + 0xfa, 0xf8, 0xa0, 0x90, 0x14, 0xed, 0x78, 0x90, 0x00, 0x0f, 0xf3, 0x98, 0x87, 0x30, 0x77, 0xc7, + 0x3a, 0xf4, 0xe2, 0x07, 0x31, 0x0c, 0x15, 0xd7, 0xb7, 0xbd, 0xae, 0x43, 0x64, 0x40, 0x8f, 0xa2, + 0x57, 0x74, 0x69, 0x77, 0x74, 0xe4, 0x79, 0xbf, 0x76, 0x29, 0x05, 0x90, 0x2f, 0x40, 0x38, 0x2d, + 0xc2, 0xf4, 0x60, 0xfa, 0x33, 0x6c, 0x40, 0xbf, 0x03, 0xa5, 0xa4, 0x45, 0x78, 0xc4, 0x2a, 0xcd, + 0x37, 0xa0, 0xc8, 0x3d, 0x3e, 0x6a, 0x6d, 0x2f, 0xa8, 0x92, 0xd2, 0xb5, 0x57, 0x2e, 0x4b, 0xed, + 0x25, 0x9e, 0x55, 0xef, 0x76, 0x9c, 0x87, 0x7c, 0x56, 0xcd, 0x3d, 0x4c, 0xe6, 0xcb, 0x4f, 0x98, + 0xf9, 0xae, 0x81, 0xfc, 0xeb, 0x09, 0x4f, 0x32, 0xb2, 0x80, 0xd0, 0x92, 0x8c, 0x9e, 0xff, 0xb5, + 0x37, 0x85, 0x1f, 0x1b, 0x00, 0x62, 0x78, 0x77, 0xf3, 0x94, 0xf8, 0x61, 0x86, 0x07, 0xfc, 0xbb, + 0x30, 0x13, 0x48, 0x8f, 0x94, 0x4f, 0xab, 0x13, 0x4e, 0x88, 0xe3, 0x8b, 0x24, 0x7d, 0x12, 0x2b, + 0x61, 0x8d, 0xab, 0x1f, 0x7c, 0xb2, 0x3a, 0xf5, 0xe1, 0x27, 0xab, 0x53, 0x1f, 0x7d, 0xb2, 0x3a, + 0xf5, 0xf6, 0xd9, 0xaa, 0xf1, 0xc1, 0xd9, 0xaa, 0xf1, 0xe1, 0xd9, 0xaa, 0xf1, 0xd1, 0xd9, 0xaa, + 0xf1, 0xf1, 0xd9, 0xaa, 0xf1, 0xee, 0xdf, 0x57, 0xa7, 0x5e, 0xcb, 0x9d, 0x6e, 0xfc, 0x27, 0x00, + 0x00, 0xff, 0xff, 0x7e, 0xef, 0x1e, 0xdd, 0xf0, 0x27, 0x00, 0x00, } func (m *APIGroup) Marshal() (dAtA []byte, err error) { @@ -1908,6 +1911,11 @@ func (m *CreateOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + i -= len(m.FieldValidation) + copy(dAtA[i:], m.FieldValidation) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.FieldValidation))) + i-- + dAtA[i] = 0x22 i -= len(m.FieldManager) copy(dAtA[i:], m.FieldManager) i = encodeVarintGenerated(dAtA, i, uint64(len(m.FieldManager))) @@ -2568,6 +2576,11 @@ func (m *ManagedFieldsEntry) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + i -= len(m.Subresource) + copy(dAtA[i:], m.Subresource) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Subresource))) + i-- + dAtA[i] = 0x42 if m.FieldsV1 != nil { { size, err := m.FieldsV1.MarshalToSizedBuffer(dAtA[:i]) @@ -2976,6 +2989,11 @@ func (m *PatchOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + i -= len(m.FieldValidation) + copy(dAtA[i:], m.FieldValidation) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.FieldValidation))) + i-- + dAtA[i] = 0x22 i -= len(m.FieldManager) copy(dAtA[i:], m.FieldManager) i = encodeVarintGenerated(dAtA, i, uint64(len(m.FieldManager))) @@ -3376,6 +3394,11 @@ func (m *UpdateOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + i -= len(m.FieldValidation) + copy(dAtA[i:], m.FieldValidation) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.FieldValidation))) + i-- + dAtA[i] = 0x1a i -= len(m.FieldManager) copy(dAtA[i:], m.FieldManager) i = encodeVarintGenerated(dAtA, i, uint64(len(m.FieldManager))) @@ -3642,6 +3665,8 @@ func (m *CreateOptions) Size() (n int) { } l = len(m.FieldManager) n += 1 + l + sovGenerated(uint64(l)) + l = 
len(m.FieldValidation) + n += 1 + l + sovGenerated(uint64(l)) return n } @@ -3914,6 +3939,8 @@ func (m *ManagedFieldsEntry) Size() (n int) { l = m.FieldsV1.Size() n += 1 + l + sovGenerated(uint64(l)) } + l = len(m.Subresource) + n += 1 + l + sovGenerated(uint64(l)) return n } @@ -4061,6 +4088,8 @@ func (m *PatchOptions) Size() (n int) { } l = len(m.FieldManager) n += 1 + l + sovGenerated(uint64(l)) + l = len(m.FieldValidation) + n += 1 + l + sovGenerated(uint64(l)) return n } @@ -4219,6 +4248,8 @@ func (m *UpdateOptions) Size() (n int) { } l = len(m.FieldManager) n += 1 + l + sovGenerated(uint64(l)) + l = len(m.FieldValidation) + n += 1 + l + sovGenerated(uint64(l)) return n } @@ -4363,6 +4394,7 @@ func (this *CreateOptions) String() string { s := strings.Join([]string{`&CreateOptions{`, `DryRun:` + fmt.Sprintf("%v", this.DryRun) + `,`, `FieldManager:` + fmt.Sprintf("%v", this.FieldManager) + `,`, + `FieldValidation:` + fmt.Sprintf("%v", this.FieldValidation) + `,`, `}`, }, "") return s @@ -4508,6 +4540,7 @@ func (this *ManagedFieldsEntry) String() string { `Time:` + strings.Replace(fmt.Sprintf("%v", this.Time), "Time", "Time", 1) + `,`, `FieldsType:` + fmt.Sprintf("%v", this.FieldsType) + `,`, `FieldsV1:` + strings.Replace(fmt.Sprintf("%v", this.FieldsV1), "FieldsV1", "FieldsV1", 1) + `,`, + `Subresource:` + fmt.Sprintf("%v", this.Subresource) + `,`, `}`, }, "") return s @@ -4625,6 +4658,7 @@ func (this *PatchOptions) String() string { `DryRun:` + fmt.Sprintf("%v", this.DryRun) + `,`, `Force:` + valueToStringGenerated(this.Force) + `,`, `FieldManager:` + fmt.Sprintf("%v", this.FieldManager) + `,`, + `FieldValidation:` + fmt.Sprintf("%v", this.FieldValidation) + `,`, `}`, }, "") return s @@ -4747,6 +4781,7 @@ func (this *UpdateOptions) String() string { s := strings.Join([]string{`&UpdateOptions{`, `DryRun:` + fmt.Sprintf("%v", this.DryRun) + `,`, `FieldManager:` + fmt.Sprintf("%v", this.FieldManager) + `,`, + `FieldValidation:` + fmt.Sprintf("%v", this.FieldValidation) + `,`, `}`, }, "") return s @@ -6088,6 +6123,38 @@ func (m *CreateOptions) Unmarshal(dAtA []byte) error { } m.FieldManager = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FieldValidation", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FieldValidation = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -8442,6 +8509,38 @@ func (m *ManagedFieldsEntry) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Subresource", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + 
postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Subresource = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -9783,6 +9882,38 @@ func (m *PatchOptions) Unmarshal(dAtA []byte) error { } m.FieldManager = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FieldValidation", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FieldValidation = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -11104,6 +11235,38 @@ func (m *UpdateOptions) Unmarshal(dAtA []byte) error { } m.FieldManager = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FieldValidation", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FieldValidation = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto index 4f41504f3ff39..472fcacb1063d 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto @@ -242,6 +242,19 @@ message CreateOptions { // as defined by https://golang.org/pkg/unicode/#IsPrint. // +optional optional string fieldManager = 3; + + // fieldValidation determines how the server should respond to + // unknown/duplicate fields in the object in the request. + // Introduced as alpha in 1.23, older servers or servers with the + // `ServerSideFieldValidation` feature disabled will discard valid values + // specified in this param and not perform any server side field validation. + // Valid values are: + // - Ignore: ignores unknown/duplicate fields. + // - Warn: responds with a warning for each + // unknown/duplicate field, but successfully serves the request. + // - Strict: fails the request on unknown/duplicate fields. + // +optional + optional string fieldValidation = 4; } // DeleteOptions may be provided when deleting an API object. @@ -362,7 +375,7 @@ message GroupVersionForDiscovery { } // GroupVersionKind unambiguously identifies a kind. It doesn't anonymously include GroupVersion -// to avoid automatic coersion. 
It doesn't use a GroupVersion to avoid custom marshalling +// to avoid automatic coercion. It doesn't use a GroupVersion to avoid custom marshalling // // +protobuf.options.(gogoproto.goproto_stringer)=false message GroupVersionKind { @@ -374,7 +387,7 @@ message GroupVersionKind { } // GroupVersionResource unambiguously identifies a resource. It doesn't anonymously include GroupVersion -// to avoid automatic coersion. It doesn't use a GroupVersion to avoid custom marshalling +// to avoid automatic coercion. It doesn't use a GroupVersion to avoid custom marshalling // // +protobuf.options.(gogoproto.goproto_stringer)=false message GroupVersionResource { @@ -499,8 +512,6 @@ message ListOptions { // assume bookmarks are returned at any specific interval, nor may they // assume the server will send any BOOKMARK event during a session. // If this is not a watch, this field is ignored. - // If the feature gate WatchBookmarks is not enabled in apiserver, - // this field is ignored. // +optional optional bool allowWatchBookmarks = 9; @@ -589,6 +600,15 @@ message ManagedFieldsEntry { // FieldsV1 holds the first JSON version format as described in the "FieldsV1" type. // +optional optional FieldsV1 fieldsV1 = 7; + + // Subresource is the name of the subresource used to update that object, or + // empty string if the object was updated through the main resource. The + // value of this field is used to distinguish between managers, even if they + // share the same name. For example, a status update will be distinct from a + // regular update using the same manager name. + // Note that the APIVersion field is not related to the Subresource field and + // it always corresponds to the version of the main resource. + optional string subresource = 8; } // MicroTime is version of Time with microsecond level precision. @@ -788,6 +808,7 @@ message ObjectMeta { // OwnerReference contains enough information to let you identify an owning // object. An owning object must be in the same namespace as the dependent, or // be cluster-scoped, so there is no namespace field. +// +structType=atomic message OwnerReference { // API version of the referent. optional string apiVersion = 5; @@ -870,6 +891,19 @@ message PatchOptions { // types (JsonPatch, MergePatch, StrategicMergePatch). // +optional optional string fieldManager = 3; + + // fieldValidation determines how the server should respond to + // unknown/duplicate fields in the object in the request. + // Introduced as alpha in 1.23, older servers or servers with the + // `ServerSideFieldValidation` feature disabled will discard valid values + // specified in this param and not perform any server side field validation. + // Valid values are: + // - Ignore: ignores unknown/duplicate fields. + // - Warn: responds with a warning for each + // unknown/duplicate field, but successfully serves the request. + // - Strict: fails the request on unknown/duplicate fields. + // +optional + optional string fieldValidation = 4; } // Preconditions must be fulfilled before an operation (update, delete, etc.) is carried out. @@ -1087,6 +1121,19 @@ message UpdateOptions { // as defined by https://golang.org/pkg/unicode/#IsPrint. // +optional optional string fieldManager = 2; + + // fieldValidation determines how the server should respond to + // unknown/duplicate fields in the object in the request. 
+ // Introduced as alpha in 1.23, older servers or servers with the + // `ServerSideFieldValidation` feature disabled will discard valid values + // specified in this param and not perform any server side field validation. + // Valid values are: + // - Ignore: ignores unknown/duplicate fields. + // - Warn: responds with a warning for each + // unknown/duplicate field, but successfully serves the request. + // - Strict: fails the request on unknown/duplicate fields. + // +optional + optional string fieldValidation = 3; } // Verbs masks the value so protobuf can generate diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/group_version.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/group_version.go index 54a0944af1138..fc9e521e211be 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/group_version.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/group_version.go @@ -44,7 +44,7 @@ func (gr *GroupResource) String() string { } // GroupVersionResource unambiguously identifies a resource. It doesn't anonymously include GroupVersion -// to avoid automatic coersion. It doesn't use a GroupVersion to avoid custom marshalling +// to avoid automatic coercion. It doesn't use a GroupVersion to avoid custom marshalling // // +protobuf.options.(gogoproto.goproto_stringer)=false type GroupVersionResource struct { @@ -80,7 +80,7 @@ func (gk *GroupKind) String() string { } // GroupVersionKind unambiguously identifies a kind. It doesn't anonymously include GroupVersion -// to avoid automatic coersion. It doesn't use a GroupVersion to avoid custom marshalling +// to avoid automatic coercion. It doesn't use a GroupVersion to avoid custom marshalling // // +protobuf.options.(gogoproto.goproto_stringer)=false type GroupVersionKind struct { diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/helpers.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/helpers.go index 3c5a1518c8ed0..2b6a30b655a68 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/helpers.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/helpers.go @@ -38,13 +38,13 @@ func LabelSelectorAsSelector(ps *LabelSelector) (labels.Selector, error) { if len(ps.MatchLabels)+len(ps.MatchExpressions) == 0 { return labels.Everything(), nil } - selector := labels.NewSelector() + requirements := make([]labels.Requirement, 0, len(ps.MatchLabels)+len(ps.MatchExpressions)) for k, v := range ps.MatchLabels { r, err := labels.NewRequirement(k, selection.Equals, []string{v}) if err != nil { return nil, err } - selector = selector.Add(*r) + requirements = append(requirements, *r) } for _, expr := range ps.MatchExpressions { var op selection.Operator @@ -64,8 +64,10 @@ func LabelSelectorAsSelector(ps *LabelSelector) (labels.Selector, error) { if err != nil { return nil, err } - selector = selector.Add(*r) + requirements = append(requirements, *r) } + selector := labels.NewSelector() + selector = selector.Add(requirements...) 
return selector, nil } @@ -154,7 +156,7 @@ func SetAsLabelSelector(ls labels.Set) *LabelSelector { } selector := &LabelSelector{ - MatchLabels: make(map[string]string), + MatchLabels: make(map[string]string, len(ls)), } for label, value := range ls { selector.MatchLabels[label] = value diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time_fuzz.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time_fuzz.go index befab16f729d4..3cf9d48e96115 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time_fuzz.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time_fuzz.go @@ -1,3 +1,4 @@ +//go:build !notest // +build !notest /* diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time_fuzz.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time_fuzz.go index 94ad8d7cf459d..bf9e21b5bd956 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time_fuzz.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time_fuzz.go @@ -1,3 +1,4 @@ +//go:build !notest // +build !notest /* diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types.go index 79e2ad48a0402..f9c27c146d471 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types.go @@ -303,6 +303,7 @@ const ( // OwnerReference contains enough information to let you identify an owning // object. An owning object must be in the same namespace as the dependent, or // be cluster-scoped, so there is no namespace field. +// +structType=atomic type OwnerReference struct { // API version of the referent. APIVersion string `json:"apiVersion" protobuf:"bytes,5,opt,name=apiVersion"` @@ -356,8 +357,6 @@ type ListOptions struct { // assume bookmarks are returned at any specific interval, nor may they // assume the server will send any BOOKMARK event during a session. // If this is not a watch, this field is ignored. - // If the feature gate WatchBookmarks is not enabled in apiserver, - // this field is ignored. // +optional AllowWatchBookmarks bool `json:"allowWatchBookmarks,omitempty" protobuf:"varint,9,opt,name=allowWatchBookmarks"` @@ -523,6 +522,15 @@ type DeleteOptions struct { DryRun []string `json:"dryRun,omitempty" protobuf:"bytes,5,rep,name=dryRun"` } +const ( + // FieldValidationIgnore ignores unknown/duplicate fields + FieldValidationIgnore = "Ignore" + // FieldValidationWarn responds with a warning, but successfully serve the request + FieldValidationWarn = "Warn" + // FieldValidationStrict fails the request on unknown/duplicate fields + FieldValidationStrict = "Strict" +) + // +k8s:conversion-gen:explicit-from=net/url.Values // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object @@ -545,6 +553,19 @@ type CreateOptions struct { // as defined by https://golang.org/pkg/unicode/#IsPrint. // +optional FieldManager string `json:"fieldManager,omitempty" protobuf:"bytes,3,name=fieldManager"` + + // fieldValidation determines how the server should respond to + // unknown/duplicate fields in the object in the request. + // Introduced as alpha in 1.23, older servers or servers with the + // `ServerSideFieldValidation` feature disabled will discard valid values + // specified in this param and not perform any server side field validation. + // Valid values are: + // - Ignore: ignores unknown/duplicate fields. + // - Warn: responds with a warning for each + // unknown/duplicate field, but successfully serves the request. + // - Strict: fails the request on unknown/duplicate fields. 
+ // +optional + FieldValidation string `json:"fieldValidation,omitempty" protobuf:"bytes,4,name=fieldValidation"` } // +k8s:conversion-gen:explicit-from=net/url.Values @@ -578,6 +599,19 @@ type PatchOptions struct { // types (JsonPatch, MergePatch, StrategicMergePatch). // +optional FieldManager string `json:"fieldManager,omitempty" protobuf:"bytes,3,name=fieldManager"` + + // fieldValidation determines how the server should respond to + // unknown/duplicate fields in the object in the request. + // Introduced as alpha in 1.23, older servers or servers with the + // `ServerSideFieldValidation` feature disabled will discard valid values + // specified in this param and not perform any server side field validation. + // Valid values are: + // - Ignore: ignores unknown/duplicate fields. + // - Warn: responds with a warning for each + // unknown/duplicate field, but successfully serves the request. + // - Strict: fails the request on unknown/duplicate fields. + // +optional + FieldValidation string `json:"fieldValidation,omitempty" protobuf:"bytes,4,name=fieldValidation"` } // ApplyOptions may be provided when applying an API object. @@ -633,6 +667,19 @@ type UpdateOptions struct { // as defined by https://golang.org/pkg/unicode/#IsPrint. // +optional FieldManager string `json:"fieldManager,omitempty" protobuf:"bytes,2,name=fieldManager"` + + // fieldValidation determines how the server should respond to + // unknown/duplicate fields in the object in the request. + // Introduced as alpha in 1.23, older servers or servers with the + // `ServerSideFieldValidation` feature disabled will discard valid values + // specified in this param and not perform any server side field validation. + // Valid values are: + // - Ignore: ignores unknown/duplicate fields. + // - Warn: responds with a warning for each + // unknown/duplicate field, but successfully serves the request. + // - Strict: fails the request on unknown/duplicate fields. + // +optional + FieldValidation string `json:"fieldValidation,omitempty" protobuf:"bytes,3,name=fieldValidation"` } // Preconditions must be fulfilled before an operation (update, delete, etc.) is carried out. @@ -1180,6 +1227,15 @@ type ManagedFieldsEntry struct { // FieldsV1 holds the first JSON version format as described in the "FieldsV1" type. // +optional FieldsV1 *FieldsV1 `json:"fieldsV1,omitempty" protobuf:"bytes,7,opt,name=fieldsV1"` + + // Subresource is the name of the subresource used to update that object, or + // empty string if the object was updated through the main resource. The + // value of this field is used to distinguish between managers, even if they + // share the same name. For example, a status update will be distinct from a + // regular update using the same manager name. + // Note that the APIVersion field is not related to the Subresource field and + // it always corresponds to the version of the main resource. + Subresource string `json:"subresource,omitempty" protobuf:"bytes,8,opt,name=subresource"` } // ManagedFieldsOperationType is the type of operation which lead to a ManagedFieldsEntry being created. 
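The hunks above add the FieldValidation knob to CreateOptions, PatchOptions, and UpdateOptions, along with the Ignore/Warn/Strict constants. A minimal sketch (not part of the patch) of how a caller might exercise the new field once this vendored apimachinery version is in place; the type, field, and constant names come from the diff above, while the manager name and the standalone program are illustrative only:

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// Strict: ask the server to reject objects carrying unknown or
	// duplicate fields (alpha in 1.23; older servers, or servers with
	// the ServerSideFieldValidation feature disabled, ignore the field).
	create := metav1.CreateOptions{
		FieldManager:    "example-manager", // illustrative manager name
		FieldValidation: metav1.FieldValidationStrict,
	}
	fmt.Println(create.String())

	// UpdateOptions carries the same field, wired to protobuf tag 3
	// rather than 4, matching the 0x1a vs 0x22 key bytes written by the
	// MarshalToSizedBuffer hunks earlier in this diff.
	update := metav1.UpdateOptions{
		FieldValidation: metav1.FieldValidationWarn,
	}
	fmt.Println(update.String())
}

Both printouts go through the generated String() methods, which the patch also extends to include the new FieldValidation value.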
diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types_swagger_doc_generated.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types_swagger_doc_generated.go index c33d8ffa7387f..088ff01f0becf 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types_swagger_doc_generated.go @@ -112,9 +112,10 @@ func (Condition) SwaggerDoc() map[string]string { } var map_CreateOptions = map[string]string{ - "": "CreateOptions may be provided when creating an API object.", - "dryRun": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed", - "fieldManager": "fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.", + "": "CreateOptions may be provided when creating an API object.", + "dryRun": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed", + "fieldManager": "fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.", + "fieldValidation": "fieldValidation determines how the server should respond to unknown/duplicate fields in the object in the request. Introduced as alpha in 1.23, older servers or servers with the `ServerSideFieldValidation` feature disabled will discard valid values specified in this param and not perform any server side field validation. Valid values are: - Ignore: ignores unknown/duplicate fields. - Warn: responds with a warning for each unknown/duplicate field, but successfully serves the request. - Strict: fails the request on unknown/duplicate fields.", } func (CreateOptions) SwaggerDoc() map[string]string { @@ -209,7 +210,7 @@ var map_ListOptions = map[string]string{ "labelSelector": "A selector to restrict the list of returned objects by their labels. Defaults to everything.", "fieldSelector": "A selector to restrict the list of returned objects by their fields. Defaults to everything.", "watch": "Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.", - "allowWatchBookmarks": "allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored.", + "allowWatchBookmarks": "allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. 
Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.", "resourceVersion": "resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.\n\nDefaults to unset", "resourceVersionMatch": "resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.\n\nDefaults to unset", "timeoutSeconds": "Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.", @@ -222,13 +223,14 @@ func (ListOptions) SwaggerDoc() map[string]string { } var map_ManagedFieldsEntry = map[string]string{ - "": "ManagedFieldsEntry is a workflow-id, a FieldSet and the group version of the resource that the fieldset applies to.", - "manager": "Manager is an identifier of the workflow managing these fields.", - "operation": "Operation is the type of operation which lead to this ManagedFieldsEntry being created. The only valid values for this field are 'Apply' and 'Update'.", - "apiVersion": "APIVersion defines the version of this resource that this field set applies to. The format is \"group/version\" just like the top-level APIVersion field. It is necessary to track the version of a field set because it cannot be automatically converted.", - "time": "Time is timestamp of when these fields were set. It should always be empty if Operation is 'Apply'", - "fieldsType": "FieldsType is the discriminator for the different fields format and version. There is currently only one possible value: \"FieldsV1\"", - "fieldsV1": "FieldsV1 holds the first JSON version format as described in the \"FieldsV1\" type.", + "": "ManagedFieldsEntry is a workflow-id, a FieldSet and the group version of the resource that the fieldset applies to.", + "manager": "Manager is an identifier of the workflow managing these fields.", + "operation": "Operation is the type of operation which lead to this ManagedFieldsEntry being created. The only valid values for this field are 'Apply' and 'Update'.", + "apiVersion": "APIVersion defines the version of this resource that this field set applies to. The format is \"group/version\" just like the top-level APIVersion field. It is necessary to track the version of a field set because it cannot be automatically converted.", + "time": "Time is timestamp of when these fields were set. It should always be empty if Operation is 'Apply'", + "fieldsType": "FieldsType is the discriminator for the different fields format and version. There is currently only one possible value: \"FieldsV1\"", + "fieldsV1": "FieldsV1 holds the first JSON version format as described in the \"FieldsV1\" type.", + "subresource": "Subresource is the name of the subresource used to update that object, or empty string if the object was updated through the main resource. The value of this field is used to distinguish between managers, even if they share the same name. For example, a status update will be distinct from a regular update using the same manager name. 
Note that the APIVersion field is not related to the Subresource field and it always corresponds to the version of the main resource.", } func (ManagedFieldsEntry) SwaggerDoc() map[string]string { @@ -301,10 +303,11 @@ func (Patch) SwaggerDoc() map[string]string { } var map_PatchOptions = map[string]string{ - "": "PatchOptions may be provided when patching an API object. PatchOptions is meant to be a superset of UpdateOptions.", - "dryRun": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed", - "force": "Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.", - "fieldManager": "fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).", + "": "PatchOptions may be provided when patching an API object. PatchOptions is meant to be a superset of UpdateOptions.", + "dryRun": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed", + "force": "Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.", + "fieldManager": "fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).", + "fieldValidation": "fieldValidation determines how the server should respond to unknown/duplicate fields in the object in the request. Introduced as alpha in 1.23, older servers or servers with the `ServerSideFieldValidation` feature disabled will discard valid values specified in this param and not perform any server side field validation. Valid values are: - Ignore: ignores unknown/duplicate fields. - Warn: responds with a warning for each unknown/duplicate field, but successfully serves the request. - Strict: fails the request on unknown/duplicate fields.", } func (PatchOptions) SwaggerDoc() map[string]string { @@ -446,9 +449,10 @@ func (TypeMeta) SwaggerDoc() map[string]string { } var map_UpdateOptions = map[string]string{ - "": "UpdateOptions may be provided when updating an API object. All fields in UpdateOptions should also be present in PatchOptions.", - "dryRun": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed", - "fieldManager": "fieldManager is a name associated with the actor or entity that is making these changes. 
The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.", + "": "UpdateOptions may be provided when updating an API object. All fields in UpdateOptions should also be present in PatchOptions.", + "dryRun": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed", + "fieldManager": "fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.", + "fieldValidation": "fieldValidation determines how the server should respond to unknown/duplicate fields in the object in the request. Introduced as alpha in 1.23, older servers or servers with the `ServerSideFieldValidation` feature disabled will discard valid values specified in this param and not perform any server side field validation. Valid values are: - Ignore: ignores unknown/duplicate fields. - Warn: responds with a warning for each unknown/duplicate field, but successfully serves the request. - Strict: fails the request on unknown/duplicate fields.", } func (UpdateOptions) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/helpers.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/helpers.go index 7b101ea512452..d26c6cff4e6f3 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/helpers.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/helpers.go @@ -382,7 +382,7 @@ func (unstructuredJSONScheme) Identifier() runtime.Identifier { func (s unstructuredJSONScheme) decode(data []byte) (runtime.Object, error) { type detector struct { - Items gojson.RawMessage + Items gojson.RawMessage `json:"items"` } var det detector if err := json.Unmarshal(data, &det); err != nil { @@ -425,7 +425,7 @@ func (unstructuredJSONScheme) decodeToUnstructured(data []byte, unstruct *Unstru func (s unstructuredJSONScheme) decodeToList(data []byte, list *UnstructuredList) error { type decodeList struct { - Items []gojson.RawMessage + Items []gojson.RawMessage `json:"items"` } var dList decodeList diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/zz_generated.deepcopy.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/zz_generated.deepcopy.go index 9a9f25e8f26e4..fe8250dd6fd0c 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/zz_generated.deepcopy.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/zz_generated.deepcopy.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.conversion.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.conversion.go index 3ecb67c827984..b7590f0b313ef 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.conversion.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.conversion.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* @@ -293,6 +294,13 @@ func autoConvert_url_Values_To_v1_CreateOptions(in *url.Values, out *CreateOptio } else { out.FieldManager = "" } + if values, ok := map[string][]string(*in)["fieldValidation"]; ok && len(values) > 0 { + if err 
:= runtime.Convert_Slice_string_To_string(&values, &out.FieldValidation, s); err != nil { + return err + } + } else { + out.FieldValidation = "" + } return nil } @@ -448,6 +456,13 @@ func autoConvert_url_Values_To_v1_PatchOptions(in *url.Values, out *PatchOptions } else { out.FieldManager = "" } + if values, ok := map[string][]string(*in)["fieldValidation"]; ok && len(values) > 0 { + if err := runtime.Convert_Slice_string_To_string(&values, &out.FieldValidation, s); err != nil { + return err + } + } else { + out.FieldValidation = "" + } return nil } @@ -496,6 +511,13 @@ func autoConvert_url_Values_To_v1_UpdateOptions(in *url.Values, out *UpdateOptio } else { out.FieldManager = "" } + if values, ok := map[string][]string(*in)["fieldValidation"]; ok && len(values) > 0 { + if err := runtime.Convert_Slice_string_To_string(&values, &out.FieldValidation, s); err != nil { + return err + } + } else { + out.FieldValidation = "" + } return nil } diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.deepcopy.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.deepcopy.go index d43020da573ee..418e6099f4007 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.deepcopy.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.defaults.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.defaults.go index cce2e603a69ad..dac177e93bd0d 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.defaults.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.defaults.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/zz_generated.deepcopy.go index 89053b9819475..972b7f03ea0f8 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/zz_generated.deepcopy.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/zz_generated.defaults.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/zz_generated.defaults.go index 73e63fc114d33..198b5be4af534 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/zz_generated.defaults.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/zz_generated.defaults.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* diff --git a/vendor/k8s.io/apimachinery/pkg/conversion/converter.go b/vendor/k8s.io/apimachinery/pkg/conversion/converter.go index 79134847644a1..ed51a25e33852 100644 --- a/vendor/k8s.io/apimachinery/pkg/conversion/converter.go +++ b/vendor/k8s.io/apimachinery/pkg/conversion/converter.go @@ -44,23 +44,16 @@ type Converter struct { generatedConversionFuncs ConversionFuncs // Set of conversions that should be treated as a no-op - ignoredConversions map[typePair]struct{} ignoredUntypedConversions map[typePair]struct{} - - // nameFunc is called to retrieve the name of a type; this name is used for the - // purpose of deciding whether two types match or not (i.e., will we attempt to - // do a conversion). The default returns the go type name. - nameFunc func(t reflect.Type) string } // NewConverter creates a new Converter object. 
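The three generated hunks above follow one pattern for the new fieldValidation query parameter. A minimal, hypothetical sketch of that pattern (fieldValidationFromQuery is illustrative, not part of apimachinery; it assumes the first query value wins, as runtime.Convert_Slice_string_To_string does):

package main

import (
	"fmt"
	"net/url"
)

// fieldValidationFromQuery mirrors the generated conversion: take the
// first "fieldValidation" value if present, otherwise reset the field
// to its zero value.
func fieldValidationFromQuery(q url.Values) string {
	if values, ok := q["fieldValidation"]; ok && len(values) > 0 {
		return values[0] // "Ignore", "Warn", or "Strict"
	}
	return ""
}

func main() {
	q, _ := url.ParseQuery("fieldManager=kubectl&fieldValidation=Strict")
	fmt.Println(fieldValidationFromQuery(q)) // Strict
}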
-func NewConverter(nameFn NameFunc) *Converter { +// Arg NameFunc is just for backward compatibility. +func NewConverter(NameFunc) *Converter { c := &Converter{ conversionFuncs: NewConversionFuncs(), generatedConversionFuncs: NewConversionFuncs(), - ignoredConversions: make(map[typePair]struct{}), ignoredUntypedConversions: make(map[typePair]struct{}), - nameFunc: nameFn, } c.RegisterUntypedConversionFunc( (*[]byte)(nil), (*[]byte)(nil), @@ -192,7 +185,6 @@ func (c *Converter) RegisterIgnoredConversion(from, to interface{}) error { if typeTo.Kind() != reflect.Ptr { return fmt.Errorf("expected pointer arg for 'to' param 1, got: %v", typeTo) } - c.ignoredConversions[typePair{typeFrom.Elem(), typeTo.Elem()}] = struct{}{} c.ignoredUntypedConversions[typePair{typeFrom, typeTo}] = struct{}{} return nil } diff --git a/vendor/k8s.io/apimachinery/pkg/labels/selector.go b/vendor/k8s.io/apimachinery/pkg/labels/selector.go index b0865777a29a9..9eea34579b80d 100644 --- a/vendor/k8s.io/apimachinery/pkg/labels/selector.go +++ b/vendor/k8s.io/apimachinery/pkg/labels/selector.go @@ -31,12 +31,15 @@ import ( ) var ( - validRequirementOperators = []string{ + unaryOperators = []string{ + string(selection.Exists), string(selection.DoesNotExist), + } + binaryOperators = []string{ string(selection.In), string(selection.NotIn), string(selection.Equals), string(selection.DoubleEquals), string(selection.NotEquals), - string(selection.Exists), string(selection.DoesNotExist), string(selection.GreaterThan), string(selection.LessThan), } + validRequirementOperators = append(binaryOperators, unaryOperators...) ) // Requirements is AND of all requirements. @@ -140,7 +143,7 @@ type Requirement struct { // NewRequirement is the constructor for a Requirement. // If any of these rules is violated, an error is returned: -// (1) The operator can only be In, NotIn, Equals, DoubleEquals, NotEquals, Exists, or DoesNotExist. +// (1) The operator can only be In, NotIn, Equals, DoubleEquals, Gt, Lt, NotEquals, Exists, or DoesNotExist. // (2) If the operator is In or NotIn, the values set must be non-empty. // (3) If the operator is Equals, DoubleEquals, or NotEquals, the values set must contain one value. // (4) If the operator is Exists or DoesNotExist, the value set must be empty. @@ -364,13 +367,9 @@ func safeSort(in []string) []string { // Add adds requirements to the selector. It copies the current selector returning a new one func (s internalSelector) Add(reqs ...Requirement) Selector { - var ret internalSelector - for ix := range s { - ret = append(ret, s[ix]) - } - for _, r := range reqs { - ret = append(ret, r) - } + ret := make(internalSelector, 0, len(s)+len(reqs)) + ret = append(ret, s...) + ret = append(ret, reqs...) 
sort.Sort(ByKey(ret)) return ret } @@ -753,7 +752,7 @@ func (p *Parser) parseOperator() (op selection.Operator, err error) { case NotEqualsToken: op = selection.NotEquals default: - return "", fmt.Errorf("found '%s', expected: '=', '!=', '==', 'in', notin'", lit) + return "", fmt.Errorf("found '%s', expected: %v", lit, strings.Join(binaryOperators, ", ")) } return op, nil } diff --git a/vendor/k8s.io/apimachinery/pkg/labels/zz_generated.deepcopy.go b/vendor/k8s.io/apimachinery/pkg/labels/zz_generated.deepcopy.go index 4d482947fcd17..fdf4c31e1ea6e 100644 --- a/vendor/k8s.io/apimachinery/pkg/labels/zz_generated.deepcopy.go +++ b/vendor/k8s.io/apimachinery/pkg/labels/zz_generated.deepcopy.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/converter.go b/vendor/k8s.io/apimachinery/pkg/runtime/converter.go index 4a6cc68574a45..b99492a891f0b 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/converter.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/converter.go @@ -22,6 +22,7 @@ import ( "math" "os" "reflect" + "sort" "strconv" "strings" "sync" @@ -109,21 +110,141 @@ type unstructuredConverter struct { // to Go types via reflection. It performs mismatch detection automatically and is intended for use by external // test tools. Use DefaultUnstructuredConverter if you do not explicitly need mismatch detection. func NewTestUnstructuredConverter(comparison conversion.Equalities) UnstructuredConverter { + return NewTestUnstructuredConverterWithValidation(comparison) +} + +// NewTestUnstructuredConverterWithValidation allows for access to +// FromUnstructuredWithValidation from within tests. +func NewTestUnstructuredConverterWithValidation(comparison conversion.Equalities) *unstructuredConverter { return &unstructuredConverter{ mismatchDetection: true, comparison: comparison, } } -// FromUnstructured converts an object from map[string]interface{} representation into a concrete type. +// fromUnstructuredContext provides options for informing the converter +// the state of its recursive walk through the conversion process. +type fromUnstructuredContext struct { + // isInlined indicates whether the converter is currently in + // an inlined field or not to determine whether it should + // validate the matchedKeys yet or only collect them. + // This should only be set from `structFromUnstructured` + isInlined bool + // matchedKeys is a stack of the set of all fields that exist in the + // concrete go type of the object being converted into. + // This should only be manipulated via `pushMatchedKeyTracker`, + // `recordMatchedKey`, or `popAndVerifyMatchedKeys` + matchedKeys []map[string]struct{} + // parentPath collects the path that the conversion + // takes as it traverses the unstructured json map. + // It is used to report the full path to any unknown + // fields that the converter encounters. + parentPath []string + // returnUnknownFields indicates whether or not + // unknown field errors should be collected and + // returned to the caller + returnUnknownFields bool + // unknownFieldErrors are the collection of + // the full path to each unknown field in the + // object. + unknownFieldErrors []error +} + +// pushMatchedKeyTracker adds a placeholder set for tracking +// matched keys for the given level. This should only be +// called from `structFromUnstructured`.
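The matchedKeys bookkeeping described above is a stack discipline: push a set per struct level, record every source key the struct consumed, and on pop report any source key that was never matched. A simplified, self-contained model of that idea (hypothetical code, not the vendored implementation, which works on reflect.Value):

package main

import "fmt"

type keyTracker struct {
	stack []map[string]struct{}
}

func (t *keyTracker) push() {
	t.stack = append(t.stack, map[string]struct{}{})
}

func (t *keyTracker) record(key string) {
	t.stack[len(t.stack)-1][key] = struct{}{}
}

// pop compares the recorded keys against the source map and returns the
// keys that were never matched, i.e. the unknown fields at this level.
func (t *keyTracker) pop(src map[string]interface{}) []string {
	matched := t.stack[len(t.stack)-1]
	t.stack = t.stack[:len(t.stack)-1]
	var unknown []string
	for k := range src {
		if _, ok := matched[k]; !ok {
			unknown = append(unknown, k)
		}
	}
	return unknown
}

func main() {
	t := &keyTracker{}
	t.push()
	t.record("apiVersion")
	fmt.Println(t.pop(map[string]interface{}{"apiVersion": "v1", "unknwn": 1})) // [unknwn]
}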
+func (c *fromUnstructuredContext) pushMatchedKeyTracker() { + if !c.returnUnknownFields { + return + } + + c.matchedKeys = append(c.matchedKeys, nil) +} + +// recordMatchedKey initializes the last element of matchedKeys +// (if needed) and sets 'key'. This should only be called from +// `structFromUnstructured`. +func (c *fromUnstructuredContext) recordMatchedKey(key string) { + if !c.returnUnknownFields { + return + } + + last := len(c.matchedKeys) - 1 + if c.matchedKeys[last] == nil { + c.matchedKeys[last] = map[string]struct{}{} + } + c.matchedKeys[last][key] = struct{}{} +} + +// popAndVerifyMatchedKeys pops the last element of matchedKeys, +// checks the matched keys against the data, and adds unknown +// field errors for any unmatched keys. +// `mapValue` is the value of sv containing all of the keys that exist at this level +// (ie. sv.MapKeys) in the source data. +// `matchedKeys` are all the keys found for that level in the destination object. +// This should only be called from `structFromUnstructured`. +func (c *fromUnstructuredContext) popAndVerifyMatchedKeys(mapValue reflect.Value) { + if !c.returnUnknownFields { + return + } + + last := len(c.matchedKeys) - 1 + curMatchedKeys := c.matchedKeys[last] + c.matchedKeys[last] = nil + c.matchedKeys = c.matchedKeys[:last] + for _, key := range mapValue.MapKeys() { + if _, ok := curMatchedKeys[key.String()]; !ok { + c.recordUnknownField(key.String()) + } + } +} + +func (c *fromUnstructuredContext) recordUnknownField(field string) { + if !c.returnUnknownFields { + return + } + + pathLen := len(c.parentPath) + c.pushKey(field) + errPath := strings.Join(c.parentPath, "") + c.parentPath = c.parentPath[:pathLen] + c.unknownFieldErrors = append(c.unknownFieldErrors, fmt.Errorf(`unknown field "%s"`, errPath)) +} + +func (c *fromUnstructuredContext) pushIndex(index int) { + if !c.returnUnknownFields { + return + } + + c.parentPath = append(c.parentPath, "[", strconv.Itoa(index), "]") +} + +func (c *fromUnstructuredContext) pushKey(key string) { + if !c.returnUnknownFields { + return + } + + if len(c.parentPath) > 0 { + c.parentPath = append(c.parentPath, ".") + } + c.parentPath = append(c.parentPath, key) + +} + +// FromUnstructuredWithValidation converts an object from map[string]interface{} representation into a concrete type. +// It uses encoding/json/Unmarshaler if object implements it or reflection if not. +// It takes a returnUnknownFields flag that indicates whether unknown fields encountered during conversion should be collected and returned to the caller.
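A hedged usage sketch of the new entry point, assuming the vendored runtime package above (DefaultUnstructuredConverter is the concrete *unstructuredConverter in this version, so the method is reachable):

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime"
)

type Widget struct {
	Name string `json:"name"`
}

func main() {
	u := map[string]interface{}{"name": "a", "nmae": "typo"}
	var w Widget
	// returnUnknownFields=true surfaces "nmae" as a strict decoding error.
	err := runtime.DefaultUnstructuredConverter.FromUnstructuredWithValidation(u, &w, true)
	fmt.Println(runtime.IsStrictDecodingError(err), err)
}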
+func (c *unstructuredConverter) FromUnstructuredWithValidation(u map[string]interface{}, obj interface{}, returnUnknownFields bool) error { t := reflect.TypeOf(obj) value := reflect.ValueOf(obj) if t.Kind() != reflect.Ptr || value.IsNil() { return fmt.Errorf("FromUnstructured requires a non-nil pointer to an object, got %v", t) } - err := fromUnstructured(reflect.ValueOf(u), value.Elem()) + + fromUnstructuredContext := &fromUnstructuredContext{ + returnUnknownFields: returnUnknownFields, + } + err := fromUnstructured(reflect.ValueOf(u), value.Elem(), fromUnstructuredContext) if c.mismatchDetection { newObj := reflect.New(t.Elem()).Interface() newErr := fromUnstructuredViaJSON(u, newObj) @@ -134,7 +255,23 @@ func (c *unstructuredConverter) FromUnstructured(u map[string]interface{}, obj i klog.Fatalf("FromUnstructured mismatch\nobj1: %#v\nobj2: %#v", obj, newObj) } } - return err + if err != nil { + return err + } + if returnUnknownFields && len(fromUnstructuredContext.unknownFieldErrors) > 0 { + sort.Slice(fromUnstructuredContext.unknownFieldErrors, func(i, j int) bool { + return fromUnstructuredContext.unknownFieldErrors[i].Error() < + fromUnstructuredContext.unknownFieldErrors[j].Error() + }) + return NewStrictDecodingError(fromUnstructuredContext.unknownFieldErrors) + } + return nil +} + +// FromUnstructured converts an object from map[string]interface{} representation into a concrete type. +// It uses encoding/json/Unmarshaler if object implements it or reflection if not. +func (c *unstructuredConverter) FromUnstructured(u map[string]interface{}, obj interface{}) error { + return c.FromUnstructuredWithValidation(u, obj, false) } func fromUnstructuredViaJSON(u map[string]interface{}, obj interface{}) error { @@ -145,7 +282,7 @@ func fromUnstructuredViaJSON(u map[string]interface{}, obj interface{}) error { return json.Unmarshal(data, obj) } -func fromUnstructured(sv, dv reflect.Value) error { +func fromUnstructured(sv, dv reflect.Value, ctx *fromUnstructuredContext) error { sv = unwrapInterface(sv) if !sv.IsValid() { dv.Set(reflect.Zero(dv.Type())) @@ -213,18 +350,19 @@ func fromUnstructured(sv, dv reflect.Value) error { switch dt.Kind() { case reflect.Map: - return mapFromUnstructured(sv, dv) + return mapFromUnstructured(sv, dv, ctx) case reflect.Slice: - return sliceFromUnstructured(sv, dv) + return sliceFromUnstructured(sv, dv, ctx) case reflect.Ptr: - return pointerFromUnstructured(sv, dv) + return pointerFromUnstructured(sv, dv, ctx) case reflect.Struct: - return structFromUnstructured(sv, dv) + return structFromUnstructured(sv, dv, ctx) case reflect.Interface: return interfaceFromUnstructured(sv, dv) default: return fmt.Errorf("unrecognized type: %v", dt.Kind()) } + } func fieldInfoFromField(structType reflect.Type, field int) *fieldInfo { @@ -275,7 +413,7 @@ func unwrapInterface(v reflect.Value) reflect.Value { return v } -func mapFromUnstructured(sv, dv reflect.Value) error { +func mapFromUnstructured(sv, dv reflect.Value, ctx *fromUnstructuredContext) error { st, dt := sv.Type(), dv.Type() if st.Kind() != reflect.Map { return fmt.Errorf("cannot restore map from %v", st.Kind()) @@ -293,7 +431,7 @@ func mapFromUnstructured(sv, dv reflect.Value) error { for _, key := range sv.MapKeys() { value := reflect.New(dt.Elem()).Elem() if val := unwrapInterface(sv.MapIndex(key)); val.IsValid() { - if err := fromUnstructured(val, value); err != nil { + if err := fromUnstructured(val, value, ctx); err != nil { return err } } else { @@ -308,7 +446,7 @@ func mapFromUnstructured(sv, dv 
reflect.Value) error { return nil } -func sliceFromUnstructured(sv, dv reflect.Value) error { +func sliceFromUnstructured(sv, dv reflect.Value, ctx *fromUnstructuredContext) error { st, dt := sv.Type(), dv.Type() if st.Kind() == reflect.String && dt.Elem().Kind() == reflect.Uint8 { // We store original []byte representation as string. @@ -340,15 +478,22 @@ func sliceFromUnstructured(sv, dv reflect.Value) error { return nil } dv.Set(reflect.MakeSlice(dt, sv.Len(), sv.Cap())) + + pathLen := len(ctx.parentPath) + defer func() { + ctx.parentPath = ctx.parentPath[:pathLen] + }() for i := 0; i < sv.Len(); i++ { - if err := fromUnstructured(sv.Index(i), dv.Index(i)); err != nil { + ctx.pushIndex(i) + if err := fromUnstructured(sv.Index(i), dv.Index(i), ctx); err != nil { return err } + ctx.parentPath = ctx.parentPath[:pathLen] } return nil } -func pointerFromUnstructured(sv, dv reflect.Value) error { +func pointerFromUnstructured(sv, dv reflect.Value, ctx *fromUnstructuredContext) error { st, dt := sv.Type(), dv.Type() if st.Kind() == reflect.Ptr && sv.IsNil() { @@ -358,38 +503,63 @@ func pointerFromUnstructured(sv, dv reflect.Value) error { dv.Set(reflect.New(dt.Elem())) switch st.Kind() { case reflect.Ptr, reflect.Interface: - return fromUnstructured(sv.Elem(), dv.Elem()) + return fromUnstructured(sv.Elem(), dv.Elem(), ctx) default: - return fromUnstructured(sv, dv.Elem()) + return fromUnstructured(sv, dv.Elem(), ctx) } } -func structFromUnstructured(sv, dv reflect.Value) error { +func structFromUnstructured(sv, dv reflect.Value, ctx *fromUnstructuredContext) error { st, dt := sv.Type(), dv.Type() if st.Kind() != reflect.Map { return fmt.Errorf("cannot restore struct from: %v", st.Kind()) } + pathLen := len(ctx.parentPath) + svInlined := ctx.isInlined + defer func() { + ctx.parentPath = ctx.parentPath[:pathLen] + ctx.isInlined = svInlined + }() + if !svInlined { + ctx.pushMatchedKeyTracker() + } for i := 0; i < dt.NumField(); i++ { fieldInfo := fieldInfoFromField(dt, i) fv := dv.Field(i) if len(fieldInfo.name) == 0 { - // This field is inlined. - if err := fromUnstructured(sv, fv); err != nil { + // This field is inlined, recurse into fromUnstructured again + // with the same set of matched keys. + ctx.isInlined = true + if err := fromUnstructured(sv, fv, ctx); err != nil { return err } + ctx.isInlined = svInlined } else { + // This field is not inlined so we recurse into + // child field of sv corresponding to field i of + // dv, with a new set of matchedKeys and updating + // the parentPath to indicate that we are one level + // deeper. 
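For context on the inlined branch handled above: an embedded struct with no JSON name contributes its keys at the parent's JSON level, which is why the walk reuses the parent's matched-key set instead of pushing a new one. A standalone encoding/json illustration of that layout:

package main

import (
	"encoding/json"
	"fmt"
)

type Meta struct {
	Name string `json:"name"`
}

type Obj struct {
	Meta        // inlined: "name" lives at the same JSON level as "spec"
	Spec string `json:"spec"`
}

func main() {
	var o Obj
	_ = json.Unmarshal([]byte(`{"name":"x","spec":"y"}`), &o)
	fmt.Println(o.Name, o.Spec) // x y
}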
+ ctx.recordMatchedKey(fieldInfo.name) value := unwrapInterface(sv.MapIndex(fieldInfo.nameValue)) if value.IsValid() { - if err := fromUnstructured(value, fv); err != nil { + ctx.isInlined = false + ctx.pushKey(fieldInfo.name) + if err := fromUnstructured(value, fv, ctx); err != nil { return err } + ctx.parentPath = ctx.parentPath[:pathLen] + ctx.isInlined = svInlined } else { fv.Set(reflect.Zero(fv.Type())) } } } + if !svInlined { + ctx.popAndVerifyMatchedKeys(sv) + } return nil } diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/error.go b/vendor/k8s.io/apimachinery/pkg/runtime/error.go index be0c5edc8554e..7dfa45762fafc 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/error.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/error.go @@ -19,6 +19,7 @@ package runtime import ( "fmt" "reflect" + "strings" "k8s.io/apimachinery/pkg/runtime/schema" ) @@ -124,20 +125,30 @@ func IsMissingVersion(err error) bool { // strictDecodingError is a base error type that is returned by a strict Decoder such // as UniversalStrictDecoder. type strictDecodingError struct { - message string - data string + errors []error } // NewStrictDecodingError creates a new strictDecodingError object. -func NewStrictDecodingError(message string, data string) error { +func NewStrictDecodingError(errors []error) error { return &strictDecodingError{ - message: message, - data: data, + errors: errors, } } func (e *strictDecodingError) Error() string { - return fmt.Sprintf("strict decoder error for %s: %s", e.data, e.message) + var s strings.Builder + s.WriteString("strict decoding error: ") + for i, err := range e.errors { + if i != 0 { + s.WriteString(", ") + } + s.WriteString(err.Error()) + } + return s.String() +} + +func (e *strictDecodingError) Errors() []error { + return e.errors } // IsStrictDecodingError returns true if the error indicates that the provided object @@ -149,3 +160,13 @@ func IsStrictDecodingError(err error) bool { _, ok := err.(*strictDecodingError) return ok } + +// AsStrictDecodingError returns a strict decoding error +// containing all the strictness violations. +func AsStrictDecodingError(err error) (*strictDecodingError, bool) { + if err == nil { + return nil, false + } + strictErr, ok := err.(*strictDecodingError) + return strictErr, ok +} diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/interfaces.go b/vendor/k8s.io/apimachinery/pkg/runtime/interfaces.go index 3e1fab1d11019..b324b76c676c6 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/interfaces.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/interfaces.go @@ -125,6 +125,9 @@ type SerializerInfo struct { // PrettySerializer, if set, can serialize this object in a form biased towards // readability. PrettySerializer Serializer + // StrictSerializer, if set, deserializes this object strictly, + // erring on unknown fields. + StrictSerializer Serializer // StreamSerializer, if set, describes the streaming serialization format // for this media type. StreamSerializer *StreamSerializerInfo diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/scheme.go b/vendor/k8s.io/apimachinery/pkg/runtime/scheme.go index ae47ab3abae49..f5da6b12f44b9 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/scheme.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/scheme.go @@ -99,7 +99,7 @@ func NewScheme() *Scheme { versionPriority: map[string][]string{}, schemeName: naming.GetNameFromCallsite(internalPackages...), } - s.converter = conversion.NewConverter(s.nameFunc) + s.converter = conversion.NewConverter(nil) // Enable couple default conversions by default. 
utilruntime.Must(RegisterEmbeddedConversions(s)) @@ -107,28 +107,6 @@ func NewScheme() *Scheme { return s } -// nameFunc returns the name of the type that we wish to use to determine when two types attempt -// a conversion. Defaults to the go name of the type if the type is not registered. -func (s *Scheme) nameFunc(t reflect.Type) string { - // find the preferred names for this type - gvks, ok := s.typeToGVK[t] - if !ok { - return t.Name() - } - - for _, gvk := range gvks { - internalGV := gvk.GroupVersion() - internalGV.Version = APIVersionInternal // this is hacky and maybe should be passed in - internalGVK := internalGV.WithKind(gvk.Kind) - - if internalType, exists := s.gvkToType[internalGVK]; exists { - return s.typeToGVK[internalType][0].Kind - } - } - - return gvks[0].Kind -} - // Converter allows access to the converter for the scheme func (s *Scheme) Converter() *conversion.Converter { return s.converter diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/codec_factory.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/codec_factory.go index e55ab94d1475b..9de35e791c007 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/codec_factory.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/codec_factory.go @@ -40,6 +40,7 @@ type serializerType struct { Serializer runtime.Serializer PrettySerializer runtime.Serializer + StrictSerializer runtime.Serializer AcceptStreamContentTypes []string StreamContentType string @@ -70,10 +71,20 @@ func newSerializersForScheme(scheme *runtime.Scheme, mf json.MetaFactory, option ) } + strictJSONSerializer := json.NewSerializerWithOptions( + mf, scheme, scheme, + json.SerializerOptions{Yaml: false, Pretty: false, Strict: true}, + ) + jsonSerializerType.StrictSerializer = strictJSONSerializer + yamlSerializer := json.NewSerializerWithOptions( mf, scheme, scheme, json.SerializerOptions{Yaml: true, Pretty: false, Strict: options.Strict}, ) + strictYAMLSerializer := json.NewSerializerWithOptions( + mf, scheme, scheme, + json.SerializerOptions{Yaml: true, Pretty: false, Strict: true}, + ) protoSerializer := protobuf.NewSerializer(scheme, scheme) protoRawSerializer := protobuf.NewRawSerializer(scheme, scheme) @@ -85,12 +96,16 @@ func newSerializersForScheme(scheme *runtime.Scheme, mf json.MetaFactory, option FileExtensions: []string{"yaml"}, EncodesAsText: true, Serializer: yamlSerializer, + StrictSerializer: strictYAMLSerializer, }, { AcceptContentTypes: []string{runtime.ContentTypeProtobuf}, ContentType: runtime.ContentTypeProtobuf, FileExtensions: []string{"pb"}, Serializer: protoSerializer, + // note, strict decoding is unsupported for protobuf, + // fall back to regular serializing + StrictSerializer: protoSerializer, Framer: protobuf.LengthDelimitedFramer, StreamSerializer: protoRawSerializer, @@ -187,6 +202,7 @@ func newCodecFactory(scheme *runtime.Scheme, serializers []serializerType) Codec EncodesAsText: d.EncodesAsText, Serializer: d.Serializer, PrettySerializer: d.PrettySerializer, + StrictSerializer: d.StrictSerializer, } mediaType, _, err := mime.ParseMediaType(info.MediaType) diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/json.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/json.go index 48f0777b24e2a..6c082f660eeed 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/json.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/json.go @@ -20,10 +20,8 @@ import ( "encoding/json" "io" "strconv" - "unsafe" - jsoniter "github.com/json-iterator/go" - 
"github.com/modern-go/reflect2" + kjson "sigs.k8s.io/json" "sigs.k8s.io/yaml" "k8s.io/apimachinery/pkg/runtime" @@ -68,6 +66,7 @@ func identifier(options SerializerOptions) runtime.Identifier { "name": "json", "yaml": strconv.FormatBool(options.Yaml), "pretty": strconv.FormatBool(options.Pretty), + "strict": strconv.FormatBool(options.Strict), } identifier, err := json.Marshal(result) if err != nil { @@ -110,79 +109,6 @@ type Serializer struct { var _ runtime.Serializer = &Serializer{} var _ recognizer.RecognizingDecoder = &Serializer{} -type customNumberExtension struct { - jsoniter.DummyExtension -} - -func (cne *customNumberExtension) CreateDecoder(typ reflect2.Type) jsoniter.ValDecoder { - if typ.String() == "interface {}" { - return customNumberDecoder{} - } - return nil -} - -type customNumberDecoder struct { -} - -func (customNumberDecoder) Decode(ptr unsafe.Pointer, iter *jsoniter.Iterator) { - switch iter.WhatIsNext() { - case jsoniter.NumberValue: - var number jsoniter.Number - iter.ReadVal(&number) - i64, err := strconv.ParseInt(string(number), 10, 64) - if err == nil { - *(*interface{})(ptr) = i64 - return - } - f64, err := strconv.ParseFloat(string(number), 64) - if err == nil { - *(*interface{})(ptr) = f64 - return - } - iter.ReportError("DecodeNumber", err.Error()) - default: - *(*interface{})(ptr) = iter.Read() - } -} - -// CaseSensitiveJSONIterator returns a jsoniterator API that's configured to be -// case-sensitive when unmarshalling, and otherwise compatible with -// the encoding/json standard library. -func CaseSensitiveJSONIterator() jsoniter.API { - config := jsoniter.Config{ - EscapeHTML: true, - SortMapKeys: true, - ValidateJsonRawMessage: true, - CaseSensitive: true, - }.Froze() - // Force jsoniter to decode number to interface{} via int64/float64, if possible. - config.RegisterExtension(&customNumberExtension{}) - return config -} - -// StrictCaseSensitiveJSONIterator returns a jsoniterator API that's configured to be -// case-sensitive, but also disallows unknown fields when unmarshalling. It is compatible with -// the encoding/json standard library. -func StrictCaseSensitiveJSONIterator() jsoniter.API { - config := jsoniter.Config{ - EscapeHTML: true, - SortMapKeys: true, - ValidateJsonRawMessage: true, - CaseSensitive: true, - DisallowUnknownFields: true, - }.Froze() - // Force jsoniter to decode number to interface{} via int64/float64, if possible. - config.RegisterExtension(&customNumberExtension{}) - return config -} - -// Private copies of jsoniter to try to shield against possible mutations -// from outside. Still does not protect from package level jsoniter.Register*() functions - someone calling them -// in some other library will mess with every usage of the jsoniter library in the whole program. 
-// See https://github.com/json-iterator/go/issues/265 -var caseSensitiveJSONIterator = CaseSensitiveJSONIterator() -var strictCaseSensitiveJSONIterator = StrictCaseSensitiveJSONIterator() - // gvkWithDefaults returns group kind and version defaulting from provided default func gvkWithDefaults(actual, defaultGVK schema.GroupVersionKind) schema.GroupVersionKind { if len(actual.Kind) == 0 { @@ -237,8 +163,11 @@ func (s *Serializer) Decode(originalData []byte, gvk *schema.GroupVersionKind, i types, _, err := s.typer.ObjectKinds(into) switch { case runtime.IsNotRegisteredError(err), isUnstructured: - if err := caseSensitiveJSONIterator.Unmarshal(data, into); err != nil { + strictErrs, err := s.unmarshal(into, data, originalData) + if err != nil { return nil, actual, err + } else if len(strictErrs) > 0 { + return into, actual, runtime.NewStrictDecodingError(strictErrs) } return into, actual, nil case err != nil: @@ -261,35 +190,12 @@ func (s *Serializer) Decode(originalData []byte, gvk *schema.GroupVersionKind, i return nil, actual, err } - if err := caseSensitiveJSONIterator.Unmarshal(data, obj); err != nil { - return nil, actual, err - } - - // If the deserializer is non-strict, return successfully here. - if !s.options.Strict { - return obj, actual, nil - } - - // In strict mode pass the data trough the YAMLToJSONStrict converter. - // This is done to catch duplicate fields regardless of encoding (JSON or YAML). For JSON data, - // the output would equal the input, unless there is a parsing error such as duplicate fields. - // As we know this was successful in the non-strict case, the only error that may be returned here - // is because of the newly-added strictness. hence we know we can return the typed strictDecoderError - // the actual error is that the object contains duplicate fields. - altered, err := yaml.YAMLToJSONStrict(originalData) + strictErrs, err := s.unmarshal(obj, data, originalData) if err != nil { - return nil, actual, runtime.NewStrictDecodingError(err.Error(), string(originalData)) - } - // As performance is not an issue for now for the strict deserializer (one has regardless to do - // the unmarshal twice), we take the sanitized, altered data that is guaranteed to have no duplicated - // fields, and unmarshal this into a copy of the already-populated obj. Any error that occurs here is - // due to that a matching field doesn't exist in the object. hence we can return a typed strictDecoderError, - // the actual error is that the object contains unknown field. - strictObj := obj.DeepCopyObject() - if err := strictCaseSensitiveJSONIterator.Unmarshal(altered, strictObj); err != nil { - return nil, actual, runtime.NewStrictDecodingError(err.Error(), string(originalData)) + return nil, actual, err + } else if len(strictErrs) > 0 { + return obj, actual, runtime.NewStrictDecodingError(strictErrs) } - // Always return the same object as the non-strict serializer to avoid any deviations. 
return obj, actual, nil } @@ -303,7 +209,7 @@ func (s *Serializer) Encode(obj runtime.Object, w io.Writer) error { func (s *Serializer) doEncode(obj runtime.Object, w io.Writer) error { if s.options.Yaml { - json, err := caseSensitiveJSONIterator.Marshal(obj) + json, err := json.Marshal(obj) if err != nil { return err } @@ -316,7 +222,7 @@ func (s *Serializer) doEncode(obj runtime.Object, w io.Writer) error { } if s.options.Pretty { - data, err := caseSensitiveJSONIterator.MarshalIndent(obj, "", " ") + data, err := json.MarshalIndent(obj, "", " ") if err != nil { return err } @@ -327,6 +233,50 @@ func (s *Serializer) doEncode(obj runtime.Object, w io.Writer) error { return encoder.Encode(obj) } +// IsStrict indicates whether the serializer +// uses strict decoding or not +func (s *Serializer) IsStrict() bool { + return s.options.Strict +} + +func (s *Serializer) unmarshal(into runtime.Object, data, originalData []byte) (strictErrs []error, err error) { + // If the deserializer is non-strict, return here. + if !s.options.Strict { + if err := kjson.UnmarshalCaseSensitivePreserveInts(data, into); err != nil { + return nil, err + } + return nil, nil + } + + if s.options.Yaml { + // In strict mode pass the original data through the YAMLToJSONStrict converter. + // This is done to catch duplicate fields in YAML that would have been dropped in the original YAMLToJSON conversion. + // TODO: rework YAMLToJSONStrict to return warnings about duplicate fields without terminating so we don't have to do this twice. + _, err := yaml.YAMLToJSONStrict(originalData) + if err != nil { + strictErrs = append(strictErrs, err) + } + } + + var strictJSONErrs []error + if u, isUnstructured := into.(runtime.Unstructured); isUnstructured { + // Unstructured is a custom unmarshaler that gets delegated + // to, so in order to detect strict JSON errors we need + // to unmarshal directly into the object. + m := u.UnstructuredContent() + strictJSONErrs, err = kjson.UnmarshalStrict(data, &m) + u.SetUnstructuredContent(m) + } else { + strictJSONErrs, err = kjson.UnmarshalStrict(data, into) + } + if err != nil { + // fatal decoding error, not due to strictness + return nil, err + } + strictErrs = append(strictErrs, strictJSONErrs...) + return strictErrs, nil +} + // Identifier implements runtime.Encoder interface. func (s *Serializer) Identifier() runtime.Identifier { return s.identifier diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/recognizer/recognizer.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/recognizer/recognizer.go index 709f85291150c..5a6c200dd1829 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/recognizer/recognizer.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/recognizer/recognizer.go @@ -109,10 +109,16 @@ func (d *decoder) Decode(data []byte, gvk *schema.GroupVersionKind, into runtime for _, r := range skipped { out, actual, err := r.Decode(data, gvk, into) if err != nil { - lastErr = err - continue + // if we got an object back from the decoder, and the + // error was a strict decoding error (e.g.
unknown or + duplicate fields), we still consider the recognizer + // to have understood the object + if out == nil || !runtime.IsStrictDecodingError(err) { + lastErr = err + continue + } } - return out, actual, nil + return out, actual, err } if lastErr == nil { diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/streaming/streaming.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/streaming/streaming.go index a60a7c04156be..971c46d496a31 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/streaming/streaming.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/streaming/streaming.go @@ -90,7 +90,6 @@ func (d *decoder) Decode(defaults *schema.GroupVersionKind, into runtime.Object) } // must read the rest of the frame (until we stop getting ErrShortBuffer) d.resetRead = true - base = 0 return nil, nil, ErrObjectTooLarge } if err != nil { diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/versioning/versioning.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/versioning/versioning.go index 718c5dfb7df75..ea7c580bd6b06 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/versioning/versioning.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/versioning/versioning.go @@ -133,11 +133,18 @@ func (c *codec) Decode(data []byte, defaultGVK *schema.GroupVersionKind, into ru } } + var strictDecodingErr error obj, gvk, err := c.decoder.Decode(data, defaultGVK, decodeInto) if err != nil { - return nil, gvk, err + if obj != nil && runtime.IsStrictDecodingError(err) { + // save the strictDecodingError and let the caller decide what to do with it + strictDecodingErr = err + } else { + return nil, gvk, err + } } + // TODO: look into strict handling of nested object decoding if d, ok := obj.(runtime.NestedObjectDecoder); ok { if err := d.DecodeNestedObjects(runtime.WithoutVersionDecoder{c.decoder}); err != nil { return nil, gvk, err @@ -153,14 +160,14 @@ func (c *codec) Decode(data []byte, defaultGVK *schema.GroupVersionKind, into ru // Short-circuit conversion if the into object is same object if into == obj { - return into, gvk, nil + return into, gvk, strictDecodingErr } if err := c.convertor.Convert(obj, into, c.decodeVersion); err != nil { return nil, gvk, err } - return into, gvk, nil + return into, gvk, strictDecodingErr } // perform defaulting if requested @@ -172,7 +179,7 @@ func (c *codec) Decode(data []byte, defaultGVK *schema.GroupVersionKind, into ru if err != nil { return nil, gvk, err } - return out, gvk, nil + return out, gvk, strictDecodingErr } // Encode ensures the provided object is output in the appropriate group and version, invoking diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/zz_generated.deepcopy.go b/vendor/k8s.io/apimachinery/pkg/runtime/zz_generated.deepcopy.go index b0393839e1f40..069ea4f92d2d1 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/zz_generated.deepcopy.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/zz_generated.deepcopy.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* diff --git a/vendor/k8s.io/apimachinery/pkg/util/cache/expiring.go b/vendor/k8s.io/apimachinery/pkg/util/cache/expiring.go index faf2c264538a9..0d2f153bf9eb2 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/cache/expiring.go +++ b/vendor/k8s.io/apimachinery/pkg/util/cache/expiring.go @@ -21,17 +21,17 @@ import ( "sync" "time" - utilclock "k8s.io/apimachinery/pkg/util/clock" + "k8s.io/utils/clock" ) // NewExpiring returns an initialized expiring cache.
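The error.go and versioning.go changes above replace the old single-message strict error with an aggregate that survives the codec path. A hedged sketch of building and unpacking one, assuming the vendored runtime package (AsStrictDecodingError returns an unexported type, but its Errors method is callable through type inference):

package main

import (
	"errors"
	"fmt"

	"k8s.io/apimachinery/pkg/runtime"
)

func main() {
	err := runtime.NewStrictDecodingError([]error{
		errors.New(`unknown field "spce"`),
		errors.New(`duplicate field "name"`),
	})
	// Prints: strict decoding error: unknown field "spce", duplicate field "name"
	fmt.Println(err)
	if strictErr, ok := runtime.AsStrictDecodingError(err); ok {
		fmt.Println(len(strictErr.Errors())) // 2
	}
}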
func NewExpiring() *Expiring { - return NewExpiringWithClock(utilclock.RealClock{}) + return NewExpiringWithClock(clock.RealClock{}) } // NewExpiringWithClock is like NewExpiring but allows passing in a custom // clock for testing. -func NewExpiringWithClock(clock utilclock.Clock) *Expiring { +func NewExpiringWithClock(clock clock.Clock) *Expiring { return &Expiring{ clock: clock, cache: make(map[interface{}]entry), @@ -40,7 +40,7 @@ func NewExpiringWithClock(clock utilclock.Clock) *Expiring { // Expiring is a map whose entries expire after a per-entry timeout. type Expiring struct { - clock utilclock.Clock + clock clock.Clock // mu protects the below fields mu sync.RWMutex diff --git a/vendor/k8s.io/apimachinery/pkg/util/cache/lruexpirecache.go b/vendor/k8s.io/apimachinery/pkg/util/cache/lruexpirecache.go index f6b307aa68e3b..1328dd6120217 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/cache/lruexpirecache.go +++ b/vendor/k8s.io/apimachinery/pkg/util/cache/lruexpirecache.go @@ -17,10 +17,9 @@ limitations under the License. package cache import ( + "container/list" "sync" "time" - - "github.com/hashicorp/golang-lru" ) // Clock defines an interface for obtaining the current time @@ -39,8 +38,11 @@ type LRUExpireCache struct { // clock is used to obtain the current time clock Clock - cache *lru.Cache - lock sync.Mutex + lock sync.Mutex + + maxSize int + evictionList list.List + entries map[interface{}]*list.Element } // NewLRUExpireCache creates an expiring cache with the given size @@ -50,15 +52,19 @@ func NewLRUExpireCache(maxSize int) *LRUExpireCache { // NewLRUExpireCacheWithClock creates an expiring cache with the given size, using the specified clock to obtain the current time. func NewLRUExpireCacheWithClock(maxSize int, clock Clock) *LRUExpireCache { - cache, err := lru.New(maxSize) - if err != nil { - // if called with an invalid size - panic(err) + if maxSize <= 0 { + panic("maxSize must be > 0") + } + + return &LRUExpireCache{ + clock: clock, + maxSize: maxSize, + entries: map[interface{}]*list.Element{}, } - return &LRUExpireCache{clock: clock, cache: cache} } type cacheEntry struct { + key interface{} value interface{} expireTime time.Time } @@ -67,7 +73,31 @@ type cacheEntry struct { func (c *LRUExpireCache) Add(key interface{}, value interface{}, ttl time.Duration) { c.lock.Lock() defer c.lock.Unlock() - c.cache.Add(key, &cacheEntry{value, c.clock.Now().Add(ttl)}) + + // Key already exists + oldElement, ok := c.entries[key] + if ok { + c.evictionList.MoveToFront(oldElement) + oldElement.Value.(*cacheEntry).value = value + oldElement.Value.(*cacheEntry).expireTime = c.clock.Now().Add(ttl) + return + } + + // Make space if necessary + if c.evictionList.Len() >= c.maxSize { + toEvict := c.evictionList.Back() + c.evictionList.Remove(toEvict) + delete(c.entries, toEvict.Value.(*cacheEntry).key) + } + + // Add new entry + entry := &cacheEntry{ + key: key, + value: value, + expireTime: c.clock.Now().Add(ttl), + } + element := c.evictionList.PushFront(entry) + c.entries[key] = element } // Get returns the value at the specified key from the cache if it exists and is not @@ -75,28 +105,56 @@ func (c *LRUExpireCache) Add(key interface{}, value interface{}, ttl time.Durati func (c *LRUExpireCache) Get(key interface{}) (interface{}, bool) { c.lock.Lock() defer c.lock.Unlock() - e, ok := c.cache.Get(key) + + element, ok := c.entries[key] if !ok { return nil, false } - if c.clock.Now().After(e.(*cacheEntry).expireTime) { - c.cache.Remove(key) + + if 
c.clock.Now().After(element.Value.(*cacheEntry).expireTime) { + c.evictionList.Remove(element) + delete(c.entries, key) return nil, false } - return e.(*cacheEntry).value, true + + c.evictionList.MoveToFront(element) + + return element.Value.(*cacheEntry).value, true } // Remove removes the specified key from the cache if it exists func (c *LRUExpireCache) Remove(key interface{}) { c.lock.Lock() defer c.lock.Unlock() - c.cache.Remove(key) + + element, ok := c.entries[key] + if !ok { + return + } + + c.evictionList.Remove(element) + delete(c.entries, key) } -// Keys returns all the keys in the cache, even if they are expired. Subsequent calls to -// get may return not found. It returns all keys from oldest to newest. +// Keys returns all unexpired keys in the cache. +// +// Keep in mind that subsequent calls to Get() for any of the returned keys +// might return "not found". +// +// Keys are returned ordered from least recently used to most recently used. func (c *LRUExpireCache) Keys() []interface{} { c.lock.Lock() defer c.lock.Unlock() - return c.cache.Keys() + + now := c.clock.Now() + + val := make([]interface{}, 0, c.evictionList.Len()) + for element := c.evictionList.Back(); element != nil; element = element.Prev() { + // Only return unexpired keys + if !now.After(element.Value.(*cacheEntry).expireTime) { + val = append(val, element.Value.(*cacheEntry).key) + } + } + + return val } diff --git a/vendor/k8s.io/apimachinery/pkg/util/clock/clock.go b/vendor/k8s.io/apimachinery/pkg/util/clock/clock.go deleted file mode 100644 index 1a544d3b2e4de..0000000000000 --- a/vendor/k8s.io/apimachinery/pkg/util/clock/clock.go +++ /dev/null @@ -1,445 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package clock - -import ( - "sync" - "time" -) - -// PassiveClock allows for injecting fake or real clocks into code -// that needs to read the current time but does not support scheduling -// activity in the future. -type PassiveClock interface { - Now() time.Time - Since(time.Time) time.Duration -} - -// Clock allows for injecting fake or real clocks into code that -// needs to do arbitrary things based on time. -type Clock interface { - PassiveClock - After(time.Duration) <-chan time.Time - AfterFunc(time.Duration, func()) Timer - NewTimer(time.Duration) Timer - Sleep(time.Duration) - NewTicker(time.Duration) Ticker -} - -// RealClock really calls time.Now() -type RealClock struct{} - -// Now returns the current time. -func (RealClock) Now() time.Time { - return time.Now() -} - -// Since returns time since the specified timestamp. -func (RealClock) Since(ts time.Time) time.Duration { - return time.Since(ts) -} - -// After is the same as time.After(d). -func (RealClock) After(d time.Duration) <-chan time.Time { - return time.After(d) -} - -// AfterFunc is the same as time.AfterFunc(d, f). -func (RealClock) AfterFunc(d time.Duration, f func()) Timer { - return &realTimer{ - timer: time.AfterFunc(d, f), - } -} - -// NewTimer returns a new Timer. 
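A usage sketch for the rewritten LRUExpireCache above, now backed by container/list instead of hashicorp/golang-lru; the expected output is inferred from the new code:

package main

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/cache"
)

func main() {
	c := cache.NewLRUExpireCache(2)
	c.Add("a", 1, time.Minute)
	c.Add("b", 2, time.Minute)
	c.Add("c", 3, time.Minute) // evicts "a", the least recently used entry
	fmt.Println(c.Keys())      // [b c]: least to most recently used
}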
-func (RealClock) NewTimer(d time.Duration) Timer { - return &realTimer{ - timer: time.NewTimer(d), - } -} - -// NewTicker returns a new Ticker. -func (RealClock) NewTicker(d time.Duration) Ticker { - return &realTicker{ - ticker: time.NewTicker(d), - } -} - -// Sleep pauses the RealClock for duration d. -func (RealClock) Sleep(d time.Duration) { - time.Sleep(d) -} - -// FakePassiveClock implements PassiveClock, but returns an arbitrary time. -type FakePassiveClock struct { - lock sync.RWMutex - time time.Time -} - -// FakeClock implements Clock, but returns an arbitrary time. -type FakeClock struct { - FakePassiveClock - - // waiters are waiting for the fake time to pass their specified time - waiters []fakeClockWaiter -} - -type fakeClockWaiter struct { - targetTime time.Time - stepInterval time.Duration - skipIfBlocked bool - destChan chan time.Time - afterFunc func() -} - -// NewFakePassiveClock returns a new FakePassiveClock. -func NewFakePassiveClock(t time.Time) *FakePassiveClock { - return &FakePassiveClock{ - time: t, - } -} - -// NewFakeClock returns a new FakeClock -func NewFakeClock(t time.Time) *FakeClock { - return &FakeClock{ - FakePassiveClock: *NewFakePassiveClock(t), - } -} - -// Now returns f's time. -func (f *FakePassiveClock) Now() time.Time { - f.lock.RLock() - defer f.lock.RUnlock() - return f.time -} - -// Since returns time since the time in f. -func (f *FakePassiveClock) Since(ts time.Time) time.Duration { - f.lock.RLock() - defer f.lock.RUnlock() - return f.time.Sub(ts) -} - -// SetTime sets the time on the FakePassiveClock. -func (f *FakePassiveClock) SetTime(t time.Time) { - f.lock.Lock() - defer f.lock.Unlock() - f.time = t -} - -// After is the Fake version of time.After(d). -func (f *FakeClock) After(d time.Duration) <-chan time.Time { - f.lock.Lock() - defer f.lock.Unlock() - stopTime := f.time.Add(d) - ch := make(chan time.Time, 1) // Don't block! - f.waiters = append(f.waiters, fakeClockWaiter{ - targetTime: stopTime, - destChan: ch, - }) - return ch -} - -// AfterFunc is the Fake version of time.AfterFunc(d, callback). -func (f *FakeClock) AfterFunc(d time.Duration, cb func()) Timer { - f.lock.Lock() - defer f.lock.Unlock() - stopTime := f.time.Add(d) - ch := make(chan time.Time, 1) // Don't block! - - timer := &fakeTimer{ - fakeClock: f, - waiter: fakeClockWaiter{ - targetTime: stopTime, - destChan: ch, - afterFunc: cb, - }, - } - f.waiters = append(f.waiters, timer.waiter) - return timer -} - -// NewTimer is the Fake version of time.NewTimer(d). -func (f *FakeClock) NewTimer(d time.Duration) Timer { - f.lock.Lock() - defer f.lock.Unlock() - stopTime := f.time.Add(d) - ch := make(chan time.Time, 1) // Don't block! - timer := &fakeTimer{ - fakeClock: f, - waiter: fakeClockWaiter{ - targetTime: stopTime, - destChan: ch, - }, - } - f.waiters = append(f.waiters, timer.waiter) - return timer -} - -// NewTicker returns a new Ticker. -func (f *FakeClock) NewTicker(d time.Duration) Ticker { - f.lock.Lock() - defer f.lock.Unlock() - tickTime := f.time.Add(d) - ch := make(chan time.Time, 1) // hold one tick - f.waiters = append(f.waiters, fakeClockWaiter{ - targetTime: tickTime, - stepInterval: d, - skipIfBlocked: true, - destChan: ch, - }) - - return &fakeTicker{ - c: ch, - } -} - -// Step moves clock by Duration, notifies anyone that's called After, Tick, or NewTimer -func (f *FakeClock) Step(d time.Duration) { - f.lock.Lock() - defer f.lock.Unlock() - f.setTimeLocked(f.time.Add(d)) -} - -// SetTime sets the time on a FakeClock. 
-func (f *FakeClock) SetTime(t time.Time) { - f.lock.Lock() - defer f.lock.Unlock() - f.setTimeLocked(t) -} - -// Actually changes the time and checks any waiters. f must be write-locked. -func (f *FakeClock) setTimeLocked(t time.Time) { - f.time = t - newWaiters := make([]fakeClockWaiter, 0, len(f.waiters)) - for i := range f.waiters { - w := &f.waiters[i] - if !w.targetTime.After(t) { - - if w.skipIfBlocked { - select { - case w.destChan <- t: - default: - } - } else { - w.destChan <- t - } - - if w.afterFunc != nil { - w.afterFunc() - } - - if w.stepInterval > 0 { - for !w.targetTime.After(t) { - w.targetTime = w.targetTime.Add(w.stepInterval) - } - newWaiters = append(newWaiters, *w) - } - - } else { - newWaiters = append(newWaiters, f.waiters[i]) - } - } - f.waiters = newWaiters -} - -// HasWaiters returns true if After or AfterFunc has been called on f but not yet satisfied -// (so you can write race-free tests). -func (f *FakeClock) HasWaiters() bool { - f.lock.RLock() - defer f.lock.RUnlock() - return len(f.waiters) > 0 -} - -// Sleep pauses the FakeClock for duration d. -func (f *FakeClock) Sleep(d time.Duration) { - f.Step(d) -} - -// IntervalClock implements Clock, but each invocation of Now steps the clock forward the specified duration -type IntervalClock struct { - Time time.Time - Duration time.Duration -} - -// Now returns i's time. -func (i *IntervalClock) Now() time.Time { - i.Time = i.Time.Add(i.Duration) - return i.Time -} - -// Since returns time since the time in i. -func (i *IntervalClock) Since(ts time.Time) time.Duration { - return i.Time.Sub(ts) -} - -// After is currently unimplemented, will panic. -// TODO: make interval clock use FakeClock so this can be implemented. -func (*IntervalClock) After(d time.Duration) <-chan time.Time { - panic("IntervalClock doesn't implement After") -} - -// AfterFunc is currently unimplemented, will panic. -// TODO: make interval clock use FakeClock so this can be implemented. -func (*IntervalClock) AfterFunc(d time.Duration, cb func()) Timer { - panic("IntervalClock doesn't implement AfterFunc") -} - -// NewTimer is currently unimplemented, will panic. -// TODO: make interval clock use FakeClock so this can be implemented. -func (*IntervalClock) NewTimer(d time.Duration) Timer { - panic("IntervalClock doesn't implement NewTimer") -} - -// NewTicker is currently unimplemented, will panic. -// TODO: make interval clock use FakeClock so this can be implemented. -func (*IntervalClock) NewTicker(d time.Duration) Ticker { - panic("IntervalClock doesn't implement NewTicker") -} - -// Sleep is currently unimplemented; will panic. -func (*IntervalClock) Sleep(d time.Duration) { - panic("IntervalClock doesn't implement Sleep") -} - -// Timer allows for injecting fake or real timers into code that -// needs to do arbitrary things based on time. -type Timer interface { - C() <-chan time.Time - Stop() bool - Reset(d time.Duration) bool -} - -// realTimer is backed by an actual time.Timer. -type realTimer struct { - timer *time.Timer -} - -// C returns the underlying timer's channel. -func (r *realTimer) C() <-chan time.Time { - return r.timer.C -} - -// Stop calls Stop() on the underlying timer. -func (r *realTimer) Stop() bool { - return r.timer.Stop() -} - -// Reset calls Reset() on the underlying timer. -func (r *realTimer) Reset(d time.Duration) bool { - return r.timer.Reset(d) -} - -// fakeTimer implements Timer based on a FakeClock. 
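The util/clock package being deleted here moved to k8s.io/utils/clock (see the expiring.go import change above). A hedged test sketch, assuming the fake clock that ships in k8s.io/utils/clock/testing:

package main

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/cache"
	clocktesting "k8s.io/utils/clock/testing"
)

func main() {
	fc := clocktesting.NewFakeClock(time.Now())
	c := cache.NewExpiringWithClock(fc)
	c.Set("k", "v", time.Minute)
	fc.Step(2 * time.Minute) // advance past the TTL without sleeping
	_, ok := c.Get("k")
	fmt.Println(ok) // false: the entry expired under the fake clock
}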
-type fakeTimer struct { - fakeClock *FakeClock - waiter fakeClockWaiter -} - -// C returns the channel that notifies when this timer has fired. -func (f *fakeTimer) C() <-chan time.Time { - return f.waiter.destChan -} - -// Stop conditionally stops the timer. If the timer has neither fired -// nor been stopped then this call stops the timer and returns true, -// otherwise this call returns false. This is like time.Timer::Stop. -func (f *fakeTimer) Stop() bool { - f.fakeClock.lock.Lock() - defer f.fakeClock.lock.Unlock() - // The timer has already fired or been stopped, unless it is found - // among the clock's waiters. - stopped := false - oldWaiters := f.fakeClock.waiters - newWaiters := make([]fakeClockWaiter, 0, len(oldWaiters)) - seekChan := f.waiter.destChan - for i := range oldWaiters { - // Identify the timer's fakeClockWaiter by the identity of the - // destination channel, nothing else is necessarily unique and - // constant since the timer's creation. - if oldWaiters[i].destChan == seekChan { - stopped = true - } else { - newWaiters = append(newWaiters, oldWaiters[i]) - } - } - - f.fakeClock.waiters = newWaiters - - return stopped -} - -// Reset conditionally updates the firing time of the timer. If the -// timer has neither fired nor been stopped then this call resets the -// timer to the fake clock's "now" + d and returns true, otherwise -// it creates a new waiter, adds it to the clock, and returns true. -// -// It is not possible to return false, because a fake timer can be reset -// from any state (waiting to fire, already fired, and stopped). -// -// See the GoDoc for time.Timer::Reset for more context on why -// the return value of Reset() is not useful. -func (f *fakeTimer) Reset(d time.Duration) bool { - f.fakeClock.lock.Lock() - defer f.fakeClock.lock.Unlock() - waiters := f.fakeClock.waiters - seekChan := f.waiter.destChan - for i := range waiters { - if waiters[i].destChan == seekChan { - waiters[i].targetTime = f.fakeClock.time.Add(d) - return true - } - } - // No existing waiter, timer has already fired or been reset. - // We should still enable Reset() to succeed by creating a - // new waiter and adding it to the clock's waiters. - newWaiter := fakeClockWaiter{ - targetTime: f.fakeClock.time.Add(d), - destChan: seekChan, - } - f.fakeClock.waiters = append(f.fakeClock.waiters, newWaiter) - return true -} - -// Ticker defines the Ticker interface -type Ticker interface { - C() <-chan time.Time - Stop() -} - -type realTicker struct { - ticker *time.Ticker -} - -func (t *realTicker) C() <-chan time.Time { - return t.ticker.C -} - -func (t *realTicker) Stop() { - t.ticker.Stop() -} - -type fakeTicker struct { - c <-chan time.Time -} - -func (t *fakeTicker) C() <-chan time.Time { - return t.c -} - -func (t *fakeTicker) Stop() { -} diff --git a/vendor/k8s.io/apimachinery/pkg/util/framer/framer.go b/vendor/k8s.io/apimachinery/pkg/util/framer/framer.go index 45aa74bf582f1..10df0d99cd58a 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/framer/framer.go +++ b/vendor/k8s.io/apimachinery/pkg/util/framer/framer.go @@ -132,14 +132,14 @@ func (r *jsonFrameReader) Read(data []byte) (int, error) { // Return whatever remaining data exists from an in progress frame if n := len(r.remaining); n > 0 { if n <= len(data) { - //lint:ignore SA4006,SA4010 underlying array of data is modified here. + //nolint:staticcheck // SA4006,SA4010 underlying array of data is modified here. data = append(data[0:0], r.remaining...) 
r.remaining = nil return n, nil } n = len(data) - //lint:ignore SA4006,SA4010 underlying array of data is modified here. + //nolint:staticcheck // SA4006,SA4010 underlying array of data is modified here. data = append(data[0:0], r.remaining[:n]...) r.remaining = r.remaining[n:] return n, io.ErrShortBuffer @@ -157,7 +157,7 @@ func (r *jsonFrameReader) Read(data []byte) (int, error) { // and set m to it, which means we need to copy the partial result back into data and preserve // the remaining result for subsequent reads. if len(m) > n { - //lint:ignore SA4006,SA4010 underlying array of data is modified here. + //nolint:staticcheck // SA4006,SA4010 underlying array of data is modified here. data = append(data[0:0], m[:n]...) r.remaining = m[n:] return n, io.ErrShortBuffer diff --git a/vendor/k8s.io/apimachinery/pkg/util/intstr/instr_fuzz.go b/vendor/k8s.io/apimachinery/pkg/util/intstr/instr_fuzz.go index 2501d55166cd8..a502b5adb6970 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/intstr/instr_fuzz.go +++ b/vendor/k8s.io/apimachinery/pkg/util/intstr/instr_fuzz.go @@ -1,3 +1,4 @@ +//go:build !notest // +build !notest /* diff --git a/vendor/k8s.io/apimachinery/pkg/util/json/json.go b/vendor/k8s.io/apimachinery/pkg/util/json/json.go index 778e58f704b9b..55dba361c3625 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/json/json.go +++ b/vendor/k8s.io/apimachinery/pkg/util/json/json.go @@ -17,10 +17,11 @@ limitations under the License. package json import ( - "bytes" "encoding/json" "fmt" "io" + + kjson "sigs.k8s.io/json" ) // NewEncoder delegates to json.NewEncoder @@ -38,50 +39,11 @@ func Marshal(v interface{}) ([]byte, error) { // limit recursive depth to prevent stack overflow errors const maxDepth = 10000 -// Unmarshal unmarshals the given data -// If v is a *map[string]interface{}, *[]interface{}, or *interface{} numbers -// are converted to int64 or float64 +// Unmarshal unmarshals the given data. +// Object keys are case-sensitive. +// Numbers decoded into interface{} fields are converted to int64 or float64. 
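A small demonstration of what delegating to sigs.k8s.io/json preserves from the removed hand-rolled decoder: keys are matched case-sensitively, and integral numbers land in interface{} as int64 rather than float64:

package main

import (
	"fmt"

	utiljson "k8s.io/apimachinery/pkg/util/json"
)

func main() {
	var v map[string]interface{}
	_ = utiljson.Unmarshal([]byte(`{"replicas": 3}`), &v)
	fmt.Printf("%T\n", v["replicas"]) // int64, not float64
}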
 func Unmarshal(data []byte, v interface{}) error {
-	switch v := v.(type) {
-	case *map[string]interface{}:
-		// Build a decoder from the given data
-		decoder := json.NewDecoder(bytes.NewBuffer(data))
-		// Preserve numbers, rather than casting to float64 automatically
-		decoder.UseNumber()
-		// Run the decode
-		if err := decoder.Decode(v); err != nil {
-			return err
-		}
-		// If the decode succeeds, post-process the map to convert json.Number objects to int64 or float64
-		return ConvertMapNumbers(*v, 0)
-
-	case *[]interface{}:
-		// Build a decoder from the given data
-		decoder := json.NewDecoder(bytes.NewBuffer(data))
-		// Preserve numbers, rather than casting to float64 automatically
-		decoder.UseNumber()
-		// Run the decode
-		if err := decoder.Decode(v); err != nil {
-			return err
-		}
-		// If the decode succeeds, post-process the map to convert json.Number objects to int64 or float64
-		return ConvertSliceNumbers(*v, 0)
-
-	case *interface{}:
-		// Build a decoder from the given data
-		decoder := json.NewDecoder(bytes.NewBuffer(data))
-		// Preserve numbers, rather than casting to float64 automatically
-		decoder.UseNumber()
-		// Run the decode
-		if err := decoder.Decode(v); err != nil {
-			return err
-		}
-		// If the decode succeeds, post-process the map to convert json.Number objects to int64 or float64
-		return ConvertInterfaceNumbers(v, 0)
-
-	default:
-		return json.Unmarshal(data, v)
-	}
+	return kjson.UnmarshalCaseSensitivePreserveInts(data, v)
 }
 
 // ConvertInterfaceNumbers converts any json.Number values to int64 or float64.
diff --git a/vendor/k8s.io/apimachinery/pkg/util/managedfields/extract.go b/vendor/k8s.io/apimachinery/pkg/util/managedfields/extract.go
index b7c95322577af..792badbc3d5ff 100644
--- a/vendor/k8s.io/apimachinery/pkg/util/managedfields/extract.go
+++ b/vendor/k8s.io/apimachinery/pkg/util/managedfields/extract.go
@@ -35,7 +35,22 @@ import (
 // that no managed fields were found for the fieldManager because other field managers
 // have taken ownership of all the fields previously owned by the fieldManager. It is
 // also possible the fieldManager never owned fields.
-func ExtractInto(object runtime.Object, objectType typed.ParseableType, fieldManager string, applyConfiguration interface{}) error {
+//
+// The provided object MUST be a root resource object since subresource objects
+// do not contain their own managed fields. For example, an autoscaling.Scale
+// object read from a "scale" subresource does not have any managed fields and so
+// cannot be used as the object.
+//
+// If the fields of a subresource are a subset of the fields of the root object,
+// and their field paths and types are exactly the same, then ExtractInto can be
+// called with the root resource as the object and the subresource as the
+// applyConfiguration. This works for "status", obviously, because status is
+// represented by the exact same object as the root resource. This does NOT
+// work, for example, with the "scale" subresources of Deployment, ReplicaSet and
+// StatefulSet. While the spec.replicas and status.replicas fields are in the
+// exact same field path locations as they are in autoscaling.Scale, the selector
+// fields are in different locations, and are a different type.
+func ExtractInto(object runtime.Object, objectType typed.ParseableType, fieldManager string, applyConfiguration interface{}, subresource string) error { typedObj, err := toTyped(object, objectType) if err != nil { return fmt.Errorf("error converting obj to typed: %w", err) @@ -45,7 +60,7 @@ func ExtractInto(object runtime.Object, objectType typed.ParseableType, fieldMan if err != nil { return fmt.Errorf("error accessing metadata: %w", err) } - fieldsEntry, ok := findManagedFields(accessor, fieldManager) + fieldsEntry, ok := findManagedFields(accessor, fieldManager, subresource) if !ok { return nil } @@ -60,16 +75,23 @@ func ExtractInto(object runtime.Object, objectType typed.ParseableType, fieldMan if !ok { return fmt.Errorf("unable to convert managed fields for %s to unstructured, expected map, got %T", fieldManager, u) } + + // set the type meta manually if it doesn't exist to avoid missing kind errors + // when decoding from unstructured JSON + if _, ok := m["kind"]; !ok && object.GetObjectKind().GroupVersionKind().Kind != "" { + m["kind"] = object.GetObjectKind().GroupVersionKind().Kind + m["apiVersion"] = object.GetObjectKind().GroupVersionKind().GroupVersion().String() + } if err := runtime.DefaultUnstructuredConverter.FromUnstructured(m, applyConfiguration); err != nil { return fmt.Errorf("error extracting into obj from unstructured: %w", err) } return nil } -func findManagedFields(accessor metav1.Object, fieldManager string) (metav1.ManagedFieldsEntry, bool) { +func findManagedFields(accessor metav1.Object, fieldManager string, subresource string) (metav1.ManagedFieldsEntry, bool) { objManagedFields := accessor.GetManagedFields() for _, mf := range objManagedFields { - if mf.Manager == fieldManager && mf.Operation == metav1.ManagedFieldsOperationApply { + if mf.Manager == fieldManager && mf.Operation == metav1.ManagedFieldsOperationApply && mf.Subresource == subresource { return mf, true } } diff --git a/vendor/k8s.io/apimachinery/pkg/util/managedfields/gvkparser.go b/vendor/k8s.io/apimachinery/pkg/util/managedfields/gvkparser.go new file mode 100644 index 0000000000000..0cc228af936cd --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/managedfields/gvkparser.go @@ -0,0 +1,127 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package managedfields + +import ( + "fmt" + + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/kube-openapi/pkg/schemaconv" + "k8s.io/kube-openapi/pkg/util/proto" + "sigs.k8s.io/structured-merge-diff/v4/typed" +) + +// groupVersionKindExtensionKey is the key used to lookup the +// GroupVersionKind value for an object definition from the +// definition's "extensions" map. +const groupVersionKindExtensionKey = "x-kubernetes-group-version-kind" + +// GvkParser contains a Parser that allows introspecting the schema. +type GvkParser struct { + gvks map[schema.GroupVersionKind]string + parser typed.Parser +} + +// Type returns a helper which can produce objects of the given type. 
Any
+// errors are deferred until a further function is called.
+func (p *GvkParser) Type(gvk schema.GroupVersionKind) *typed.ParseableType {
+	typeName, ok := p.gvks[gvk]
+	if !ok {
+		return nil
+	}
+	t := p.parser.Type(typeName)
+	return &t
+}
+
+// NewGVKParser builds a GVKParser from a proto.Models. This
+// will automatically find the proper version of the object, and the
+// corresponding schema information.
+func NewGVKParser(models proto.Models, preserveUnknownFields bool) (*GvkParser, error) {
+	typeSchema, err := schemaconv.ToSchemaWithPreserveUnknownFields(models, preserveUnknownFields)
+	if err != nil {
+		return nil, fmt.Errorf("failed to convert models to schema: %v", err)
+	}
+	parser := GvkParser{
+		gvks: map[schema.GroupVersionKind]string{},
+	}
+	parser.parser = typed.Parser{Schema: *typeSchema}
+	for _, modelName := range models.ListModels() {
+		model := models.LookupModel(modelName)
+		if model == nil {
+			panic(fmt.Sprintf("ListModels returns a model that can't be looked-up for: %v", modelName))
+		}
+		gvkList := parseGroupVersionKind(model)
+		for _, gvk := range gvkList {
+			if len(gvk.Kind) > 0 {
+				_, ok := parser.gvks[gvk]
+				if ok {
+					return nil, fmt.Errorf("duplicate entry for %v", gvk)
+				}
+				parser.gvks[gvk] = modelName
+			}
+		}
+	}
+	return &parser, nil
+}
+
+// Get and parse GroupVersionKind from the extension. Returns empty if it doesn't have one.
+func parseGroupVersionKind(s proto.Schema) []schema.GroupVersionKind {
+	extensions := s.GetExtensions()
+
+	gvkListResult := []schema.GroupVersionKind{}
+
+	// Get the extensions
+	gvkExtension, ok := extensions[groupVersionKindExtensionKey]
+	if !ok {
+		return []schema.GroupVersionKind{}
+	}
+
+	// gvk extension must be a list of at least 1 element.
+	gvkList, ok := gvkExtension.([]interface{})
+	if !ok {
+		return []schema.GroupVersionKind{}
+	}
+
+	for _, gvk := range gvkList {
+		// gvk extension list must be a map with group, version, and
+		// kind fields
+		gvkMap, ok := gvk.(map[interface{}]interface{})
+		if !ok {
+			continue
+		}
+		group, ok := gvkMap["group"].(string)
+		if !ok {
+			continue
+		}
+		version, ok := gvkMap["version"].(string)
+		if !ok {
+			continue
+		}
+		kind, ok := gvkMap["kind"].(string)
+		if !ok {
+			continue
+		}
+
+		gvkListResult = append(gvkListResult, schema.GroupVersionKind{
+			Group:   group,
+			Version: version,
+			Kind:    kind,
+		})
+	}
+
+	return gvkListResult
+}
diff --git a/vendor/k8s.io/apimachinery/pkg/util/net/http.go b/vendor/k8s.io/apimachinery/pkg/util/net/http.go
index ce69b8054b514..04432b175d668 100644
--- a/vendor/k8s.io/apimachinery/pkg/util/net/http.go
+++ b/vendor/k8s.io/apimachinery/pkg/util/net/http.go
@@ -39,6 +39,7 @@ import (
 
 	"golang.org/x/net/http2"
 	"k8s.io/klog/v2"
+	netutils "k8s.io/utils/net"
 )
 
 // JoinPreservingTrailingSlash does a path.Join of the specified elements,
@@ -113,6 +114,7 @@ func SetOldTransportDefaults(t *http.Transport) *http.Transport {
 		t.Proxy = NewProxierWithNoProxyCIDR(http.ProxyFromEnvironment)
 	}
 	// If no custom dialer is set, use the default context dialer
+	//lint:file-ignore SA1019 Keep supporting deprecated Dial method of custom transports
 	if t.DialContext == nil && t.Dial == nil {
 		t.DialContext = defaultTransport.DialContext
 	}
@@ -236,6 +238,29 @@ func DialerFor(transport http.RoundTripper) (DialFunc, error) {
 	}
 }
 
+// CloseIdleConnectionsFor closes idle connections for the Transport.
+// If the Transport is wrapped it iterates over the wrapped round trippers
+// until it finds one that implements the CloseIdleConnections method.
+// If the Transport does not have a CloseIdleConnections method +// then this function does nothing. +func CloseIdleConnectionsFor(transport http.RoundTripper) { + if transport == nil { + return + } + type closeIdler interface { + CloseIdleConnections() + } + + switch transport := transport.(type) { + case closeIdler: + transport.CloseIdleConnections() + case RoundTripperWrapper: + CloseIdleConnectionsFor(transport.WrappedRoundTripper()) + default: + klog.Warningf("unknown transport type: %T", transport) + } +} + type TLSClientConfigHolder interface { TLSClientConfig() *tls.Config } @@ -288,7 +313,7 @@ func SourceIPs(req *http.Request) []net.IP { // Use the first valid one. parts := strings.Split(hdrForwardedFor, ",") for _, part := range parts { - ip := net.ParseIP(strings.TrimSpace(part)) + ip := netutils.ParseIPSloppy(strings.TrimSpace(part)) if ip != nil { srcIPs = append(srcIPs, ip) } @@ -298,7 +323,7 @@ func SourceIPs(req *http.Request) []net.IP { // Try the X-Real-Ip header. hdrRealIp := hdr.Get("X-Real-Ip") if hdrRealIp != "" { - ip := net.ParseIP(hdrRealIp) + ip := netutils.ParseIPSloppy(hdrRealIp) // Only append the X-Real-Ip if it's not already contained in the X-Forwarded-For chain. if ip != nil && !containsIP(srcIPs, ip) { srcIPs = append(srcIPs, ip) @@ -310,11 +335,11 @@ func SourceIPs(req *http.Request) []net.IP { // Remote Address in Go's HTTP server is in the form host:port so we need to split that first. host, _, err := net.SplitHostPort(req.RemoteAddr) if err == nil { - remoteIP = net.ParseIP(host) + remoteIP = netutils.ParseIPSloppy(host) } // Fallback if Remote Address was just IP. if remoteIP == nil { - remoteIP = net.ParseIP(req.RemoteAddr) + remoteIP = netutils.ParseIPSloppy(req.RemoteAddr) } // Don't duplicate remote IP if it's already the last address in the chain. 
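A side note on the netutils swap in this file — illustrative only, not part of the patch: ParseIPSloppy restores the pre-Go-1.17 tolerance for leading zeros in IPv4 octets, which net.ParseIP now rejects. A quick sketch:

package main

import (
	"fmt"
	"net"

	netutils "k8s.io/utils/net"
)

func main() {
	// Go 1.17+ rejects IPv4 octets with leading zeros.
	fmt.Println(net.ParseIP("010.1.2.3")) // <nil>
	// ParseIPSloppy keeps the old lenient parsing, so headers like
	// X-Forwarded-For that contain such addresses keep working.
	fmt.Println(netutils.ParseIPSloppy("010.1.2.3")) // 10.1.2.3
}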
@@ -381,7 +406,7 @@ func NewProxierWithNoProxyCIDR(delegate func(req *http.Request) (*url.URL, error cidrs := []*net.IPNet{} for _, noProxyRule := range noProxyRules { - _, cidr, _ := net.ParseCIDR(noProxyRule) + _, cidr, _ := netutils.ParseCIDRSloppy(noProxyRule) if cidr != nil { cidrs = append(cidrs, cidr) } @@ -392,7 +417,7 @@ func NewProxierWithNoProxyCIDR(delegate func(req *http.Request) (*url.URL, error } return func(req *http.Request) (*url.URL, error) { - ip := net.ParseIP(req.URL.Hostname()) + ip := netutils.ParseIPSloppy(req.URL.Hostname()) if ip == nil { return delegate(req) } diff --git a/vendor/k8s.io/apimachinery/pkg/util/net/interface.go b/vendor/k8s.io/apimachinery/pkg/util/net/interface.go index 9adf4cfe477a8..8224168064836 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/net/interface.go +++ b/vendor/k8s.io/apimachinery/pkg/util/net/interface.go @@ -27,6 +27,7 @@ import ( "strings" "k8s.io/klog/v2" + netutils "k8s.io/utils/net" ) type AddressFamily uint @@ -221,7 +222,7 @@ func getMatchingGlobalIP(addrs []net.Addr, family AddressFamily) (net.IP, error) if len(addrs) > 0 { for i := range addrs { klog.V(4).Infof("Checking addr %s.", addrs[i].String()) - ip, _, err := net.ParseCIDR(addrs[i].String()) + ip, _, err := netutils.ParseCIDRSloppy(addrs[i].String()) if err != nil { return nil, err } @@ -336,7 +337,7 @@ func chooseIPFromHostInterfaces(nw networkInterfacer, addressFamilies AddressFam continue } for _, addr := range addrs { - ip, _, err := net.ParseCIDR(addr.String()) + ip, _, err := netutils.ParseCIDRSloppy(addr.String()) if err != nil { return nil, fmt.Errorf("Unable to parse CIDR for interface %q: %s", intf.Name, err) } diff --git a/vendor/k8s.io/apimachinery/pkg/util/validation/field/errors.go b/vendor/k8s.io/apimachinery/pkg/util/validation/field/errors.go index 0cd5d65775a70..2ed368f569377 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/validation/field/errors.go +++ b/vendor/k8s.io/apimachinery/pkg/util/validation/field/errors.go @@ -181,7 +181,7 @@ func Invalid(field *Path, value interface{}, detail string) *Error { // valid values). func NotSupported(field *Path, value interface{}, validValues []string) *Error { detail := "" - if validValues != nil && len(validValues) > 0 { + if len(validValues) > 0 { quotedValues := make([]string, len(validValues)) for i, v := range validValues { quotedValues[i] = strconv.Quote(v) @@ -239,6 +239,9 @@ func NewErrorTypeMatcher(t ErrorType) utilerrors.Matcher { // ToAggregate converts the ErrorList into an errors.Aggregate. func (list ErrorList) ToAggregate() utilerrors.Aggregate { + if len(list) == 0 { + return nil + } errs := make([]error, 0, len(list)) errorMsgs := sets.NewString() for _, err := range list { diff --git a/vendor/k8s.io/apimachinery/pkg/util/validation/validation.go b/vendor/k8s.io/apimachinery/pkg/util/validation/validation.go index c8b419984057b..83df4fb8d42bd 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/validation/validation.go +++ b/vendor/k8s.io/apimachinery/pkg/util/validation/validation.go @@ -25,6 +25,7 @@ import ( "strings" "k8s.io/apimachinery/pkg/util/validation/field" + netutils "k8s.io/utils/net" ) const qnameCharFmt string = "[A-Za-z0-9]" @@ -346,7 +347,7 @@ func IsValidPortName(port string) []string { // IsValidIP tests that the argument is a valid IP address. func IsValidIP(value string) []string { - if net.ParseIP(value) == nil { + if netutils.ParseIPSloppy(value) == nil { return []string{"must be a valid IP address, (e.g. 
10.9.8.7 or 2001:db8::ffff)"} } return nil @@ -355,7 +356,7 @@ func IsValidIP(value string) []string { // IsValidIPv4Address tests that the argument is a valid IPv4 address. func IsValidIPv4Address(fldPath *field.Path, value string) field.ErrorList { var allErrors field.ErrorList - ip := net.ParseIP(value) + ip := netutils.ParseIPSloppy(value) if ip == nil || ip.To4() == nil { allErrors = append(allErrors, field.Invalid(fldPath, value, "must be a valid IPv4 address")) } @@ -365,7 +366,7 @@ func IsValidIPv4Address(fldPath *field.Path, value string) field.ErrorList { // IsValidIPv6Address tests that the argument is a valid IPv6 address. func IsValidIPv6Address(fldPath *field.Path, value string) field.ErrorList { var allErrors field.ErrorList - ip := net.ParseIP(value) + ip := netutils.ParseIPSloppy(value) if ip == nil || ip.To4() != nil { allErrors = append(allErrors, field.Invalid(fldPath, value, "must be a valid IPv6 address")) } diff --git a/vendor/k8s.io/apimachinery/pkg/util/wait/wait.go b/vendor/k8s.io/apimachinery/pkg/util/wait/wait.go index 3dea7fe7f9eba..ec5be90b825a6 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/wait/wait.go +++ b/vendor/k8s.io/apimachinery/pkg/util/wait/wait.go @@ -24,8 +24,8 @@ import ( "sync" "time" - "k8s.io/apimachinery/pkg/util/clock" "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/utils/clock" ) // For any test of the style: @@ -166,6 +166,9 @@ func BackoffUntil(f func(), backoff BackoffManager, sliding bool, stopCh <-chan // of every loop to prevent extra executions of f(). select { case <-stopCh: + if !t.Stop() { + <-t.C() + } return case <-t.C(): } @@ -205,10 +208,29 @@ var ErrWaitTimeout = errors.New("timed out waiting for the condition") // if the loop should be aborted. type ConditionFunc func() (done bool, err error) +// ConditionWithContextFunc returns true if the condition is satisfied, or an error +// if the loop should be aborted. +// +// The caller passes along a context that can be used by the condition function. +type ConditionWithContextFunc func(context.Context) (done bool, err error) + +// WithContext converts a ConditionFunc into a ConditionWithContextFunc +func (cf ConditionFunc) WithContext() ConditionWithContextFunc { + return func(context.Context) (done bool, err error) { + return cf() + } +} + // runConditionWithCrashProtection runs a ConditionFunc with crash protection func runConditionWithCrashProtection(condition ConditionFunc) (bool, error) { + return runConditionWithCrashProtectionWithContext(context.TODO(), condition.WithContext()) +} + +// runConditionWithCrashProtectionWithContext runs a +// ConditionWithContextFunc with crash protection. +func runConditionWithCrashProtectionWithContext(ctx context.Context, condition ConditionWithContextFunc) (bool, error) { defer runtime.HandleCrash() - return condition() + return condition(ctx) } // Backoff holds parameters applied to a Backoff function. @@ -418,38 +440,42 @@ func ExponentialBackoff(backoff Backoff, condition ConditionFunc) error { // // If you want to Poll something forever, see PollInfinite. 
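For orientation — not part of the patch — a minimal sketch of the context-aware polling variants introduced in the hunks below; the condition, durations, and counts are made up:

package main

import (
	"context"
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()

	attempts := 0
	// Polls every 100ms until the condition succeeds, errors, the 1s
	// timeout elapses, or ctx is cancelled, whichever happens first.
	err := wait.PollWithContext(ctx, 100*time.Millisecond, time.Second,
		func(ctx context.Context) (bool, error) {
			attempts++
			return attempts >= 3, nil
		})
	fmt.Println(attempts, err) // 3 <nil>
}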
func Poll(interval, timeout time.Duration, condition ConditionFunc) error { - return pollInternal(poller(interval, timeout), condition) -} - -func pollInternal(wait WaitFunc, condition ConditionFunc) error { - done := make(chan struct{}) - defer close(done) - return WaitFor(wait, condition, done) + return PollWithContext(context.Background(), interval, timeout, condition.WithContext()) } -// PollImmediate tries a condition func until it returns true, an error, or the timeout -// is reached. +// PollWithContext tries a condition func until it returns true, an error, +// or when the context expires or the timeout is reached, whichever +// happens first. // -// PollImmediate always checks 'condition' before waiting for the interval. 'condition' -// will always be invoked at least once. +// PollWithContext always waits the interval before the run of 'condition'. +// 'condition' will always be invoked at least once. // // Some intervals may be missed if the condition takes too long or the time // window is too short. // -// If you want to immediately Poll something forever, see PollImmediateInfinite. -func PollImmediate(interval, timeout time.Duration, condition ConditionFunc) error { - return pollImmediateInternal(poller(interval, timeout), condition) +// If you want to Poll something forever, see PollInfinite. +func PollWithContext(ctx context.Context, interval, timeout time.Duration, condition ConditionWithContextFunc) error { + return poll(ctx, false, poller(interval, timeout), condition) } -func pollImmediateInternal(wait WaitFunc, condition ConditionFunc) error { - done, err := runConditionWithCrashProtection(condition) - if err != nil { - return err - } - if done { - return nil - } - return pollInternal(wait, condition) +// PollUntil tries a condition func until it returns true, an error or stopCh is +// closed. +// +// PollUntil always waits interval before the first run of 'condition'. +// 'condition' will always be invoked at least once. +func PollUntil(interval time.Duration, condition ConditionFunc, stopCh <-chan struct{}) error { + ctx, cancel := contextForChannel(stopCh) + defer cancel() + return PollUntilWithContext(ctx, interval, condition.WithContext()) +} + +// PollUntilWithContext tries a condition func until it returns true, +// an error or the specified context is cancelled or expired. +// +// PollUntilWithContext always waits interval before the first run of 'condition'. +// 'condition' will always be invoked at least once. +func PollUntilWithContext(ctx context.Context, interval time.Duration, condition ConditionWithContextFunc) error { + return poll(ctx, false, poller(interval, 0), condition) } // PollInfinite tries a condition func until it returns true or an error @@ -459,37 +485,45 @@ func pollImmediateInternal(wait WaitFunc, condition ConditionFunc) error { // Some intervals may be missed if the condition takes too long or the time // window is too short. func PollInfinite(interval time.Duration, condition ConditionFunc) error { - done := make(chan struct{}) - defer close(done) - return PollUntil(interval, condition, done) + return PollInfiniteWithContext(context.Background(), interval, condition.WithContext()) } -// PollImmediateInfinite tries a condition func until it returns true or an error +// PollInfiniteWithContext tries a condition func until it returns true or an error // -// PollImmediateInfinite runs the 'condition' before waiting for the interval. +// PollInfiniteWithContext always waits the interval before the run of 'condition'. 
// // Some intervals may be missed if the condition takes too long or the time // window is too short. -func PollImmediateInfinite(interval time.Duration, condition ConditionFunc) error { - done, err := runConditionWithCrashProtection(condition) - if err != nil { - return err - } - if done { - return nil - } - return PollInfinite(interval, condition) +func PollInfiniteWithContext(ctx context.Context, interval time.Duration, condition ConditionWithContextFunc) error { + return poll(ctx, false, poller(interval, 0), condition) } -// PollUntil tries a condition func until it returns true, an error or stopCh is -// closed. +// PollImmediate tries a condition func until it returns true, an error, or the timeout +// is reached. // -// PollUntil always waits interval before the first run of 'condition'. +// PollImmediate always checks 'condition' before waiting for the interval. 'condition' +// will always be invoked at least once. +// +// Some intervals may be missed if the condition takes too long or the time +// window is too short. +// +// If you want to immediately Poll something forever, see PollImmediateInfinite. +func PollImmediate(interval, timeout time.Duration, condition ConditionFunc) error { + return PollImmediateWithContext(context.Background(), interval, timeout, condition.WithContext()) +} + +// PollImmediateWithContext tries a condition func until it returns true, an error, +// or the timeout is reached or the specified context expires, whichever happens first. +// +// PollImmediateWithContext always checks 'condition' before waiting for the interval. // 'condition' will always be invoked at least once. -func PollUntil(interval time.Duration, condition ConditionFunc, stopCh <-chan struct{}) error { - ctx, cancel := contextForChannel(stopCh) - defer cancel() - return WaitFor(poller(interval, 0), condition, ctx.Done()) +// +// Some intervals may be missed if the condition takes too long or the time +// window is too short. +// +// If you want to immediately Poll something forever, see PollImmediateInfinite. +func PollImmediateWithContext(ctx context.Context, interval, timeout time.Duration, condition ConditionWithContextFunc) error { + return poll(ctx, true, poller(interval, timeout), condition) } // PollImmediateUntil tries a condition func until it returns true, an error or stopCh is closed. @@ -497,18 +531,67 @@ func PollUntil(interval time.Duration, condition ConditionFunc, stopCh <-chan st // PollImmediateUntil runs the 'condition' before waiting for the interval. // 'condition' will always be invoked at least once. func PollImmediateUntil(interval time.Duration, condition ConditionFunc, stopCh <-chan struct{}) error { - done, err := condition() - if err != nil { - return err - } - if done { - return nil + ctx, cancel := contextForChannel(stopCh) + defer cancel() + return PollImmediateUntilWithContext(ctx, interval, condition.WithContext()) +} + +// PollImmediateUntilWithContext tries a condition func until it returns true, +// an error or the specified context is cancelled or expired. +// +// PollImmediateUntilWithContext runs the 'condition' before waiting for the interval. +// 'condition' will always be invoked at least once. +func PollImmediateUntilWithContext(ctx context.Context, interval time.Duration, condition ConditionWithContextFunc) error { + return poll(ctx, true, poller(interval, 0), condition) +} + +// PollImmediateInfinite tries a condition func until it returns true or an error +// +// PollImmediateInfinite runs the 'condition' before waiting for the interval. 
+//
+// Some intervals may be missed if the condition takes too long or the time
+// window is too short.
+func PollImmediateInfinite(interval time.Duration, condition ConditionFunc) error {
+	return PollImmediateInfiniteWithContext(context.Background(), interval, condition.WithContext())
+}
+
+// PollImmediateInfiniteWithContext tries a condition func until it returns true
+// or an error or the specified context gets cancelled or expired.
+//
+// PollImmediateInfiniteWithContext runs the 'condition' before waiting for the interval.
+//
+// Some intervals may be missed if the condition takes too long or the time
+// window is too short.
+func PollImmediateInfiniteWithContext(ctx context.Context, interval time.Duration, condition ConditionWithContextFunc) error {
+	return poll(ctx, true, poller(interval, 0), condition)
+}
+
+// Internally used, each of the public 'Poll*' functions defined in this
+// package should invoke this internal function with appropriate parameters.
+// ctx: the context specified by the caller, for infinite polling pass
+// a context that never gets cancelled or expired.
+// immediate: if true, the 'condition' will be invoked before waiting for the interval,
+// in this case 'condition' will always be invoked at least once.
+// wait: user specified WaitWithContextFunc function that controls at what interval the condition
+// function should be invoked periodically and whether it is bound by a timeout.
+// condition: user specified ConditionWithContextFunc function.
+func poll(ctx context.Context, immediate bool, wait WaitWithContextFunc, condition ConditionWithContextFunc) error {
+	if immediate {
+		done, err := runConditionWithCrashProtectionWithContext(ctx, condition)
+		if err != nil {
+			return err
+		}
+		if done {
+			return nil
+		}
 	}
+
 	select {
-	case <-stopCh:
+	case <-ctx.Done():
+		// returning ctx.Err() will break backward compatibility
 		return ErrWaitTimeout
 	default:
-		return PollUntil(interval, condition, stopCh)
+		return WaitForWithContext(ctx, wait, condition)
 	}
 }
 
@@ -516,6 +599,20 @@ func PollImmediateUntil(interval time.Duration, condition ConditionFunc, stopCh
 // should be executed and is closed when the last test should be invoked.
 type WaitFunc func(done <-chan struct{}) <-chan struct{}
 
+// WithContext converts the WaitFunc to an equivalent WaitWithContextFunc
+func (w WaitFunc) WithContext() WaitWithContextFunc {
+	return func(ctx context.Context) <-chan struct{} {
+		return w(ctx.Done())
+	}
+}
+
+// WaitWithContextFunc creates a channel that receives an item every time a test
+// should be executed and is closed when the last test should be invoked.
+//
+// When the specified context gets cancelled or expires the function
+// stops sending items and returns immediately.
+type WaitWithContextFunc func(ctx context.Context) <-chan struct{}
+
 // WaitFor continually checks 'fn' as driven by 'wait'.
 //
 // WaitFor gets a channel from 'wait()'', and then invokes 'fn' once for every value
@@ -532,13 +629,35 @@ type WaitFunc func(done <-chan struct{}) <-chan struct{}
 // "uniform pseudo-random", the `fn` might still run one or multiple time,
 // though eventually `WaitFor` will return.
 func WaitFor(wait WaitFunc, fn ConditionFunc, done <-chan struct{}) error {
-	stopCh := make(chan struct{})
-	defer close(stopCh)
-	c := wait(stopCh)
+	ctx, cancel := contextForChannel(done)
+	defer cancel()
+	return WaitForWithContext(ctx, wait.WithContext(), fn.WithContext())
+}
+
+// WaitForWithContext continually checks 'fn' as driven by 'wait'.
+//
+// WaitForWithContext gets a channel from 'wait()', and then invokes 'fn'
+// once for every value placed on the channel and once more when the
+// channel is closed. If the channel is closed and 'fn'
+// returns false without error, WaitForWithContext returns ErrWaitTimeout.
+//
+// If 'fn' returns an error the loop ends and that error is returned. If
+// 'fn' returns true the loop ends and nil is returned.
+//
+// context.Canceled will be returned if the ctx.Done() channel is closed
+// without fn ever returning true.
+//
+// When the ctx.Done() channel is closed, because the golang `select` statement is
+// "uniform pseudo-random", the `fn` might still run one or multiple times,
+// though eventually `WaitForWithContext` will return.
+func WaitForWithContext(ctx context.Context, wait WaitWithContextFunc, fn ConditionWithContextFunc) error {
+	waitCtx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+	c := wait(waitCtx)
 	for {
 		select {
 		case _, open := <-c:
-			ok, err := runConditionWithCrashProtection(fn)
+			ok, err := runConditionWithCrashProtectionWithContext(ctx, fn)
 			if err != nil {
 				return err
 			}
@@ -548,7 +667,8 @@ func WaitFor(wait WaitFunc, fn ConditionFunc, done <-chan struct{}) error {
 			if !open {
 				return ErrWaitTimeout
 			}
-		case <-done:
+		case <-ctx.Done():
+			// returning ctx.Err() will break backward compatibility
 			return ErrWaitTimeout
 		}
 	}
@@ -564,8 +684,8 @@ func WaitFor(wait WaitFunc, fn ConditionFunc, done <-chan struct{}) error {
 //
 // Output ticks are not buffered. If the channel is not ready to receive an
 // item, the tick is skipped.
-func poller(interval, timeout time.Duration) WaitFunc {
-	return WaitFunc(func(done <-chan struct{}) <-chan struct{} {
+func poller(interval, timeout time.Duration) WaitWithContextFunc {
+	return WaitWithContextFunc(func(ctx context.Context) <-chan struct{} {
 		ch := make(chan struct{})
 
 		go func() {
@@ -595,7 +715,7 @@ func poller(interval, timeout time.Duration) WaitFunc {
 			}
 			case <-after:
 				return
-			case <-done:
+			case <-ctx.Done():
 				return
 			}
 		}
diff --git a/vendor/k8s.io/apimachinery/pkg/util/yaml/decoder.go b/vendor/k8s.io/apimachinery/pkg/util/yaml/decoder.go
index 612d63a6948a1..9837b3df281b6 100644
--- a/vendor/k8s.io/apimachinery/pkg/util/yaml/decoder.go
+++ b/vendor/k8s.io/apimachinery/pkg/util/yaml/decoder.go
@@ -59,6 +59,34 @@ func Unmarshal(data []byte, v interface{}) error {
 	}
 }
 
+// UnmarshalStrict unmarshals the given data
+// strictly (erroring when there are duplicate fields).
+func UnmarshalStrict(data []byte, v interface{}) error {
+	preserveIntFloat := func(d *json.Decoder) *json.Decoder {
+		d.UseNumber()
+		return d
+	}
+	switch v := v.(type) {
+	case *map[string]interface{}:
+		if err := yaml.UnmarshalStrict(data, v, preserveIntFloat); err != nil {
+			return err
+		}
+		return jsonutil.ConvertMapNumbers(*v, 0)
+	case *[]interface{}:
+		if err := yaml.UnmarshalStrict(data, v, preserveIntFloat); err != nil {
+			return err
+		}
+		return jsonutil.ConvertSliceNumbers(*v, 0)
+	case *interface{}:
+		if err := yaml.UnmarshalStrict(data, v, preserveIntFloat); err != nil {
+			return err
+		}
+		return jsonutil.ConvertInterfaceNumbers(v, 0)
+	default:
+		return yaml.UnmarshalStrict(data, v)
+	}
+}
+
 // ToJSON converts a single YAML document into a JSON document
 // or returns an error.
If the document appears to be JSON the // YAML decoding path is not used (so that error messages are @@ -291,15 +319,19 @@ func (r *YAMLReader) Read() ([]byte, error) { if i := bytes.Index(line, []byte(separator)); i == 0 { // We have a potential document terminator i += sep - after := line[i:] - if len(strings.TrimRightFunc(string(after), unicode.IsSpace)) == 0 { - if buffer.Len() != 0 { - return buffer.Bytes(), nil - } - if err == io.EOF { - return nil, err + trimmed := strings.TrimSpace(string(line[i:])) + // We only allow comments and spaces following the yaml doc separator, otherwise we'll return an error + if len(trimmed) > 0 && string(trimmed[0]) != "#" { + return nil, YAMLSyntaxError{ + err: fmt.Errorf("invalid Yaml document separator: %s", trimmed), } } + if buffer.Len() != 0 { + return buffer.Bytes(), nil + } + if err == io.EOF { + return nil, err + } } if err == io.EOF { if buffer.Len() != 0 { diff --git a/vendor/k8s.io/apimachinery/pkg/watch/zz_generated.deepcopy.go b/vendor/k8s.io/apimachinery/pkg/watch/zz_generated.deepcopy.go index 71ef4da3348d7..dd27d4526fcad 100644 --- a/vendor/k8s.io/apimachinery/pkg/watch/zz_generated.deepcopy.go +++ b/vendor/k8s.io/apimachinery/pkg/watch/zz_generated.deepcopy.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* diff --git a/vendor/k8s.io/apimachinery/third_party/forked/golang/LICENSE b/vendor/k8s.io/apimachinery/third_party/forked/golang/LICENSE new file mode 100644 index 0000000000000..6a66aea5eafe0 --- /dev/null +++ b/vendor/k8s.io/apimachinery/third_party/forked/golang/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/k8s.io/apimachinery/third_party/forked/golang/PATENTS b/vendor/k8s.io/apimachinery/third_party/forked/golang/PATENTS new file mode 100644 index 0000000000000..733099041f84f --- /dev/null +++ b/vendor/k8s.io/apimachinery/third_party/forked/golang/PATENTS @@ -0,0 +1,22 @@ +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the Go project. 
+ +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of Go, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of Go. This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. If you or your agent or exclusive licensee institute or +order or agree to the institution of patent litigation against any +entity (including a cross-claim or counterclaim in a lawsuit) alleging +that this implementation of Go or any code incorporated within this +implementation of Go constitutes direct or contributory patent +infringement, or inducement of patent infringement, then any patent +rights granted to you under this License for this implementation of Go +shall terminate as of the date such litigation is filed. diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/mutatingwebhookconfiguration.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/mutatingwebhookconfiguration.go index 6c1658804f435..7ae061e3ca209 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/mutatingwebhookconfiguration.go +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/mutatingwebhookconfiguration.go @@ -48,7 +48,7 @@ func MutatingWebhookConfiguration(name string) *MutatingWebhookConfigurationAppl // ExtractMutatingWebhookConfiguration extracts the applied configuration owned by fieldManager from // mutatingWebhookConfiguration. If no managedFields are found in mutatingWebhookConfiguration for fieldManager, a // MutatingWebhookConfigurationApplyConfiguration is returned with only the Name, Namespace (if applicable), -// APIVersion and Kind populated. Is is possible that no managed fields were found for because other +// APIVersion and Kind populated. It is possible that no managed fields were found for because other // field managers have taken ownership of all the fields previously owned by fieldManager, or because // the fieldManager never owned fields any fields. // mutatingWebhookConfiguration must be a unmodified MutatingWebhookConfiguration API object that was retrieved from the Kubernetes API. @@ -57,8 +57,19 @@ func MutatingWebhookConfiguration(name string) *MutatingWebhookConfigurationAppl // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! func ExtractMutatingWebhookConfiguration(mutatingWebhookConfiguration *apiadmissionregistrationv1.MutatingWebhookConfiguration, fieldManager string) (*MutatingWebhookConfigurationApplyConfiguration, error) { + return extractMutatingWebhookConfiguration(mutatingWebhookConfiguration, fieldManager, "") +} + +// ExtractMutatingWebhookConfigurationStatus is the same as ExtractMutatingWebhookConfiguration except +// that it extracts the status subresource applied configuration. +// Experimental! 
+func ExtractMutatingWebhookConfigurationStatus(mutatingWebhookConfiguration *apiadmissionregistrationv1.MutatingWebhookConfiguration, fieldManager string) (*MutatingWebhookConfigurationApplyConfiguration, error) { + return extractMutatingWebhookConfiguration(mutatingWebhookConfiguration, fieldManager, "status") +} + +func extractMutatingWebhookConfiguration(mutatingWebhookConfiguration *apiadmissionregistrationv1.MutatingWebhookConfiguration, fieldManager string, subresource string) (*MutatingWebhookConfigurationApplyConfiguration, error) { b := &MutatingWebhookConfigurationApplyConfiguration{} - err := managedfields.ExtractInto(mutatingWebhookConfiguration, internal.Parser().Type("io.k8s.api.admissionregistration.v1.MutatingWebhookConfiguration"), fieldManager, b) + err := managedfields.ExtractInto(mutatingWebhookConfiguration, internal.Parser().Type("io.k8s.api.admissionregistration.v1.MutatingWebhookConfiguration"), fieldManager, b, subresource) if err != nil { return nil, err } diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validatingwebhookconfiguration.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validatingwebhookconfiguration.go index f7802f540ff1b..ae19ed81edcbb 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validatingwebhookconfiguration.go +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validatingwebhookconfiguration.go @@ -48,7 +48,7 @@ func ValidatingWebhookConfiguration(name string) *ValidatingWebhookConfiguration // ExtractValidatingWebhookConfiguration extracts the applied configuration owned by fieldManager from // validatingWebhookConfiguration. If no managedFields are found in validatingWebhookConfiguration for fieldManager, a // ValidatingWebhookConfigurationApplyConfiguration is returned with only the Name, Namespace (if applicable), -// APIVersion and Kind populated. Is is possible that no managed fields were found for because other +// APIVersion and Kind populated. It is possible that no managed fields were found for because other // field managers have taken ownership of all the fields previously owned by fieldManager, or because // the fieldManager never owned fields any fields. // validatingWebhookConfiguration must be a unmodified ValidatingWebhookConfiguration API object that was retrieved from the Kubernetes API. @@ -57,8 +57,19 @@ func ValidatingWebhookConfiguration(name string) *ValidatingWebhookConfiguration // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! func ExtractValidatingWebhookConfiguration(validatingWebhookConfiguration *apiadmissionregistrationv1.ValidatingWebhookConfiguration, fieldManager string) (*ValidatingWebhookConfigurationApplyConfiguration, error) { + return extractValidatingWebhookConfiguration(validatingWebhookConfiguration, fieldManager, "") +} + +// ExtractValidatingWebhookConfigurationStatus is the same as ExtractValidatingWebhookConfiguration except +// that it extracts the status subresource applied configuration. +// Experimental! 
+func ExtractValidatingWebhookConfigurationStatus(validatingWebhookConfiguration *apiadmissionregistrationv1.ValidatingWebhookConfiguration, fieldManager string) (*ValidatingWebhookConfigurationApplyConfiguration, error) { + return extractValidatingWebhookConfiguration(validatingWebhookConfiguration, fieldManager, "status") +} + +func extractValidatingWebhookConfiguration(validatingWebhookConfiguration *apiadmissionregistrationv1.ValidatingWebhookConfiguration, fieldManager string, subresource string) (*ValidatingWebhookConfigurationApplyConfiguration, error) { b := &ValidatingWebhookConfigurationApplyConfiguration{} - err := managedfields.ExtractInto(validatingWebhookConfiguration, internal.Parser().Type("io.k8s.api.admissionregistration.v1.ValidatingWebhookConfiguration"), fieldManager, b) + err := managedfields.ExtractInto(validatingWebhookConfiguration, internal.Parser().Type("io.k8s.api.admissionregistration.v1.ValidatingWebhookConfiguration"), fieldManager, b, subresource) if err != nil { return nil, err } diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/mutatingwebhookconfiguration.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/mutatingwebhookconfiguration.go index 47ed1e2522848..178745c23424e 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/mutatingwebhookconfiguration.go +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/mutatingwebhookconfiguration.go @@ -48,7 +48,7 @@ func MutatingWebhookConfiguration(name string) *MutatingWebhookConfigurationAppl // ExtractMutatingWebhookConfiguration extracts the applied configuration owned by fieldManager from // mutatingWebhookConfiguration. If no managedFields are found in mutatingWebhookConfiguration for fieldManager, a // MutatingWebhookConfigurationApplyConfiguration is returned with only the Name, Namespace (if applicable), -// APIVersion and Kind populated. Is is possible that no managed fields were found for because other +// APIVersion and Kind populated. It is possible that no managed fields were found for because other // field managers have taken ownership of all the fields previously owned by fieldManager, or because // the fieldManager never owned fields any fields. // mutatingWebhookConfiguration must be a unmodified MutatingWebhookConfiguration API object that was retrieved from the Kubernetes API. @@ -57,8 +57,19 @@ func MutatingWebhookConfiguration(name string) *MutatingWebhookConfigurationAppl // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! func ExtractMutatingWebhookConfiguration(mutatingWebhookConfiguration *admissionregistrationv1beta1.MutatingWebhookConfiguration, fieldManager string) (*MutatingWebhookConfigurationApplyConfiguration, error) { + return extractMutatingWebhookConfiguration(mutatingWebhookConfiguration, fieldManager, "") +} + +// ExtractMutatingWebhookConfigurationStatus is the same as ExtractMutatingWebhookConfiguration except +// that it extracts the status subresource applied configuration. +// Experimental! 
+func ExtractMutatingWebhookConfigurationStatus(mutatingWebhookConfiguration *admissionregistrationv1beta1.MutatingWebhookConfiguration, fieldManager string) (*MutatingWebhookConfigurationApplyConfiguration, error) { + return extractMutatingWebhookConfiguration(mutatingWebhookConfiguration, fieldManager, "status") +} + +func extractMutatingWebhookConfiguration(mutatingWebhookConfiguration *admissionregistrationv1beta1.MutatingWebhookConfiguration, fieldManager string, subresource string) (*MutatingWebhookConfigurationApplyConfiguration, error) { b := &MutatingWebhookConfigurationApplyConfiguration{} - err := managedfields.ExtractInto(mutatingWebhookConfiguration, internal.Parser().Type("io.k8s.api.admissionregistration.v1beta1.MutatingWebhookConfiguration"), fieldManager, b) + err := managedfields.ExtractInto(mutatingWebhookConfiguration, internal.Parser().Type("io.k8s.api.admissionregistration.v1beta1.MutatingWebhookConfiguration"), fieldManager, b, subresource) if err != nil { return nil, err } diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingwebhookconfiguration.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingwebhookconfiguration.go index dac1c27a0996a..e60d997f8a4d4 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingwebhookconfiguration.go +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingwebhookconfiguration.go @@ -48,7 +48,7 @@ func ValidatingWebhookConfiguration(name string) *ValidatingWebhookConfiguration // ExtractValidatingWebhookConfiguration extracts the applied configuration owned by fieldManager from // validatingWebhookConfiguration. If no managedFields are found in validatingWebhookConfiguration for fieldManager, a // ValidatingWebhookConfigurationApplyConfiguration is returned with only the Name, Namespace (if applicable), -// APIVersion and Kind populated. Is is possible that no managed fields were found for because other +// APIVersion and Kind populated. It is possible that no managed fields were found for because other // field managers have taken ownership of all the fields previously owned by fieldManager, or because // the fieldManager never owned fields any fields. // validatingWebhookConfiguration must be a unmodified ValidatingWebhookConfiguration API object that was retrieved from the Kubernetes API. @@ -57,8 +57,19 @@ func ValidatingWebhookConfiguration(name string) *ValidatingWebhookConfiguration // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! func ExtractValidatingWebhookConfiguration(validatingWebhookConfiguration *admissionregistrationv1beta1.ValidatingWebhookConfiguration, fieldManager string) (*ValidatingWebhookConfigurationApplyConfiguration, error) { + return extractValidatingWebhookConfiguration(validatingWebhookConfiguration, fieldManager, "") +} + +// ExtractValidatingWebhookConfigurationStatus is the same as ExtractValidatingWebhookConfiguration except +// that it extracts the status subresource applied configuration. +// Experimental! 
+func ExtractValidatingWebhookConfigurationStatus(validatingWebhookConfiguration *admissionregistrationv1beta1.ValidatingWebhookConfiguration, fieldManager string) (*ValidatingWebhookConfigurationApplyConfiguration, error) { + return extractValidatingWebhookConfiguration(validatingWebhookConfiguration, fieldManager, "status") +} + +func extractValidatingWebhookConfiguration(validatingWebhookConfiguration *admissionregistrationv1beta1.ValidatingWebhookConfiguration, fieldManager string, subresource string) (*ValidatingWebhookConfigurationApplyConfiguration, error) { b := &ValidatingWebhookConfigurationApplyConfiguration{} - err := managedfields.ExtractInto(validatingWebhookConfiguration, internal.Parser().Type("io.k8s.api.admissionregistration.v1beta1.ValidatingWebhookConfiguration"), fieldManager, b) + err := managedfields.ExtractInto(validatingWebhookConfiguration, internal.Parser().Type("io.k8s.api.admissionregistration.v1beta1.ValidatingWebhookConfiguration"), fieldManager, b, subresource) if err != nil { return nil, err } diff --git a/vendor/k8s.io/client-go/applyconfigurations/apiserverinternal/v1alpha1/storageversion.go b/vendor/k8s.io/client-go/applyconfigurations/apiserverinternal/v1alpha1/storageversion.go index 44d9b05c01614..180b7762597a7 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apiserverinternal/v1alpha1/storageversion.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apiserverinternal/v1alpha1/storageversion.go @@ -49,7 +49,7 @@ func StorageVersion(name string) *StorageVersionApplyConfiguration { // ExtractStorageVersion extracts the applied configuration owned by fieldManager from // storageVersion. If no managedFields are found in storageVersion for fieldManager, a // StorageVersionApplyConfiguration is returned with only the Name, Namespace (if applicable), -// APIVersion and Kind populated. Is is possible that no managed fields were found for because other +// APIVersion and Kind populated. It is possible that no managed fields were found for because other // field managers have taken ownership of all the fields previously owned by fieldManager, or because // the fieldManager never owned fields any fields. // storageVersion must be a unmodified StorageVersion API object that was retrieved from the Kubernetes API. @@ -58,8 +58,19 @@ func StorageVersion(name string) *StorageVersionApplyConfiguration { // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! func ExtractStorageVersion(storageVersion *v1alpha1.StorageVersion, fieldManager string) (*StorageVersionApplyConfiguration, error) { + return extractStorageVersion(storageVersion, fieldManager, "") +} + +// ExtractStorageVersionStatus is the same as ExtractStorageVersion except +// that it extracts the status subresource applied configuration. +// Experimental! 
+func ExtractStorageVersionStatus(storageVersion *v1alpha1.StorageVersion, fieldManager string) (*StorageVersionApplyConfiguration, error) { + return extractStorageVersion(storageVersion, fieldManager, "status") +} + +func extractStorageVersion(storageVersion *v1alpha1.StorageVersion, fieldManager string, subresource string) (*StorageVersionApplyConfiguration, error) { b := &StorageVersionApplyConfiguration{} - err := managedfields.ExtractInto(storageVersion, internal.Parser().Type("io.k8s.api.apiserverinternal.v1alpha1.StorageVersion"), fieldManager, b) + err := managedfields.ExtractInto(storageVersion, internal.Parser().Type("io.k8s.api.apiserverinternal.v1alpha1.StorageVersion"), fieldManager, b, subresource) if err != nil { return nil, err } diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/controllerrevision.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/controllerrevision.go index d01f77a5e8028..28a0c582b83a9 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/controllerrevision.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/controllerrevision.go @@ -51,7 +51,7 @@ func ControllerRevision(name, namespace string) *ControllerRevisionApplyConfigur // ExtractControllerRevision extracts the applied configuration owned by fieldManager from // controllerRevision. If no managedFields are found in controllerRevision for fieldManager, a // ControllerRevisionApplyConfiguration is returned with only the Name, Namespace (if applicable), -// APIVersion and Kind populated. Is is possible that no managed fields were found for because other +// APIVersion and Kind populated. It is possible that no managed fields were found for because other // field managers have taken ownership of all the fields previously owned by fieldManager, or because // the fieldManager never owned fields any fields. // controllerRevision must be a unmodified ControllerRevision API object that was retrieved from the Kubernetes API. @@ -60,8 +60,19 @@ func ControllerRevision(name, namespace string) *ControllerRevisionApplyConfigur // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! func ExtractControllerRevision(controllerRevision *appsv1.ControllerRevision, fieldManager string) (*ControllerRevisionApplyConfiguration, error) { + return extractControllerRevision(controllerRevision, fieldManager, "") +} + +// ExtractControllerRevisionStatus is the same as ExtractControllerRevision except +// that it extracts the status subresource applied configuration. +// Experimental! 
+func ExtractControllerRevisionStatus(controllerRevision *appsv1.ControllerRevision, fieldManager string) (*ControllerRevisionApplyConfiguration, error) { + return extractControllerRevision(controllerRevision, fieldManager, "status") +} + +func extractControllerRevision(controllerRevision *appsv1.ControllerRevision, fieldManager string, subresource string) (*ControllerRevisionApplyConfiguration, error) { b := &ControllerRevisionApplyConfiguration{} - err := managedfields.ExtractInto(controllerRevision, internal.Parser().Type("io.k8s.api.apps.v1.ControllerRevision"), fieldManager, b) + err := managedfields.ExtractInto(controllerRevision, internal.Parser().Type("io.k8s.api.apps.v1.ControllerRevision"), fieldManager, b, subresource) if err != nil { return nil, err } diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/daemonset.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/daemonset.go index bcfe7a4a6423c..6dd8c6e889ee9 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/daemonset.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/daemonset.go @@ -50,7 +50,7 @@ func DaemonSet(name, namespace string) *DaemonSetApplyConfiguration { // ExtractDaemonSet extracts the applied configuration owned by fieldManager from // daemonSet. If no managedFields are found in daemonSet for fieldManager, a // DaemonSetApplyConfiguration is returned with only the Name, Namespace (if applicable), -// APIVersion and Kind populated. Is is possible that no managed fields were found for because other +// APIVersion and Kind populated. It is possible that no managed fields were found for because other // field managers have taken ownership of all the fields previously owned by fieldManager, or because // the fieldManager never owned fields any fields. // daemonSet must be a unmodified DaemonSet API object that was retrieved from the Kubernetes API. @@ -59,8 +59,19 @@ func DaemonSet(name, namespace string) *DaemonSetApplyConfiguration { // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! func ExtractDaemonSet(daemonSet *apiappsv1.DaemonSet, fieldManager string) (*DaemonSetApplyConfiguration, error) { + return extractDaemonSet(daemonSet, fieldManager, "") +} + +// ExtractDaemonSetStatus is the same as ExtractDaemonSet except +// that it extracts the status subresource applied configuration. +// Experimental! 
+func ExtractDaemonSetStatus(daemonSet *apiappsv1.DaemonSet, fieldManager string) (*DaemonSetApplyConfiguration, error) { + return extractDaemonSet(daemonSet, fieldManager, "status") +} + +func extractDaemonSet(daemonSet *apiappsv1.DaemonSet, fieldManager string, subresource string) (*DaemonSetApplyConfiguration, error) { b := &DaemonSetApplyConfiguration{} - err := managedfields.ExtractInto(daemonSet, internal.Parser().Type("io.k8s.api.apps.v1.DaemonSet"), fieldManager, b) + err := managedfields.ExtractInto(daemonSet, internal.Parser().Type("io.k8s.api.apps.v1.DaemonSet"), fieldManager, b, subresource) if err != nil { return nil, err } diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/deployment.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/deployment.go index 37ef1896a2dca..d33321c52b794 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/deployment.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/deployment.go @@ -50,7 +50,7 @@ func Deployment(name, namespace string) *DeploymentApplyConfiguration { // ExtractDeployment extracts the applied configuration owned by fieldManager from // deployment. If no managedFields are found in deployment for fieldManager, a // DeploymentApplyConfiguration is returned with only the Name, Namespace (if applicable), -// APIVersion and Kind populated. Is is possible that no managed fields were found for because other +// APIVersion and Kind populated. It is possible that no managed fields were found for because other // field managers have taken ownership of all the fields previously owned by fieldManager, or because // the fieldManager never owned fields any fields. // deployment must be a unmodified Deployment API object that was retrieved from the Kubernetes API. @@ -59,8 +59,19 @@ func Deployment(name, namespace string) *DeploymentApplyConfiguration { // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! func ExtractDeployment(deployment *apiappsv1.Deployment, fieldManager string) (*DeploymentApplyConfiguration, error) { + return extractDeployment(deployment, fieldManager, "") +} + +// ExtractDeploymentStatus is the same as ExtractDeployment except +// that it extracts the status subresource applied configuration. +// Experimental! +func ExtractDeploymentStatus(deployment *apiappsv1.Deployment, fieldManager string) (*DeploymentApplyConfiguration, error) { + return extractDeployment(deployment, fieldManager, "status") +} + +func extractDeployment(deployment *apiappsv1.Deployment, fieldManager string, subresource string) (*DeploymentApplyConfiguration, error) { b := &DeploymentApplyConfiguration{} - err := managedfields.ExtractInto(deployment, internal.Parser().Type("io.k8s.api.apps.v1.Deployment"), fieldManager, b) + err := managedfields.ExtractInto(deployment, internal.Parser().Type("io.k8s.api.apps.v1.Deployment"), fieldManager, b, subresource) if err != nil { return nil, err } diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/replicaset.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/replicaset.go index fc7a468e48cee..0affbf82f29db 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/replicaset.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/replicaset.go @@ -50,7 +50,7 @@ func ReplicaSet(name, namespace string) *ReplicaSetApplyConfiguration { // ExtractReplicaSet extracts the applied configuration owned by fieldManager from // replicaSet. 
If no managedFields are found in replicaSet for fieldManager, a // ReplicaSetApplyConfiguration is returned with only the Name, Namespace (if applicable), -// APIVersion and Kind populated. Is is possible that no managed fields were found for because other +// APIVersion and Kind populated. It is possible that no managed fields were found for because other // field managers have taken ownership of all the fields previously owned by fieldManager, or because // the fieldManager never owned fields any fields. // replicaSet must be a unmodified ReplicaSet API object that was retrieved from the Kubernetes API. @@ -59,8 +59,19 @@ func ReplicaSet(name, namespace string) *ReplicaSetApplyConfiguration { // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! func ExtractReplicaSet(replicaSet *apiappsv1.ReplicaSet, fieldManager string) (*ReplicaSetApplyConfiguration, error) { + return extractReplicaSet(replicaSet, fieldManager, "") +} + +// ExtractReplicaSetStatus is the same as ExtractReplicaSet except +// that it extracts the status subresource applied configuration. +// Experimental! +func ExtractReplicaSetStatus(replicaSet *apiappsv1.ReplicaSet, fieldManager string) (*ReplicaSetApplyConfiguration, error) { + return extractReplicaSet(replicaSet, fieldManager, "status") +} + +func extractReplicaSet(replicaSet *apiappsv1.ReplicaSet, fieldManager string, subresource string) (*ReplicaSetApplyConfiguration, error) { b := &ReplicaSetApplyConfiguration{} - err := managedfields.ExtractInto(replicaSet, internal.Parser().Type("io.k8s.api.apps.v1.ReplicaSet"), fieldManager, b) + err := managedfields.ExtractInto(replicaSet, internal.Parser().Type("io.k8s.api.apps.v1.ReplicaSet"), fieldManager, b, subresource) if err != nil { return nil, err } diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulset.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulset.go index ff0d50862c355..7cb5ec12c19b1 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulset.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulset.go @@ -50,7 +50,7 @@ func StatefulSet(name, namespace string) *StatefulSetApplyConfiguration { // ExtractStatefulSet extracts the applied configuration owned by fieldManager from // statefulSet. If no managedFields are found in statefulSet for fieldManager, a // StatefulSetApplyConfiguration is returned with only the Name, Namespace (if applicable), -// APIVersion and Kind populated. Is is possible that no managed fields were found for because other +// APIVersion and Kind populated. It is possible that no managed fields were found for because other // field managers have taken ownership of all the fields previously owned by fieldManager, or because // the fieldManager never owned fields any fields. // statefulSet must be a unmodified StatefulSet API object that was retrieved from the Kubernetes API. @@ -59,8 +59,19 @@ func StatefulSet(name, namespace string) *StatefulSetApplyConfiguration { // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! func ExtractStatefulSet(statefulSet *apiappsv1.StatefulSet, fieldManager string) (*StatefulSetApplyConfiguration, error) { + return extractStatefulSet(statefulSet, fieldManager, "") +} + +// ExtractStatefulSetStatus is the same as ExtractStatefulSet except +// that it extracts the status subresource applied configuration. +// Experimental! 
+func ExtractStatefulSetStatus(statefulSet *apiappsv1.StatefulSet, fieldManager string) (*StatefulSetApplyConfiguration, error) { + return extractStatefulSet(statefulSet, fieldManager, "status") +} + +func extractStatefulSet(statefulSet *apiappsv1.StatefulSet, fieldManager string, subresource string) (*StatefulSetApplyConfiguration, error) { b := &StatefulSetApplyConfiguration{} - err := managedfields.ExtractInto(statefulSet, internal.Parser().Type("io.k8s.api.apps.v1.StatefulSet"), fieldManager, b) + err := managedfields.ExtractInto(statefulSet, internal.Parser().Type("io.k8s.api.apps.v1.StatefulSet"), fieldManager, b, subresource) if err != nil { return nil, err } diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulsetpersistentvolumeclaimretentionpolicy.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulsetpersistentvolumeclaimretentionpolicy.go new file mode 100644 index 0000000000000..ba01d5d3c10a2 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulsetpersistentvolumeclaimretentionpolicy.go @@ -0,0 +1,52 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "k8s.io/api/apps/v1" +) + +// StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration represents an declarative configuration of the StatefulSetPersistentVolumeClaimRetentionPolicy type for use +// with apply. +type StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration struct { + WhenDeleted *v1.PersistentVolumeClaimRetentionPolicyType `json:"whenDeleted,omitempty"` + WhenScaled *v1.PersistentVolumeClaimRetentionPolicyType `json:"whenScaled,omitempty"` +} + +// StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration constructs an declarative configuration of the StatefulSetPersistentVolumeClaimRetentionPolicy type for use with +// apply. +func StatefulSetPersistentVolumeClaimRetentionPolicy() *StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration { + return &StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration{} +} + +// WithWhenDeleted sets the WhenDeleted field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the WhenDeleted field is set to the value of the last call. +func (b *StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration) WithWhenDeleted(value v1.PersistentVolumeClaimRetentionPolicyType) *StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration { + b.WhenDeleted = &value + return b +} + +// WithWhenScaled sets the WhenScaled field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the WhenScaled field is set to the value of the last call. 
+func (b *StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration) WithWhenScaled(value v1.PersistentVolumeClaimRetentionPolicyType) *StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration { + b.WhenScaled = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulsetspec.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulsetspec.go index 502f00ba4509d..ee0ed40a5692b 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulsetspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulsetspec.go @@ -27,14 +27,16 @@ import ( // StatefulSetSpecApplyConfiguration represents an declarative configuration of the StatefulSetSpec type for use // with apply. type StatefulSetSpecApplyConfiguration struct { - Replicas *int32 `json:"replicas,omitempty"` - Selector *v1.LabelSelectorApplyConfiguration `json:"selector,omitempty"` - Template *corev1.PodTemplateSpecApplyConfiguration `json:"template,omitempty"` - VolumeClaimTemplates []corev1.PersistentVolumeClaimApplyConfiguration `json:"volumeClaimTemplates,omitempty"` - ServiceName *string `json:"serviceName,omitempty"` - PodManagementPolicy *appsv1.PodManagementPolicyType `json:"podManagementPolicy,omitempty"` - UpdateStrategy *StatefulSetUpdateStrategyApplyConfiguration `json:"updateStrategy,omitempty"` - RevisionHistoryLimit *int32 `json:"revisionHistoryLimit,omitempty"` + Replicas *int32 `json:"replicas,omitempty"` + Selector *v1.LabelSelectorApplyConfiguration `json:"selector,omitempty"` + Template *corev1.PodTemplateSpecApplyConfiguration `json:"template,omitempty"` + VolumeClaimTemplates []corev1.PersistentVolumeClaimApplyConfiguration `json:"volumeClaimTemplates,omitempty"` + ServiceName *string `json:"serviceName,omitempty"` + PodManagementPolicy *appsv1.PodManagementPolicyType `json:"podManagementPolicy,omitempty"` + UpdateStrategy *StatefulSetUpdateStrategyApplyConfiguration `json:"updateStrategy,omitempty"` + RevisionHistoryLimit *int32 `json:"revisionHistoryLimit,omitempty"` + MinReadySeconds *int32 `json:"minReadySeconds,omitempty"` + PersistentVolumeClaimRetentionPolicy *StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration `json:"persistentVolumeClaimRetentionPolicy,omitempty"` } // StatefulSetSpecApplyConfiguration constructs an declarative configuration of the StatefulSetSpec type for use with @@ -111,3 +113,19 @@ func (b *StatefulSetSpecApplyConfiguration) WithRevisionHistoryLimit(value int32 b.RevisionHistoryLimit = &value return b } + +// WithMinReadySeconds sets the MinReadySeconds field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the MinReadySeconds field is set to the value of the last call. +func (b *StatefulSetSpecApplyConfiguration) WithMinReadySeconds(value int32) *StatefulSetSpecApplyConfiguration { + b.MinReadySeconds = &value + return b +} + +// WithPersistentVolumeClaimRetentionPolicy sets the PersistentVolumeClaimRetentionPolicy field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the PersistentVolumeClaimRetentionPolicy field is set to the value of the last call. 
+func (b *StatefulSetSpecApplyConfiguration) WithPersistentVolumeClaimRetentionPolicy(value *StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration) *StatefulSetSpecApplyConfiguration { + b.PersistentVolumeClaimRetentionPolicy = value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulsetstatus.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulsetstatus.go index 58c07475e5495..d88881b6569bc 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulsetstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulsetstatus.go @@ -30,6 +30,7 @@ type StatefulSetStatusApplyConfiguration struct { UpdateRevision *string `json:"updateRevision,omitempty"` CollisionCount *int32 `json:"collisionCount,omitempty"` Conditions []StatefulSetConditionApplyConfiguration `json:"conditions,omitempty"` + AvailableReplicas *int32 `json:"availableReplicas,omitempty"` } // StatefulSetStatusApplyConfiguration constructs an declarative configuration of the StatefulSetStatus type for use with @@ -114,3 +115,11 @@ func (b *StatefulSetStatusApplyConfiguration) WithConditions(values ...*Stateful } return b } + +// WithAvailableReplicas sets the AvailableReplicas field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the AvailableReplicas field is set to the value of the last call. +func (b *StatefulSetStatusApplyConfiguration) WithAvailableReplicas(value int32) *StatefulSetStatusApplyConfiguration { + b.AvailableReplicas = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/controllerrevision.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/controllerrevision.go index 4497fd69d6815..bcdfa44b2d320 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/controllerrevision.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/controllerrevision.go @@ -51,7 +51,7 @@ func ControllerRevision(name, namespace string) *ControllerRevisionApplyConfigur // ExtractControllerRevision extracts the applied configuration owned by fieldManager from // controllerRevision. If no managedFields are found in controllerRevision for fieldManager, a // ControllerRevisionApplyConfiguration is returned with only the Name, Namespace (if applicable), -// APIVersion and Kind populated. Is is possible that no managed fields were found for because other +// APIVersion and Kind populated. It is possible that no managed fields were found for because other // field managers have taken ownership of all the fields previously owned by fieldManager, or because // the fieldManager never owned fields any fields. // controllerRevision must be a unmodified ControllerRevision API object that was retrieved from the Kubernetes API. @@ -60,8 +60,19 @@ func ControllerRevision(name, namespace string) *ControllerRevisionApplyConfigur // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! func ExtractControllerRevision(controllerRevision *v1beta1.ControllerRevision, fieldManager string) (*ControllerRevisionApplyConfiguration, error) { + return extractControllerRevision(controllerRevision, fieldManager, "") +} + +// ExtractControllerRevisionStatus is the same as ExtractControllerRevision except +// that it extracts the status subresource applied configuration. +// Experimental! 
+func ExtractControllerRevisionStatus(controllerRevision *v1beta1.ControllerRevision, fieldManager string) (*ControllerRevisionApplyConfiguration, error) { + return extractControllerRevision(controllerRevision, fieldManager, "status") +} + +func extractControllerRevision(controllerRevision *v1beta1.ControllerRevision, fieldManager string, subresource string) (*ControllerRevisionApplyConfiguration, error) { b := &ControllerRevisionApplyConfiguration{} - err := managedfields.ExtractInto(controllerRevision, internal.Parser().Type("io.k8s.api.apps.v1beta1.ControllerRevision"), fieldManager, b) + err := managedfields.ExtractInto(controllerRevision, internal.Parser().Type("io.k8s.api.apps.v1beta1.ControllerRevision"), fieldManager, b, subresource) if err != nil { return nil, err } diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/deployment.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/deployment.go index c79a614d3ad78..eddab17898888 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/deployment.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/deployment.go @@ -50,7 +50,7 @@ func Deployment(name, namespace string) *DeploymentApplyConfiguration { // ExtractDeployment extracts the applied configuration owned by fieldManager from // deployment. If no managedFields are found in deployment for fieldManager, a // DeploymentApplyConfiguration is returned with only the Name, Namespace (if applicable), -// APIVersion and Kind populated. Is is possible that no managed fields were found for because other +// APIVersion and Kind populated. It is possible that no managed fields were found for because other // field managers have taken ownership of all the fields previously owned by fieldManager, or because // the fieldManager never owned fields any fields. // deployment must be a unmodified Deployment API object that was retrieved from the Kubernetes API. @@ -59,8 +59,19 @@ func Deployment(name, namespace string) *DeploymentApplyConfiguration { // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! func ExtractDeployment(deployment *appsv1beta1.Deployment, fieldManager string) (*DeploymentApplyConfiguration, error) { + return extractDeployment(deployment, fieldManager, "") +} + +// ExtractDeploymentStatus is the same as ExtractDeployment except +// that it extracts the status subresource applied configuration. +// Experimental! 
+func ExtractDeploymentStatus(deployment *appsv1beta1.Deployment, fieldManager string) (*DeploymentApplyConfiguration, error) { + return extractDeployment(deployment, fieldManager, "status") +} + +func extractDeployment(deployment *appsv1beta1.Deployment, fieldManager string, subresource string) (*DeploymentApplyConfiguration, error) { b := &DeploymentApplyConfiguration{} - err := managedfields.ExtractInto(deployment, internal.Parser().Type("io.k8s.api.apps.v1beta1.Deployment"), fieldManager, b) + err := managedfields.ExtractInto(deployment, internal.Parser().Type("io.k8s.api.apps.v1beta1.Deployment"), fieldManager, b, subresource) if err != nil { return nil, err } diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulset.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulset.go index 909ee91f8f406..a4f64cd85bb4e 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulset.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulset.go @@ -50,7 +50,7 @@ func StatefulSet(name, namespace string) *StatefulSetApplyConfiguration { // ExtractStatefulSet extracts the applied configuration owned by fieldManager from // statefulSet. If no managedFields are found in statefulSet for fieldManager, a // StatefulSetApplyConfiguration is returned with only the Name, Namespace (if applicable), -// APIVersion and Kind populated. Is is possible that no managed fields were found for because other +// APIVersion and Kind populated. It is possible that no managed fields were found for because other // field managers have taken ownership of all the fields previously owned by fieldManager, or because // the fieldManager never owned fields any fields. // statefulSet must be a unmodified StatefulSet API object that was retrieved from the Kubernetes API. @@ -59,8 +59,19 @@ func StatefulSet(name, namespace string) *StatefulSetApplyConfiguration { // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! func ExtractStatefulSet(statefulSet *appsv1beta1.StatefulSet, fieldManager string) (*StatefulSetApplyConfiguration, error) { + return extractStatefulSet(statefulSet, fieldManager, "") +} + +// ExtractStatefulSetStatus is the same as ExtractStatefulSet except +// that it extracts the status subresource applied configuration. +// Experimental! 
+func ExtractStatefulSetStatus(statefulSet *appsv1beta1.StatefulSet, fieldManager string) (*StatefulSetApplyConfiguration, error) { + return extractStatefulSet(statefulSet, fieldManager, "status") +} + +func extractStatefulSet(statefulSet *appsv1beta1.StatefulSet, fieldManager string, subresource string) (*StatefulSetApplyConfiguration, error) { b := &StatefulSetApplyConfiguration{} - err := managedfields.ExtractInto(statefulSet, internal.Parser().Type("io.k8s.api.apps.v1beta1.StatefulSet"), fieldManager, b) + err := managedfields.ExtractInto(statefulSet, internal.Parser().Type("io.k8s.api.apps.v1beta1.StatefulSet"), fieldManager, b, subresource) if err != nil { return nil, err } diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulsetpersistentvolumeclaimretentionpolicy.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulsetpersistentvolumeclaimretentionpolicy.go new file mode 100644 index 0000000000000..0048724c041cb --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulsetpersistentvolumeclaimretentionpolicy.go @@ -0,0 +1,52 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta1 + +import ( + v1beta1 "k8s.io/api/apps/v1beta1" +) + +// StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration represents an declarative configuration of the StatefulSetPersistentVolumeClaimRetentionPolicy type for use +// with apply. +type StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration struct { + WhenDeleted *v1beta1.PersistentVolumeClaimRetentionPolicyType `json:"whenDeleted,omitempty"` + WhenScaled *v1beta1.PersistentVolumeClaimRetentionPolicyType `json:"whenScaled,omitempty"` +} + +// StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration constructs an declarative configuration of the StatefulSetPersistentVolumeClaimRetentionPolicy type for use with +// apply. +func StatefulSetPersistentVolumeClaimRetentionPolicy() *StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration { + return &StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration{} +} + +// WithWhenDeleted sets the WhenDeleted field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the WhenDeleted field is set to the value of the last call. +func (b *StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration) WithWhenDeleted(value v1beta1.PersistentVolumeClaimRetentionPolicyType) *StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration { + b.WhenDeleted = &value + return b +} + +// WithWhenScaled sets the WhenScaled field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the WhenScaled field is set to the value of the last call. 
+func (b *StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration) WithWhenScaled(value v1beta1.PersistentVolumeClaimRetentionPolicyType) *StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration { + b.WhenScaled = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulsetspec.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulsetspec.go index 61b86a9d82f6a..886433d9ea316 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulsetspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulsetspec.go @@ -27,14 +27,16 @@ import ( // StatefulSetSpecApplyConfiguration represents an declarative configuration of the StatefulSetSpec type for use // with apply. type StatefulSetSpecApplyConfiguration struct { - Replicas *int32 `json:"replicas,omitempty"` - Selector *v1.LabelSelectorApplyConfiguration `json:"selector,omitempty"` - Template *corev1.PodTemplateSpecApplyConfiguration `json:"template,omitempty"` - VolumeClaimTemplates []corev1.PersistentVolumeClaimApplyConfiguration `json:"volumeClaimTemplates,omitempty"` - ServiceName *string `json:"serviceName,omitempty"` - PodManagementPolicy *v1beta1.PodManagementPolicyType `json:"podManagementPolicy,omitempty"` - UpdateStrategy *StatefulSetUpdateStrategyApplyConfiguration `json:"updateStrategy,omitempty"` - RevisionHistoryLimit *int32 `json:"revisionHistoryLimit,omitempty"` + Replicas *int32 `json:"replicas,omitempty"` + Selector *v1.LabelSelectorApplyConfiguration `json:"selector,omitempty"` + Template *corev1.PodTemplateSpecApplyConfiguration `json:"template,omitempty"` + VolumeClaimTemplates []corev1.PersistentVolumeClaimApplyConfiguration `json:"volumeClaimTemplates,omitempty"` + ServiceName *string `json:"serviceName,omitempty"` + PodManagementPolicy *v1beta1.PodManagementPolicyType `json:"podManagementPolicy,omitempty"` + UpdateStrategy *StatefulSetUpdateStrategyApplyConfiguration `json:"updateStrategy,omitempty"` + RevisionHistoryLimit *int32 `json:"revisionHistoryLimit,omitempty"` + MinReadySeconds *int32 `json:"minReadySeconds,omitempty"` + PersistentVolumeClaimRetentionPolicy *StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration `json:"persistentVolumeClaimRetentionPolicy,omitempty"` } // StatefulSetSpecApplyConfiguration constructs an declarative configuration of the StatefulSetSpec type for use with @@ -111,3 +113,19 @@ func (b *StatefulSetSpecApplyConfiguration) WithRevisionHistoryLimit(value int32 b.RevisionHistoryLimit = &value return b } + +// WithMinReadySeconds sets the MinReadySeconds field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the MinReadySeconds field is set to the value of the last call. +func (b *StatefulSetSpecApplyConfiguration) WithMinReadySeconds(value int32) *StatefulSetSpecApplyConfiguration { + b.MinReadySeconds = &value + return b +} + +// WithPersistentVolumeClaimRetentionPolicy sets the PersistentVolumeClaimRetentionPolicy field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the PersistentVolumeClaimRetentionPolicy field is set to the value of the last call. 
+func (b *StatefulSetSpecApplyConfiguration) WithPersistentVolumeClaimRetentionPolicy(value *StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration) *StatefulSetSpecApplyConfiguration { + b.PersistentVolumeClaimRetentionPolicy = value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulsetstatus.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulsetstatus.go index b716352d26507..f31066b6ff461 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulsetstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulsetstatus.go @@ -30,6 +30,7 @@ type StatefulSetStatusApplyConfiguration struct { UpdateRevision *string `json:"updateRevision,omitempty"` CollisionCount *int32 `json:"collisionCount,omitempty"` Conditions []StatefulSetConditionApplyConfiguration `json:"conditions,omitempty"` + AvailableReplicas *int32 `json:"availableReplicas,omitempty"` } // StatefulSetStatusApplyConfiguration constructs an declarative configuration of the StatefulSetStatus type for use with @@ -114,3 +115,11 @@ func (b *StatefulSetStatusApplyConfiguration) WithConditions(values ...*Stateful } return b } + +// WithAvailableReplicas sets the AvailableReplicas field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the AvailableReplicas field is set to the value of the last call. +func (b *StatefulSetStatusApplyConfiguration) WithAvailableReplicas(value int32) *StatefulSetStatusApplyConfiguration { + b.AvailableReplicas = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/controllerrevision.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/controllerrevision.go index 7c36cd82cb76a..f5d906162a126 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/controllerrevision.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/controllerrevision.go @@ -51,7 +51,7 @@ func ControllerRevision(name, namespace string) *ControllerRevisionApplyConfigur // ExtractControllerRevision extracts the applied configuration owned by fieldManager from // controllerRevision. If no managedFields are found in controllerRevision for fieldManager, a // ControllerRevisionApplyConfiguration is returned with only the Name, Namespace (if applicable), -// APIVersion and Kind populated. Is is possible that no managed fields were found for because other +// APIVersion and Kind populated. It is possible that no managed fields were found for because other // field managers have taken ownership of all the fields previously owned by fieldManager, or because // the fieldManager never owned fields any fields. // controllerRevision must be a unmodified ControllerRevision API object that was retrieved from the Kubernetes API. @@ -60,8 +60,19 @@ func ControllerRevision(name, namespace string) *ControllerRevisionApplyConfigur // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! func ExtractControllerRevision(controllerRevision *v1beta2.ControllerRevision, fieldManager string) (*ControllerRevisionApplyConfiguration, error) { + return extractControllerRevision(controllerRevision, fieldManager, "") +} + +// ExtractControllerRevisionStatus is the same as ExtractControllerRevision except +// that it extracts the status subresource applied configuration. +// Experimental! 
+func ExtractControllerRevisionStatus(controllerRevision *v1beta2.ControllerRevision, fieldManager string) (*ControllerRevisionApplyConfiguration, error) { + return extractControllerRevision(controllerRevision, fieldManager, "status") +} + +func extractControllerRevision(controllerRevision *v1beta2.ControllerRevision, fieldManager string, subresource string) (*ControllerRevisionApplyConfiguration, error) { b := &ControllerRevisionApplyConfiguration{} - err := managedfields.ExtractInto(controllerRevision, internal.Parser().Type("io.k8s.api.apps.v1beta2.ControllerRevision"), fieldManager, b) + err := managedfields.ExtractInto(controllerRevision, internal.Parser().Type("io.k8s.api.apps.v1beta2.ControllerRevision"), fieldManager, b, subresource) if err != nil { return nil, err } diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/daemonset.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/daemonset.go index 174ee8a1967a6..92708f0a8b103 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/daemonset.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/daemonset.go @@ -50,7 +50,7 @@ func DaemonSet(name, namespace string) *DaemonSetApplyConfiguration { // ExtractDaemonSet extracts the applied configuration owned by fieldManager from // daemonSet. If no managedFields are found in daemonSet for fieldManager, a // DaemonSetApplyConfiguration is returned with only the Name, Namespace (if applicable), -// APIVersion and Kind populated. Is is possible that no managed fields were found for because other +// APIVersion and Kind populated. It is possible that no managed fields were found for because other // field managers have taken ownership of all the fields previously owned by fieldManager, or because // the fieldManager never owned fields any fields. // daemonSet must be a unmodified DaemonSet API object that was retrieved from the Kubernetes API. @@ -59,8 +59,19 @@ func DaemonSet(name, namespace string) *DaemonSetApplyConfiguration { // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! func ExtractDaemonSet(daemonSet *appsv1beta2.DaemonSet, fieldManager string) (*DaemonSetApplyConfiguration, error) { + return extractDaemonSet(daemonSet, fieldManager, "") +} + +// ExtractDaemonSetStatus is the same as ExtractDaemonSet except +// that it extracts the status subresource applied configuration. +// Experimental! 
+func ExtractDaemonSetStatus(daemonSet *appsv1beta2.DaemonSet, fieldManager string) (*DaemonSetApplyConfiguration, error) { + return extractDaemonSet(daemonSet, fieldManager, "status") +} + +func extractDaemonSet(daemonSet *appsv1beta2.DaemonSet, fieldManager string, subresource string) (*DaemonSetApplyConfiguration, error) { b := &DaemonSetApplyConfiguration{} - err := managedfields.ExtractInto(daemonSet, internal.Parser().Type("io.k8s.api.apps.v1beta2.DaemonSet"), fieldManager, b) + err := managedfields.ExtractInto(daemonSet, internal.Parser().Type("io.k8s.api.apps.v1beta2.DaemonSet"), fieldManager, b, subresource) if err != nil { return nil, err } diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/deployment.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/deployment.go index c59ae290fe2b8..ad0c509db5fc1 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/deployment.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/deployment.go @@ -50,7 +50,7 @@ func Deployment(name, namespace string) *DeploymentApplyConfiguration { // ExtractDeployment extracts the applied configuration owned by fieldManager from // deployment. If no managedFields are found in deployment for fieldManager, a // DeploymentApplyConfiguration is returned with only the Name, Namespace (if applicable), -// APIVersion and Kind populated. Is is possible that no managed fields were found for because other +// APIVersion and Kind populated. It is possible that no managed fields were found for because other // field managers have taken ownership of all the fields previously owned by fieldManager, or because // the fieldManager never owned fields any fields. // deployment must be a unmodified Deployment API object that was retrieved from the Kubernetes API. @@ -59,8 +59,19 @@ func Deployment(name, namespace string) *DeploymentApplyConfiguration { // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! func ExtractDeployment(deployment *appsv1beta2.Deployment, fieldManager string) (*DeploymentApplyConfiguration, error) { + return extractDeployment(deployment, fieldManager, "") +} + +// ExtractDeploymentStatus is the same as ExtractDeployment except +// that it extracts the status subresource applied configuration. +// Experimental! 
+func ExtractDeploymentStatus(deployment *appsv1beta2.Deployment, fieldManager string) (*DeploymentApplyConfiguration, error) { + return extractDeployment(deployment, fieldManager, "status") +} + +func extractDeployment(deployment *appsv1beta2.Deployment, fieldManager string, subresource string) (*DeploymentApplyConfiguration, error) { b := &DeploymentApplyConfiguration{} - err := managedfields.ExtractInto(deployment, internal.Parser().Type("io.k8s.api.apps.v1beta2.Deployment"), fieldManager, b) + err := managedfields.ExtractInto(deployment, internal.Parser().Type("io.k8s.api.apps.v1beta2.Deployment"), fieldManager, b, subresource) if err != nil { return nil, err } diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/replicaset.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/replicaset.go index 881c503ae8b45..e2998f2c33a67 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/replicaset.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/replicaset.go @@ -50,7 +50,7 @@ func ReplicaSet(name, namespace string) *ReplicaSetApplyConfiguration { // ExtractReplicaSet extracts the applied configuration owned by fieldManager from // replicaSet. If no managedFields are found in replicaSet for fieldManager, a // ReplicaSetApplyConfiguration is returned with only the Name, Namespace (if applicable), -// APIVersion and Kind populated. Is is possible that no managed fields were found for because other +// APIVersion and Kind populated. It is possible that no managed fields were found for because other // field managers have taken ownership of all the fields previously owned by fieldManager, or because // the fieldManager never owned fields any fields. // replicaSet must be a unmodified ReplicaSet API object that was retrieved from the Kubernetes API. @@ -59,8 +59,19 @@ func ReplicaSet(name, namespace string) *ReplicaSetApplyConfiguration { // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! func ExtractReplicaSet(replicaSet *appsv1beta2.ReplicaSet, fieldManager string) (*ReplicaSetApplyConfiguration, error) { + return extractReplicaSet(replicaSet, fieldManager, "") +} + +// ExtractReplicaSetStatus is the same as ExtractReplicaSet except +// that it extracts the status subresource applied configuration. +// Experimental! +func ExtractReplicaSetStatus(replicaSet *appsv1beta2.ReplicaSet, fieldManager string) (*ReplicaSetApplyConfiguration, error) { + return extractReplicaSet(replicaSet, fieldManager, "status") +} + +func extractReplicaSet(replicaSet *appsv1beta2.ReplicaSet, fieldManager string, subresource string) (*ReplicaSetApplyConfiguration, error) { b := &ReplicaSetApplyConfiguration{} - err := managedfields.ExtractInto(replicaSet, internal.Parser().Type("io.k8s.api.apps.v1beta2.ReplicaSet"), fieldManager, b) + err := managedfields.ExtractInto(replicaSet, internal.Parser().Type("io.k8s.api.apps.v1beta2.ReplicaSet"), fieldManager, b, subresource) if err != nil { return nil, err } diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/scale.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/scale.go new file mode 100644 index 0000000000000..d4901edf9bebe --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/scale.go @@ -0,0 +1,233 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta2 + +import ( + v1beta2 "k8s.io/api/apps/v1beta2" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + v1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// ScaleApplyConfiguration represents an declarative configuration of the Scale type for use +// with apply. +type ScaleApplyConfiguration struct { + v1.TypeMetaApplyConfiguration `json:",inline"` + *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *v1beta2.ScaleSpec `json:"spec,omitempty"` + Status *v1beta2.ScaleStatus `json:"status,omitempty"` +} + +// ScaleApplyConfiguration constructs an declarative configuration of the Scale type for use with +// apply. +func Scale() *ScaleApplyConfiguration { + return &ScaleApplyConfiguration{} +} + +// WithKind sets the Kind field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Kind field is set to the value of the last call. +func (b *ScaleApplyConfiguration) WithKind(value string) *ScaleApplyConfiguration { + b.Kind = &value + return b +} + +// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the APIVersion field is set to the value of the last call. +func (b *ScaleApplyConfiguration) WithAPIVersion(value string) *ScaleApplyConfiguration { + b.APIVersion = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *ScaleApplyConfiguration) WithName(value string) *ScaleApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Name = &value + return b +} + +// WithGenerateName sets the GenerateName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GenerateName field is set to the value of the last call. +func (b *ScaleApplyConfiguration) WithGenerateName(value string) *ScaleApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.GenerateName = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. 
+func (b *ScaleApplyConfiguration) WithNamespace(value string) *ScaleApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Namespace = &value + return b +} + +// WithSelfLink sets the SelfLink field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the SelfLink field is set to the value of the last call. +func (b *ScaleApplyConfiguration) WithSelfLink(value string) *ScaleApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.SelfLink = &value + return b +} + +// WithUID sets the UID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UID field is set to the value of the last call. +func (b *ScaleApplyConfiguration) WithUID(value types.UID) *ScaleApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.UID = &value + return b +} + +// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourceVersion field is set to the value of the last call. +func (b *ScaleApplyConfiguration) WithResourceVersion(value string) *ScaleApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ResourceVersion = &value + return b +} + +// WithGeneration sets the Generation field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Generation field is set to the value of the last call. +func (b *ScaleApplyConfiguration) WithGeneration(value int64) *ScaleApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Generation = &value + return b +} + +// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CreationTimestamp field is set to the value of the last call. +func (b *ScaleApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ScaleApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.CreationTimestamp = &value + return b +} + +// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionTimestamp field is set to the value of the last call. +func (b *ScaleApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ScaleApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.DeletionTimestamp = &value + return b +} + +// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. 
+func (b *ScaleApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ScaleApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.DeletionGracePeriodSeconds = &value + return b +} + +// WithLabels puts the entries into the Labels field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Labels field, +// overwriting an existing map entries in Labels field with the same key. +func (b *ScaleApplyConfiguration) WithLabels(entries map[string]string) *ScaleApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.Labels == nil && len(entries) > 0 { + b.Labels = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.Labels[k] = v + } + return b +} + +// WithAnnotations puts the entries into the Annotations field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Annotations field, +// overwriting an existing map entries in Annotations field with the same key. +func (b *ScaleApplyConfiguration) WithAnnotations(entries map[string]string) *ScaleApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.Annotations == nil && len(entries) > 0 { + b.Annotations = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.Annotations[k] = v + } + return b +} + +// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the OwnerReferences field. +func (b *ScaleApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *ScaleApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + if values[i] == nil { + panic("nil value passed to WithOwnerReferences") + } + b.OwnerReferences = append(b.OwnerReferences, *values[i]) + } + return b +} + +// WithFinalizers adds the given value to the Finalizers field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Finalizers field. +func (b *ScaleApplyConfiguration) WithFinalizers(values ...string) *ScaleApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + b.Finalizers = append(b.Finalizers, values[i]) + } + return b +} + +// WithClusterName sets the ClusterName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ClusterName field is set to the value of the last call. 
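As the comments above state, the map-valued setters merge entries across calls instead of replacing the whole map. A small illustration (the label values are arbitrary):

package main

import v1beta2ac "k8s.io/client-go/applyconfigurations/apps/v1beta2"

func labeledScale() *v1beta2ac.ScaleApplyConfiguration {
	// The second WithLabels call overwrites "tier" but keeps "app";
	// the resulting Labels map is {app: web, tier: cache}.
	return v1beta2ac.Scale().
		WithLabels(map[string]string{"app": "web", "tier": "frontend"}).
		WithLabels(map[string]string{"tier": "cache"})
}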
+func (b *ScaleApplyConfiguration) WithClusterName(value string) *ScaleApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ClusterName = &value + return b +} + +func (b *ScaleApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { + if b.ObjectMetaApplyConfiguration == nil { + b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + } +} + +// WithSpec sets the Spec field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Spec field is set to the value of the last call. +func (b *ScaleApplyConfiguration) WithSpec(value v1beta2.ScaleSpec) *ScaleApplyConfiguration { + b.Spec = &value + return b +} + +// WithStatus sets the Status field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Status field is set to the value of the last call. +func (b *ScaleApplyConfiguration) WithStatus(value v1beta2.ScaleStatus) *ScaleApplyConfiguration { + b.Status = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulset.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulset.go index 26a18794720ff..0a242310cca35 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulset.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulset.go @@ -50,7 +50,7 @@ func StatefulSet(name, namespace string) *StatefulSetApplyConfiguration { // ExtractStatefulSet extracts the applied configuration owned by fieldManager from // statefulSet. If no managedFields are found in statefulSet for fieldManager, a // StatefulSetApplyConfiguration is returned with only the Name, Namespace (if applicable), -// APIVersion and Kind populated. Is is possible that no managed fields were found for because other +// APIVersion and Kind populated. It is possible that no managed fields were found for because other // field managers have taken ownership of all the fields previously owned by fieldManager, or because // the fieldManager never owned fields any fields. // statefulSet must be a unmodified StatefulSet API object that was retrieved from the Kubernetes API. @@ -59,8 +59,19 @@ func StatefulSet(name, namespace string) *StatefulSetApplyConfiguration { // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! func ExtractStatefulSet(statefulSet *appsv1beta2.StatefulSet, fieldManager string) (*StatefulSetApplyConfiguration, error) { + return extractStatefulSet(statefulSet, fieldManager, "") +} + +// ExtractStatefulSetStatus is the same as ExtractStatefulSet except +// that it extracts the status subresource applied configuration. +// Experimental! 
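Note that this Scale apply configuration holds its Spec and Status as the plain v1beta2 API types rather than nested apply configurations, so WithSpec and WithStatus take values, not builders. A brief sketch with placeholder names:

package main

import (
	appsv1beta2 "k8s.io/api/apps/v1beta2"
	v1beta2ac "k8s.io/client-go/applyconfigurations/apps/v1beta2"
)

func desiredScale(replicas int32) *v1beta2ac.ScaleApplyConfiguration {
	// Spec is the raw API struct here, passed by value instead of
	// being assembled through chained "With" calls.
	return v1beta2ac.Scale().
		WithName("web").
		WithNamespace("default").
		WithSpec(appsv1beta2.ScaleSpec{Replicas: replicas})
}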
+func ExtractStatefulSetStatus(statefulSet *appsv1beta2.StatefulSet, fieldManager string) (*StatefulSetApplyConfiguration, error) { + return extractStatefulSet(statefulSet, fieldManager, "status") +} + +func extractStatefulSet(statefulSet *appsv1beta2.StatefulSet, fieldManager string, subresource string) (*StatefulSetApplyConfiguration, error) { b := &StatefulSetApplyConfiguration{} - err := managedfields.ExtractInto(statefulSet, internal.Parser().Type("io.k8s.api.apps.v1beta2.StatefulSet"), fieldManager, b) + err := managedfields.ExtractInto(statefulSet, internal.Parser().Type("io.k8s.api.apps.v1beta2.StatefulSet"), fieldManager, b, subresource) if err != nil { return nil, err } diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulsetpersistentvolumeclaimretentionpolicy.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulsetpersistentvolumeclaimretentionpolicy.go new file mode 100644 index 0000000000000..aee27803d3b61 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulsetpersistentvolumeclaimretentionpolicy.go @@ -0,0 +1,52 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta2 + +import ( + v1beta2 "k8s.io/api/apps/v1beta2" +) + +// StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration represents an declarative configuration of the StatefulSetPersistentVolumeClaimRetentionPolicy type for use +// with apply. +type StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration struct { + WhenDeleted *v1beta2.PersistentVolumeClaimRetentionPolicyType `json:"whenDeleted,omitempty"` + WhenScaled *v1beta2.PersistentVolumeClaimRetentionPolicyType `json:"whenScaled,omitempty"` +} + +// StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration constructs an declarative configuration of the StatefulSetPersistentVolumeClaimRetentionPolicy type for use with +// apply. +func StatefulSetPersistentVolumeClaimRetentionPolicy() *StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration { + return &StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration{} +} + +// WithWhenDeleted sets the WhenDeleted field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the WhenDeleted field is set to the value of the last call. +func (b *StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration) WithWhenDeleted(value v1beta2.PersistentVolumeClaimRetentionPolicyType) *StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration { + b.WhenDeleted = &value + return b +} + +// WithWhenScaled sets the WhenScaled field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the WhenScaled field is set to the value of the last call. 
+func (b *StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration) WithWhenScaled(value v1beta2.PersistentVolumeClaimRetentionPolicyType) *StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration { + b.WhenScaled = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulsetspec.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulsetspec.go index eb2c8555b94e3..08922cea5760b 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulsetspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulsetspec.go @@ -27,14 +27,16 @@ import ( // StatefulSetSpecApplyConfiguration represents an declarative configuration of the StatefulSetSpec type for use // with apply. type StatefulSetSpecApplyConfiguration struct { - Replicas *int32 `json:"replicas,omitempty"` - Selector *v1.LabelSelectorApplyConfiguration `json:"selector,omitempty"` - Template *corev1.PodTemplateSpecApplyConfiguration `json:"template,omitempty"` - VolumeClaimTemplates []corev1.PersistentVolumeClaimApplyConfiguration `json:"volumeClaimTemplates,omitempty"` - ServiceName *string `json:"serviceName,omitempty"` - PodManagementPolicy *v1beta2.PodManagementPolicyType `json:"podManagementPolicy,omitempty"` - UpdateStrategy *StatefulSetUpdateStrategyApplyConfiguration `json:"updateStrategy,omitempty"` - RevisionHistoryLimit *int32 `json:"revisionHistoryLimit,omitempty"` + Replicas *int32 `json:"replicas,omitempty"` + Selector *v1.LabelSelectorApplyConfiguration `json:"selector,omitempty"` + Template *corev1.PodTemplateSpecApplyConfiguration `json:"template,omitempty"` + VolumeClaimTemplates []corev1.PersistentVolumeClaimApplyConfiguration `json:"volumeClaimTemplates,omitempty"` + ServiceName *string `json:"serviceName,omitempty"` + PodManagementPolicy *v1beta2.PodManagementPolicyType `json:"podManagementPolicy,omitempty"` + UpdateStrategy *StatefulSetUpdateStrategyApplyConfiguration `json:"updateStrategy,omitempty"` + RevisionHistoryLimit *int32 `json:"revisionHistoryLimit,omitempty"` + MinReadySeconds *int32 `json:"minReadySeconds,omitempty"` + PersistentVolumeClaimRetentionPolicy *StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration `json:"persistentVolumeClaimRetentionPolicy,omitempty"` } // StatefulSetSpecApplyConfiguration constructs an declarative configuration of the StatefulSetSpec type for use with @@ -111,3 +113,19 @@ func (b *StatefulSetSpecApplyConfiguration) WithRevisionHistoryLimit(value int32 b.RevisionHistoryLimit = &value return b } + +// WithMinReadySeconds sets the MinReadySeconds field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the MinReadySeconds field is set to the value of the last call. +func (b *StatefulSetSpecApplyConfiguration) WithMinReadySeconds(value int32) *StatefulSetSpecApplyConfiguration { + b.MinReadySeconds = &value + return b +} + +// WithPersistentVolumeClaimRetentionPolicy sets the PersistentVolumeClaimRetentionPolicy field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the PersistentVolumeClaimRetentionPolicy field is set to the value of the last call. 
+func (b *StatefulSetSpecApplyConfiguration) WithPersistentVolumeClaimRetentionPolicy(value *StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration) *StatefulSetSpecApplyConfiguration {
+	b.PersistentVolumeClaimRetentionPolicy = value
+	return b
+}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulsetstatus.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulsetstatus.go
index 73f7c0b3f5c7e..63835904c1737 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulsetstatus.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulsetstatus.go
@@ -30,6 +30,7 @@ type StatefulSetStatusApplyConfiguration struct {
 	UpdateRevision    *string                                  `json:"updateRevision,omitempty"`
 	CollisionCount    *int32                                   `json:"collisionCount,omitempty"`
 	Conditions        []StatefulSetConditionApplyConfiguration `json:"conditions,omitempty"`
+	AvailableReplicas *int32                                   `json:"availableReplicas,omitempty"`
 }
 
 // StatefulSetStatusApplyConfiguration constructs a declarative configuration of the StatefulSetStatus type for use with
@@ -114,3 +115,11 @@ func (b *StatefulSetStatusApplyConfiguration) WithConditions(values ...*Stateful
 	}
 	return b
 }
+
+// WithAvailableReplicas sets the AvailableReplicas field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the AvailableReplicas field is set to the value of the last call.
+func (b *StatefulSetStatusApplyConfiguration) WithAvailableReplicas(value int32) *StatefulSetStatusApplyConfiguration {
+	b.AvailableReplicas = &value
+	return b
+}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v1/horizontalpodautoscaler.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v1/horizontalpodautoscaler.go
index 6736bfb17cc7e..bf04b01313f1e 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v1/horizontalpodautoscaler.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v1/horizontalpodautoscaler.go
@@ -50,7 +50,7 @@ func HorizontalPodAutoscaler(name, namespace string) *HorizontalPodAutoscalerApp
 // ExtractHorizontalPodAutoscaler extracts the applied configuration owned by fieldManager from
 // horizontalPodAutoscaler. If no managedFields are found in horizontalPodAutoscaler for fieldManager, a
 // HorizontalPodAutoscalerApplyConfiguration is returned with only the Name, Namespace (if applicable),
-// APIVersion and Kind populated. Is is possible that no managed fields were found for because other
+// APIVersion and Kind populated. It is possible that no managed fields were found for fieldManager because other
 // field managers have taken ownership of all the fields previously owned by fieldManager, or because
 // the fieldManager never owned any fields.
 // horizontalPodAutoscaler must be an unmodified HorizontalPodAutoscaler API object that was retrieved from the Kubernetes API.
 // ExtractHorizontalPodAutoscaler provides a way to perform an extract/modify-in-place/apply workflow.
@@ -59,8 +59,19 @@ func HorizontalPodAutoscaler(name, namespace string) *HorizontalPodAutoscalerApp
 // applied if another fieldManager has updated or force applied any of the previously applied fields.
 // Experimental!
func ExtractHorizontalPodAutoscaler(horizontalPodAutoscaler *apiautoscalingv1.HorizontalPodAutoscaler, fieldManager string) (*HorizontalPodAutoscalerApplyConfiguration, error) { + return extractHorizontalPodAutoscaler(horizontalPodAutoscaler, fieldManager, "") +} + +// ExtractHorizontalPodAutoscalerStatus is the same as ExtractHorizontalPodAutoscaler except +// that it extracts the status subresource applied configuration. +// Experimental! +func ExtractHorizontalPodAutoscalerStatus(horizontalPodAutoscaler *apiautoscalingv1.HorizontalPodAutoscaler, fieldManager string) (*HorizontalPodAutoscalerApplyConfiguration, error) { + return extractHorizontalPodAutoscaler(horizontalPodAutoscaler, fieldManager, "status") +} + +func extractHorizontalPodAutoscaler(horizontalPodAutoscaler *apiautoscalingv1.HorizontalPodAutoscaler, fieldManager string, subresource string) (*HorizontalPodAutoscalerApplyConfiguration, error) { b := &HorizontalPodAutoscalerApplyConfiguration{} - err := managedfields.ExtractInto(horizontalPodAutoscaler, internal.Parser().Type("io.k8s.api.autoscaling.v1.HorizontalPodAutoscaler"), fieldManager, b) + err := managedfields.ExtractInto(horizontalPodAutoscaler, internal.Parser().Type("io.k8s.api.autoscaling.v1.HorizontalPodAutoscaler"), fieldManager, b, subresource) if err != nil { return nil, err } diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v1/scale.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v1/scale.go new file mode 100644 index 0000000000000..2d2cfeb972fc2 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v1/scale.go @@ -0,0 +1,232 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + v1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// ScaleApplyConfiguration represents an declarative configuration of the Scale type for use +// with apply. +type ScaleApplyConfiguration struct { + v1.TypeMetaApplyConfiguration `json:",inline"` + *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *ScaleSpecApplyConfiguration `json:"spec,omitempty"` + Status *ScaleStatusApplyConfiguration `json:"status,omitempty"` +} + +// ScaleApplyConfiguration constructs an declarative configuration of the Scale type for use with +// apply. +func Scale() *ScaleApplyConfiguration { + return &ScaleApplyConfiguration{} +} + +// WithKind sets the Kind field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Kind field is set to the value of the last call. 
+func (b *ScaleApplyConfiguration) WithKind(value string) *ScaleApplyConfiguration { + b.Kind = &value + return b +} + +// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the APIVersion field is set to the value of the last call. +func (b *ScaleApplyConfiguration) WithAPIVersion(value string) *ScaleApplyConfiguration { + b.APIVersion = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *ScaleApplyConfiguration) WithName(value string) *ScaleApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Name = &value + return b +} + +// WithGenerateName sets the GenerateName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GenerateName field is set to the value of the last call. +func (b *ScaleApplyConfiguration) WithGenerateName(value string) *ScaleApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.GenerateName = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. +func (b *ScaleApplyConfiguration) WithNamespace(value string) *ScaleApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Namespace = &value + return b +} + +// WithSelfLink sets the SelfLink field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the SelfLink field is set to the value of the last call. +func (b *ScaleApplyConfiguration) WithSelfLink(value string) *ScaleApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.SelfLink = &value + return b +} + +// WithUID sets the UID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UID field is set to the value of the last call. +func (b *ScaleApplyConfiguration) WithUID(value types.UID) *ScaleApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.UID = &value + return b +} + +// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourceVersion field is set to the value of the last call. +func (b *ScaleApplyConfiguration) WithResourceVersion(value string) *ScaleApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ResourceVersion = &value + return b +} + +// WithGeneration sets the Generation field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. 
+// If called multiple times, the Generation field is set to the value of the last call.
+func (b *ScaleApplyConfiguration) WithGeneration(value int64) *ScaleApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	b.Generation = &value
+	return b
+}
+
+// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the CreationTimestamp field is set to the value of the last call.
+func (b *ScaleApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ScaleApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	b.CreationTimestamp = &value
+	return b
+}
+
+// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the DeletionTimestamp field is set to the value of the last call.
+func (b *ScaleApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ScaleApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	b.DeletionTimestamp = &value
+	return b
+}
+
+// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
+func (b *ScaleApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ScaleApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	b.DeletionGracePeriodSeconds = &value
+	return b
+}
+
+// WithLabels puts the entries into the Labels field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the entries provided by each call will be put on the Labels field,
+// overwriting existing map entries in the Labels field with the same key.
+func (b *ScaleApplyConfiguration) WithLabels(entries map[string]string) *ScaleApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	if b.Labels == nil && len(entries) > 0 {
+		b.Labels = make(map[string]string, len(entries))
+	}
+	for k, v := range entries {
+		b.Labels[k] = v
+	}
+	return b
+}
+
+// WithAnnotations puts the entries into the Annotations field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the entries provided by each call will be put on the Annotations field,
+// overwriting existing map entries in the Annotations field with the same key.
+func (b *ScaleApplyConfiguration) WithAnnotations(entries map[string]string) *ScaleApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	if b.Annotations == nil && len(entries) > 0 {
+		b.Annotations = make(map[string]string, len(entries))
+	}
+	for k, v := range entries {
+		b.Annotations[k] = v
+	}
+	return b
+}
+
+// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the OwnerReferences field.
+func (b *ScaleApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *ScaleApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	for i := range values {
+		if values[i] == nil {
+			panic("nil value passed to WithOwnerReferences")
+		}
+		b.OwnerReferences = append(b.OwnerReferences, *values[i])
+	}
+	return b
+}
+
+// WithFinalizers adds the given value to the Finalizers field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Finalizers field.
+func (b *ScaleApplyConfiguration) WithFinalizers(values ...string) *ScaleApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	for i := range values {
+		b.Finalizers = append(b.Finalizers, values[i])
+	}
+	return b
+}
+
+// WithClusterName sets the ClusterName field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the ClusterName field is set to the value of the last call.
+func (b *ScaleApplyConfiguration) WithClusterName(value string) *ScaleApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	b.ClusterName = &value
+	return b
+}
+
+func (b *ScaleApplyConfiguration) ensureObjectMetaApplyConfigurationExists() {
+	if b.ObjectMetaApplyConfiguration == nil {
+		b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{}
+	}
+}
+
+// WithSpec sets the Spec field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Spec field is set to the value of the last call.
+func (b *ScaleApplyConfiguration) WithSpec(value *ScaleSpecApplyConfiguration) *ScaleApplyConfiguration {
+	b.Spec = value
+	return b
+}
+
+// WithStatus sets the Status field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Status field is set to the value of the last call.
+func (b *ScaleApplyConfiguration) WithStatus(value *ScaleStatusApplyConfiguration) *ScaleApplyConfiguration {
+	b.Status = value
+	return b
+}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v1/scalespec.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v1/scalespec.go
new file mode 100644
index 0000000000000..2339a8fef2f6b
--- /dev/null
+++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v1/scalespec.go
@@ -0,0 +1,39 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+// ScaleSpecApplyConfiguration represents a declarative configuration of the ScaleSpec type for use
+// with apply.
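+//
+// Construction sketch (hypothetical caller code; Scale is the enclosing builder
+// defined in scale.go above, and the replica count is illustrative):
+//
+//	sc := Scale().WithSpec(ScaleSpec().WithReplicas(5))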
+type ScaleSpecApplyConfiguration struct { + Replicas *int32 `json:"replicas,omitempty"` +} + +// ScaleSpecApplyConfiguration constructs an declarative configuration of the ScaleSpec type for use with +// apply. +func ScaleSpec() *ScaleSpecApplyConfiguration { + return &ScaleSpecApplyConfiguration{} +} + +// WithReplicas sets the Replicas field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Replicas field is set to the value of the last call. +func (b *ScaleSpecApplyConfiguration) WithReplicas(value int32) *ScaleSpecApplyConfiguration { + b.Replicas = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v1/scalestatus.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v1/scalestatus.go new file mode 100644 index 0000000000000..81c8d1b30a100 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v1/scalestatus.go @@ -0,0 +1,48 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// ScaleStatusApplyConfiguration represents an declarative configuration of the ScaleStatus type for use +// with apply. +type ScaleStatusApplyConfiguration struct { + Replicas *int32 `json:"replicas,omitempty"` + Selector *string `json:"selector,omitempty"` +} + +// ScaleStatusApplyConfiguration constructs an declarative configuration of the ScaleStatus type for use with +// apply. +func ScaleStatus() *ScaleStatusApplyConfiguration { + return &ScaleStatusApplyConfiguration{} +} + +// WithReplicas sets the Replicas field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Replicas field is set to the value of the last call. +func (b *ScaleStatusApplyConfiguration) WithReplicas(value int32) *ScaleStatusApplyConfiguration { + b.Replicas = &value + return b +} + +// WithSelector sets the Selector field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Selector field is set to the value of the last call. +func (b *ScaleStatusApplyConfiguration) WithSelector(value string) *ScaleStatusApplyConfiguration { + b.Selector = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/containerresourcemetricsource.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/containerresourcemetricsource.go new file mode 100644 index 0000000000000..15ef216d1bc9e --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/containerresourcemetricsource.go @@ -0,0 +1,61 @@ +/* +Copyright The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v2 + +import ( + v1 "k8s.io/api/core/v1" +) + +// ContainerResourceMetricSourceApplyConfiguration represents an declarative configuration of the ContainerResourceMetricSource type for use +// with apply. +type ContainerResourceMetricSourceApplyConfiguration struct { + Name *v1.ResourceName `json:"name,omitempty"` + Target *MetricTargetApplyConfiguration `json:"target,omitempty"` + Container *string `json:"container,omitempty"` +} + +// ContainerResourceMetricSourceApplyConfiguration constructs an declarative configuration of the ContainerResourceMetricSource type for use with +// apply. +func ContainerResourceMetricSource() *ContainerResourceMetricSourceApplyConfiguration { + return &ContainerResourceMetricSourceApplyConfiguration{} +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *ContainerResourceMetricSourceApplyConfiguration) WithName(value v1.ResourceName) *ContainerResourceMetricSourceApplyConfiguration { + b.Name = &value + return b +} + +// WithTarget sets the Target field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Target field is set to the value of the last call. +func (b *ContainerResourceMetricSourceApplyConfiguration) WithTarget(value *MetricTargetApplyConfiguration) *ContainerResourceMetricSourceApplyConfiguration { + b.Target = value + return b +} + +// WithContainer sets the Container field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Container field is set to the value of the last call. +func (b *ContainerResourceMetricSourceApplyConfiguration) WithContainer(value string) *ContainerResourceMetricSourceApplyConfiguration { + b.Container = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/containerresourcemetricstatus.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/containerresourcemetricstatus.go new file mode 100644 index 0000000000000..34213bca3f4c0 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/containerresourcemetricstatus.go @@ -0,0 +1,61 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v2 + +import ( + v1 "k8s.io/api/core/v1" +) + +// ContainerResourceMetricStatusApplyConfiguration represents an declarative configuration of the ContainerResourceMetricStatus type for use +// with apply. +type ContainerResourceMetricStatusApplyConfiguration struct { + Name *v1.ResourceName `json:"name,omitempty"` + Current *MetricValueStatusApplyConfiguration `json:"current,omitempty"` + Container *string `json:"container,omitempty"` +} + +// ContainerResourceMetricStatusApplyConfiguration constructs an declarative configuration of the ContainerResourceMetricStatus type for use with +// apply. +func ContainerResourceMetricStatus() *ContainerResourceMetricStatusApplyConfiguration { + return &ContainerResourceMetricStatusApplyConfiguration{} +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *ContainerResourceMetricStatusApplyConfiguration) WithName(value v1.ResourceName) *ContainerResourceMetricStatusApplyConfiguration { + b.Name = &value + return b +} + +// WithCurrent sets the Current field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Current field is set to the value of the last call. +func (b *ContainerResourceMetricStatusApplyConfiguration) WithCurrent(value *MetricValueStatusApplyConfiguration) *ContainerResourceMetricStatusApplyConfiguration { + b.Current = value + return b +} + +// WithContainer sets the Container field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Container field is set to the value of the last call. +func (b *ContainerResourceMetricStatusApplyConfiguration) WithContainer(value string) *ContainerResourceMetricStatusApplyConfiguration { + b.Container = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/crossversionobjectreference.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/crossversionobjectreference.go new file mode 100644 index 0000000000000..19045706dc8cb --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/crossversionobjectreference.go @@ -0,0 +1,57 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v2
+
+// CrossVersionObjectReferenceApplyConfiguration represents a declarative configuration of the CrossVersionObjectReference type for use
+// with apply.
+type CrossVersionObjectReferenceApplyConfiguration struct {
+	Kind       *string `json:"kind,omitempty"`
+	Name       *string `json:"name,omitempty"`
+	APIVersion *string `json:"apiVersion,omitempty"`
+}
+
+// CrossVersionObjectReferenceApplyConfiguration constructs a declarative configuration of the CrossVersionObjectReference type for use with
+// apply.
+func CrossVersionObjectReference() *CrossVersionObjectReferenceApplyConfiguration {
+	return &CrossVersionObjectReferenceApplyConfiguration{}
+}
+
+// WithKind sets the Kind field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Kind field is set to the value of the last call.
+func (b *CrossVersionObjectReferenceApplyConfiguration) WithKind(value string) *CrossVersionObjectReferenceApplyConfiguration {
+	b.Kind = &value
+	return b
+}
+
+// WithName sets the Name field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Name field is set to the value of the last call.
+func (b *CrossVersionObjectReferenceApplyConfiguration) WithName(value string) *CrossVersionObjectReferenceApplyConfiguration {
+	b.Name = &value
+	return b
+}
+
+// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the APIVersion field is set to the value of the last call.
+func (b *CrossVersionObjectReferenceApplyConfiguration) WithAPIVersion(value string) *CrossVersionObjectReferenceApplyConfiguration {
+	b.APIVersion = &value
+	return b
+}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/externalmetricsource.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/externalmetricsource.go
new file mode 100644
index 0000000000000..11a8eff263d53
--- /dev/null
+++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/externalmetricsource.go
@@ -0,0 +1,48 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v2
+
+// ExternalMetricSourceApplyConfiguration represents a declarative configuration of the ExternalMetricSource type for use
+// with apply.
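+//
+// Hypothetical sketch (the MetricIdentifier and MetricTarget builders are
+// assumed to be generated elsewhere in this package; the metric name, the
+// autoscaling/v2 constant, and the resource quantity are illustrative):
+//
+//	src := ExternalMetricSource().
+//		WithMetric(MetricIdentifier().WithName("queue_depth")).
+//		WithTarget(MetricTarget().
+//			WithType(v2.ValueMetricType).
+//			WithValue(resource.MustParse("100")))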
+type ExternalMetricSourceApplyConfiguration struct { + Metric *MetricIdentifierApplyConfiguration `json:"metric,omitempty"` + Target *MetricTargetApplyConfiguration `json:"target,omitempty"` +} + +// ExternalMetricSourceApplyConfiguration constructs an declarative configuration of the ExternalMetricSource type for use with +// apply. +func ExternalMetricSource() *ExternalMetricSourceApplyConfiguration { + return &ExternalMetricSourceApplyConfiguration{} +} + +// WithMetric sets the Metric field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Metric field is set to the value of the last call. +func (b *ExternalMetricSourceApplyConfiguration) WithMetric(value *MetricIdentifierApplyConfiguration) *ExternalMetricSourceApplyConfiguration { + b.Metric = value + return b +} + +// WithTarget sets the Target field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Target field is set to the value of the last call. +func (b *ExternalMetricSourceApplyConfiguration) WithTarget(value *MetricTargetApplyConfiguration) *ExternalMetricSourceApplyConfiguration { + b.Target = value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/externalmetricstatus.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/externalmetricstatus.go new file mode 100644 index 0000000000000..3b1a0329b81a3 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/externalmetricstatus.go @@ -0,0 +1,48 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v2 + +// ExternalMetricStatusApplyConfiguration represents an declarative configuration of the ExternalMetricStatus type for use +// with apply. +type ExternalMetricStatusApplyConfiguration struct { + Metric *MetricIdentifierApplyConfiguration `json:"metric,omitempty"` + Current *MetricValueStatusApplyConfiguration `json:"current,omitempty"` +} + +// ExternalMetricStatusApplyConfiguration constructs an declarative configuration of the ExternalMetricStatus type for use with +// apply. +func ExternalMetricStatus() *ExternalMetricStatusApplyConfiguration { + return &ExternalMetricStatusApplyConfiguration{} +} + +// WithMetric sets the Metric field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Metric field is set to the value of the last call. 
+func (b *ExternalMetricStatusApplyConfiguration) WithMetric(value *MetricIdentifierApplyConfiguration) *ExternalMetricStatusApplyConfiguration {
+	b.Metric = value
+	return b
+}
+
+// WithCurrent sets the Current field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Current field is set to the value of the last call.
+func (b *ExternalMetricStatusApplyConfiguration) WithCurrent(value *MetricValueStatusApplyConfiguration) *ExternalMetricStatusApplyConfiguration {
+	b.Current = value
+	return b
+}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/horizontalpodautoscaler.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/horizontalpodautoscaler.go
new file mode 100644
index 0000000000000..af805488ecc73
--- /dev/null
+++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/horizontalpodautoscaler.go
@@ -0,0 +1,276 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v2
+
+import (
+	autoscalingv2 "k8s.io/api/autoscaling/v2"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	types "k8s.io/apimachinery/pkg/types"
+	managedfields "k8s.io/apimachinery/pkg/util/managedfields"
+	internal "k8s.io/client-go/applyconfigurations/internal"
+	v1 "k8s.io/client-go/applyconfigurations/meta/v1"
+)
+
+// HorizontalPodAutoscalerApplyConfiguration represents a declarative configuration of the HorizontalPodAutoscaler type for use
+// with apply.
+type HorizontalPodAutoscalerApplyConfiguration struct {
+	v1.TypeMetaApplyConfiguration    `json:",inline"`
+	*v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
+	Spec                             *HorizontalPodAutoscalerSpecApplyConfiguration   `json:"spec,omitempty"`
+	Status                           *HorizontalPodAutoscalerStatusApplyConfiguration `json:"status,omitempty"`
+}
+
+// HorizontalPodAutoscaler constructs a declarative configuration of the HorizontalPodAutoscaler type for use with
+// apply.
+func HorizontalPodAutoscaler(name, namespace string) *HorizontalPodAutoscalerApplyConfiguration {
+	b := &HorizontalPodAutoscalerApplyConfiguration{}
+	b.WithName(name)
+	b.WithNamespace(namespace)
+	b.WithKind("HorizontalPodAutoscaler")
+	b.WithAPIVersion("autoscaling/v2")
+	return b
+}
+
+// ExtractHorizontalPodAutoscaler extracts the applied configuration owned by fieldManager from
+// horizontalPodAutoscaler. If no managedFields are found in horizontalPodAutoscaler for fieldManager, a
+// HorizontalPodAutoscalerApplyConfiguration is returned with only the Name, Namespace (if applicable),
+// APIVersion and Kind populated. It is possible that no managed fields were found for fieldManager because other
+// field managers have taken ownership of all the fields previously owned by fieldManager, or because
+// the fieldManager never owned any fields.
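+//
+// A sketch of the extract/modify-in-place/apply round trip this enables
+// (hypothetical caller code: hpaClient stands in for a typed autoscaling/v2
+// client, the field manager name is illustrative, and error handling is elided):
+//
+//	hpa, _ := hpaClient.Get(ctx, "web", metav1.GetOptions{})
+//	ac, _ := ExtractHorizontalPodAutoscaler(hpa, "my-manager")
+//	ac.WithSpec(HorizontalPodAutoscalerSpec().WithMaxReplicas(10))
+//	hpa, _ = hpaClient.Apply(ctx, ac, metav1.ApplyOptions{FieldManager: "my-manager", Force: true})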
+// horizontalPodAutoscaler must be an unmodified HorizontalPodAutoscaler API object that was retrieved from the Kubernetes API.
+// ExtractHorizontalPodAutoscaler provides a way to perform an extract/modify-in-place/apply workflow.
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
+// applied if another fieldManager has updated or force applied any of the previously applied fields.
+// Experimental!
+func ExtractHorizontalPodAutoscaler(horizontalPodAutoscaler *autoscalingv2.HorizontalPodAutoscaler, fieldManager string) (*HorizontalPodAutoscalerApplyConfiguration, error) {
+	return extractHorizontalPodAutoscaler(horizontalPodAutoscaler, fieldManager, "")
+}
+
+// ExtractHorizontalPodAutoscalerStatus is the same as ExtractHorizontalPodAutoscaler except
+// that it extracts the status subresource applied configuration.
+// Experimental!
+func ExtractHorizontalPodAutoscalerStatus(horizontalPodAutoscaler *autoscalingv2.HorizontalPodAutoscaler, fieldManager string) (*HorizontalPodAutoscalerApplyConfiguration, error) {
+	return extractHorizontalPodAutoscaler(horizontalPodAutoscaler, fieldManager, "status")
+}
+
+func extractHorizontalPodAutoscaler(horizontalPodAutoscaler *autoscalingv2.HorizontalPodAutoscaler, fieldManager string, subresource string) (*HorizontalPodAutoscalerApplyConfiguration, error) {
+	b := &HorizontalPodAutoscalerApplyConfiguration{}
+	err := managedfields.ExtractInto(horizontalPodAutoscaler, internal.Parser().Type("io.k8s.api.autoscaling.v2.HorizontalPodAutoscaler"), fieldManager, b, subresource)
+	if err != nil {
+		return nil, err
+	}
+	b.WithName(horizontalPodAutoscaler.Name)
+	b.WithNamespace(horizontalPodAutoscaler.Namespace)
+
+	b.WithKind("HorizontalPodAutoscaler")
+	b.WithAPIVersion("autoscaling/v2")
+	return b, nil
+}
+
+// WithKind sets the Kind field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Kind field is set to the value of the last call.
+func (b *HorizontalPodAutoscalerApplyConfiguration) WithKind(value string) *HorizontalPodAutoscalerApplyConfiguration {
+	b.Kind = &value
+	return b
+}
+
+// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the APIVersion field is set to the value of the last call.
+func (b *HorizontalPodAutoscalerApplyConfiguration) WithAPIVersion(value string) *HorizontalPodAutoscalerApplyConfiguration {
+	b.APIVersion = &value
+	return b
+}
+
+// WithName sets the Name field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Name field is set to the value of the last call.
+func (b *HorizontalPodAutoscalerApplyConfiguration) WithName(value string) *HorizontalPodAutoscalerApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	b.Name = &value
+	return b
+}
+
+// WithGenerateName sets the GenerateName field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the GenerateName field is set to the value of the last call.
+func (b *HorizontalPodAutoscalerApplyConfiguration) WithGenerateName(value string) *HorizontalPodAutoscalerApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.GenerateName = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. +func (b *HorizontalPodAutoscalerApplyConfiguration) WithNamespace(value string) *HorizontalPodAutoscalerApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Namespace = &value + return b +} + +// WithSelfLink sets the SelfLink field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the SelfLink field is set to the value of the last call. +func (b *HorizontalPodAutoscalerApplyConfiguration) WithSelfLink(value string) *HorizontalPodAutoscalerApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.SelfLink = &value + return b +} + +// WithUID sets the UID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UID field is set to the value of the last call. +func (b *HorizontalPodAutoscalerApplyConfiguration) WithUID(value types.UID) *HorizontalPodAutoscalerApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.UID = &value + return b +} + +// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourceVersion field is set to the value of the last call. +func (b *HorizontalPodAutoscalerApplyConfiguration) WithResourceVersion(value string) *HorizontalPodAutoscalerApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ResourceVersion = &value + return b +} + +// WithGeneration sets the Generation field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Generation field is set to the value of the last call. +func (b *HorizontalPodAutoscalerApplyConfiguration) WithGeneration(value int64) *HorizontalPodAutoscalerApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Generation = &value + return b +} + +// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CreationTimestamp field is set to the value of the last call. +func (b *HorizontalPodAutoscalerApplyConfiguration) WithCreationTimestamp(value metav1.Time) *HorizontalPodAutoscalerApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.CreationTimestamp = &value + return b +} + +// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionTimestamp field is set to the value of the last call. 
+func (b *HorizontalPodAutoscalerApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *HorizontalPodAutoscalerApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	b.DeletionTimestamp = &value
+	return b
+}
+
+// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
+func (b *HorizontalPodAutoscalerApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *HorizontalPodAutoscalerApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	b.DeletionGracePeriodSeconds = &value
+	return b
+}
+
+// WithLabels puts the entries into the Labels field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the entries provided by each call will be put on the Labels field,
+// overwriting existing map entries in the Labels field with the same key.
+func (b *HorizontalPodAutoscalerApplyConfiguration) WithLabels(entries map[string]string) *HorizontalPodAutoscalerApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	if b.Labels == nil && len(entries) > 0 {
+		b.Labels = make(map[string]string, len(entries))
+	}
+	for k, v := range entries {
+		b.Labels[k] = v
+	}
+	return b
+}
+
+// WithAnnotations puts the entries into the Annotations field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the entries provided by each call will be put on the Annotations field,
+// overwriting existing map entries in the Annotations field with the same key.
+func (b *HorizontalPodAutoscalerApplyConfiguration) WithAnnotations(entries map[string]string) *HorizontalPodAutoscalerApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	if b.Annotations == nil && len(entries) > 0 {
+		b.Annotations = make(map[string]string, len(entries))
+	}
+	for k, v := range entries {
+		b.Annotations[k] = v
+	}
+	return b
+}
+
+// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the OwnerReferences field.
+func (b *HorizontalPodAutoscalerApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *HorizontalPodAutoscalerApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	for i := range values {
+		if values[i] == nil {
+			panic("nil value passed to WithOwnerReferences")
+		}
+		b.OwnerReferences = append(b.OwnerReferences, *values[i])
+	}
+	return b
+}
+
+// WithFinalizers adds the given value to the Finalizers field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Finalizers field.
+func (b *HorizontalPodAutoscalerApplyConfiguration) WithFinalizers(values ...string) *HorizontalPodAutoscalerApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	for i := range values {
+		b.Finalizers = append(b.Finalizers, values[i])
+	}
+	return b
+}
+
+// WithClusterName sets the ClusterName field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the ClusterName field is set to the value of the last call.
+func (b *HorizontalPodAutoscalerApplyConfiguration) WithClusterName(value string) *HorizontalPodAutoscalerApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	b.ClusterName = &value
+	return b
+}
+
+func (b *HorizontalPodAutoscalerApplyConfiguration) ensureObjectMetaApplyConfigurationExists() {
+	if b.ObjectMetaApplyConfiguration == nil {
+		b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{}
+	}
+}
+
+// WithSpec sets the Spec field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Spec field is set to the value of the last call.
+func (b *HorizontalPodAutoscalerApplyConfiguration) WithSpec(value *HorizontalPodAutoscalerSpecApplyConfiguration) *HorizontalPodAutoscalerApplyConfiguration {
+	b.Spec = value
+	return b
+}
+
+// WithStatus sets the Status field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Status field is set to the value of the last call.
+func (b *HorizontalPodAutoscalerApplyConfiguration) WithStatus(value *HorizontalPodAutoscalerStatusApplyConfiguration) *HorizontalPodAutoscalerApplyConfiguration {
+	b.Status = value
+	return b
+}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/horizontalpodautoscalerbehavior.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/horizontalpodautoscalerbehavior.go
new file mode 100644
index 0000000000000..e6fdabd7c88b4
--- /dev/null
+++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/horizontalpodautoscalerbehavior.go
@@ -0,0 +1,48 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v2
+
+// HorizontalPodAutoscalerBehaviorApplyConfiguration represents a declarative configuration of the HorizontalPodAutoscalerBehavior type for use
+// with apply.
+type HorizontalPodAutoscalerBehaviorApplyConfiguration struct {
+	ScaleUp   *HPAScalingRulesApplyConfiguration `json:"scaleUp,omitempty"`
+	ScaleDown *HPAScalingRulesApplyConfiguration `json:"scaleDown,omitempty"`
+}
+
+// HorizontalPodAutoscalerBehaviorApplyConfiguration constructs a declarative configuration of the HorizontalPodAutoscalerBehavior type for use with
+// apply.
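+//
+// Hypothetical tuning sketch (HPAScalingRules is assumed to be the generated
+// builder for the scaling-rules type elsewhere in this package; the 300-second
+// window is illustrative):
+//
+//	behavior := HorizontalPodAutoscalerBehavior().
+//		WithScaleDown(HPAScalingRules().WithStabilizationWindowSeconds(300))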
+func HorizontalPodAutoscalerBehavior() *HorizontalPodAutoscalerBehaviorApplyConfiguration { + return &HorizontalPodAutoscalerBehaviorApplyConfiguration{} +} + +// WithScaleUp sets the ScaleUp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ScaleUp field is set to the value of the last call. +func (b *HorizontalPodAutoscalerBehaviorApplyConfiguration) WithScaleUp(value *HPAScalingRulesApplyConfiguration) *HorizontalPodAutoscalerBehaviorApplyConfiguration { + b.ScaleUp = value + return b +} + +// WithScaleDown sets the ScaleDown field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ScaleDown field is set to the value of the last call. +func (b *HorizontalPodAutoscalerBehaviorApplyConfiguration) WithScaleDown(value *HPAScalingRulesApplyConfiguration) *HorizontalPodAutoscalerBehaviorApplyConfiguration { + b.ScaleDown = value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/horizontalpodautoscalercondition.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/horizontalpodautoscalercondition.go new file mode 100644 index 0000000000000..c020eccd3d6d2 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/horizontalpodautoscalercondition.go @@ -0,0 +1,81 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v2 + +import ( + v2 "k8s.io/api/autoscaling/v2" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// HorizontalPodAutoscalerConditionApplyConfiguration represents an declarative configuration of the HorizontalPodAutoscalerCondition type for use +// with apply. +type HorizontalPodAutoscalerConditionApplyConfiguration struct { + Type *v2.HorizontalPodAutoscalerConditionType `json:"type,omitempty"` + Status *v1.ConditionStatus `json:"status,omitempty"` + LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"` + Reason *string `json:"reason,omitempty"` + Message *string `json:"message,omitempty"` +} + +// HorizontalPodAutoscalerConditionApplyConfiguration constructs an declarative configuration of the HorizontalPodAutoscalerCondition type for use with +// apply. +func HorizontalPodAutoscalerCondition() *HorizontalPodAutoscalerConditionApplyConfiguration { + return &HorizontalPodAutoscalerConditionApplyConfiguration{} +} + +// WithType sets the Type field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Type field is set to the value of the last call. 
+func (b *HorizontalPodAutoscalerConditionApplyConfiguration) WithType(value v2.HorizontalPodAutoscalerConditionType) *HorizontalPodAutoscalerConditionApplyConfiguration { + b.Type = &value + return b +} + +// WithStatus sets the Status field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Status field is set to the value of the last call. +func (b *HorizontalPodAutoscalerConditionApplyConfiguration) WithStatus(value v1.ConditionStatus) *HorizontalPodAutoscalerConditionApplyConfiguration { + b.Status = &value + return b +} + +// WithLastTransitionTime sets the LastTransitionTime field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the LastTransitionTime field is set to the value of the last call. +func (b *HorizontalPodAutoscalerConditionApplyConfiguration) WithLastTransitionTime(value metav1.Time) *HorizontalPodAutoscalerConditionApplyConfiguration { + b.LastTransitionTime = &value + return b +} + +// WithReason sets the Reason field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Reason field is set to the value of the last call. +func (b *HorizontalPodAutoscalerConditionApplyConfiguration) WithReason(value string) *HorizontalPodAutoscalerConditionApplyConfiguration { + b.Reason = &value + return b +} + +// WithMessage sets the Message field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Message field is set to the value of the last call. +func (b *HorizontalPodAutoscalerConditionApplyConfiguration) WithMessage(value string) *HorizontalPodAutoscalerConditionApplyConfiguration { + b.Message = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/horizontalpodautoscalerspec.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/horizontalpodautoscalerspec.go new file mode 100644 index 0000000000000..c36bc3f225c27 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/horizontalpodautoscalerspec.go @@ -0,0 +1,80 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v2 + +// HorizontalPodAutoscalerSpecApplyConfiguration represents an declarative configuration of the HorizontalPodAutoscalerSpec type for use +// with apply. 
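+//
+// A chained-construction sketch using the builders defined below and the
+// CrossVersionObjectReference builder added earlier in this patch (the target
+// names are illustrative):
+//
+//	spec := HorizontalPodAutoscalerSpec().
+//		WithScaleTargetRef(CrossVersionObjectReference().
+//			WithKind("Deployment").WithName("web").WithAPIVersion("apps/v1")).
+//		WithMinReplicas(2).
+//		WithMaxReplicas(10)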
+type HorizontalPodAutoscalerSpecApplyConfiguration struct { + ScaleTargetRef *CrossVersionObjectReferenceApplyConfiguration `json:"scaleTargetRef,omitempty"` + MinReplicas *int32 `json:"minReplicas,omitempty"` + MaxReplicas *int32 `json:"maxReplicas,omitempty"` + Metrics []MetricSpecApplyConfiguration `json:"metrics,omitempty"` + Behavior *HorizontalPodAutoscalerBehaviorApplyConfiguration `json:"behavior,omitempty"` +} + +// HorizontalPodAutoscalerSpecApplyConfiguration constructs an declarative configuration of the HorizontalPodAutoscalerSpec type for use with +// apply. +func HorizontalPodAutoscalerSpec() *HorizontalPodAutoscalerSpecApplyConfiguration { + return &HorizontalPodAutoscalerSpecApplyConfiguration{} +} + +// WithScaleTargetRef sets the ScaleTargetRef field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ScaleTargetRef field is set to the value of the last call. +func (b *HorizontalPodAutoscalerSpecApplyConfiguration) WithScaleTargetRef(value *CrossVersionObjectReferenceApplyConfiguration) *HorizontalPodAutoscalerSpecApplyConfiguration { + b.ScaleTargetRef = value + return b +} + +// WithMinReplicas sets the MinReplicas field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the MinReplicas field is set to the value of the last call. +func (b *HorizontalPodAutoscalerSpecApplyConfiguration) WithMinReplicas(value int32) *HorizontalPodAutoscalerSpecApplyConfiguration { + b.MinReplicas = &value + return b +} + +// WithMaxReplicas sets the MaxReplicas field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the MaxReplicas field is set to the value of the last call. +func (b *HorizontalPodAutoscalerSpecApplyConfiguration) WithMaxReplicas(value int32) *HorizontalPodAutoscalerSpecApplyConfiguration { + b.MaxReplicas = &value + return b +} + +// WithMetrics adds the given value to the Metrics field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Metrics field. +func (b *HorizontalPodAutoscalerSpecApplyConfiguration) WithMetrics(values ...*MetricSpecApplyConfiguration) *HorizontalPodAutoscalerSpecApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithMetrics") + } + b.Metrics = append(b.Metrics, *values[i]) + } + return b +} + +// WithBehavior sets the Behavior field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Behavior field is set to the value of the last call. 
+func (b *HorizontalPodAutoscalerSpecApplyConfiguration) WithBehavior(value *HorizontalPodAutoscalerBehaviorApplyConfiguration) *HorizontalPodAutoscalerSpecApplyConfiguration { + b.Behavior = value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/horizontalpodautoscalerstatus.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/horizontalpodautoscalerstatus.go new file mode 100644 index 0000000000000..d4d551df8561a --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/horizontalpodautoscalerstatus.go @@ -0,0 +1,98 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v2 + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// HorizontalPodAutoscalerStatusApplyConfiguration represents an declarative configuration of the HorizontalPodAutoscalerStatus type for use +// with apply. +type HorizontalPodAutoscalerStatusApplyConfiguration struct { + ObservedGeneration *int64 `json:"observedGeneration,omitempty"` + LastScaleTime *v1.Time `json:"lastScaleTime,omitempty"` + CurrentReplicas *int32 `json:"currentReplicas,omitempty"` + DesiredReplicas *int32 `json:"desiredReplicas,omitempty"` + CurrentMetrics []MetricStatusApplyConfiguration `json:"currentMetrics,omitempty"` + Conditions []HorizontalPodAutoscalerConditionApplyConfiguration `json:"conditions,omitempty"` +} + +// HorizontalPodAutoscalerStatusApplyConfiguration constructs an declarative configuration of the HorizontalPodAutoscalerStatus type for use with +// apply. +func HorizontalPodAutoscalerStatus() *HorizontalPodAutoscalerStatusApplyConfiguration { + return &HorizontalPodAutoscalerStatusApplyConfiguration{} +} + +// WithObservedGeneration sets the ObservedGeneration field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ObservedGeneration field is set to the value of the last call. +func (b *HorizontalPodAutoscalerStatusApplyConfiguration) WithObservedGeneration(value int64) *HorizontalPodAutoscalerStatusApplyConfiguration { + b.ObservedGeneration = &value + return b +} + +// WithLastScaleTime sets the LastScaleTime field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the LastScaleTime field is set to the value of the last call. +func (b *HorizontalPodAutoscalerStatusApplyConfiguration) WithLastScaleTime(value v1.Time) *HorizontalPodAutoscalerStatusApplyConfiguration { + b.LastScaleTime = &value + return b +} + +// WithCurrentReplicas sets the CurrentReplicas field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. 
+// If called multiple times, the CurrentReplicas field is set to the value of the last call. +func (b *HorizontalPodAutoscalerStatusApplyConfiguration) WithCurrentReplicas(value int32) *HorizontalPodAutoscalerStatusApplyConfiguration { + b.CurrentReplicas = &value + return b +} + +// WithDesiredReplicas sets the DesiredReplicas field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DesiredReplicas field is set to the value of the last call. +func (b *HorizontalPodAutoscalerStatusApplyConfiguration) WithDesiredReplicas(value int32) *HorizontalPodAutoscalerStatusApplyConfiguration { + b.DesiredReplicas = &value + return b +} + +// WithCurrentMetrics adds the given value to the CurrentMetrics field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the CurrentMetrics field. +func (b *HorizontalPodAutoscalerStatusApplyConfiguration) WithCurrentMetrics(values ...*MetricStatusApplyConfiguration) *HorizontalPodAutoscalerStatusApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithCurrentMetrics") + } + b.CurrentMetrics = append(b.CurrentMetrics, *values[i]) + } + return b +} + +// WithConditions adds the given value to the Conditions field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Conditions field. +func (b *HorizontalPodAutoscalerStatusApplyConfiguration) WithConditions(values ...*HorizontalPodAutoscalerConditionApplyConfiguration) *HorizontalPodAutoscalerStatusApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithConditions") + } + b.Conditions = append(b.Conditions, *values[i]) + } + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/hpascalingpolicy.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/hpascalingpolicy.go new file mode 100644 index 0000000000000..139f0fb5c75d2 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/hpascalingpolicy.go @@ -0,0 +1,61 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v2 + +import ( + v2 "k8s.io/api/autoscaling/v2" +) + +// HPAScalingPolicyApplyConfiguration represents an declarative configuration of the HPAScalingPolicy type for use +// with apply. 
+type HPAScalingPolicyApplyConfiguration struct { + Type *v2.HPAScalingPolicyType `json:"type,omitempty"` + Value *int32 `json:"value,omitempty"` + PeriodSeconds *int32 `json:"periodSeconds,omitempty"` +} + +// HPAScalingPolicyApplyConfiguration constructs an declarative configuration of the HPAScalingPolicy type for use with +// apply. +func HPAScalingPolicy() *HPAScalingPolicyApplyConfiguration { + return &HPAScalingPolicyApplyConfiguration{} +} + +// WithType sets the Type field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Type field is set to the value of the last call. +func (b *HPAScalingPolicyApplyConfiguration) WithType(value v2.HPAScalingPolicyType) *HPAScalingPolicyApplyConfiguration { + b.Type = &value + return b +} + +// WithValue sets the Value field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Value field is set to the value of the last call. +func (b *HPAScalingPolicyApplyConfiguration) WithValue(value int32) *HPAScalingPolicyApplyConfiguration { + b.Value = &value + return b +} + +// WithPeriodSeconds sets the PeriodSeconds field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the PeriodSeconds field is set to the value of the last call. +func (b *HPAScalingPolicyApplyConfiguration) WithPeriodSeconds(value int32) *HPAScalingPolicyApplyConfiguration { + b.PeriodSeconds = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/hpascalingrules.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/hpascalingrules.go new file mode 100644 index 0000000000000..e768076aa4a00 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/hpascalingrules.go @@ -0,0 +1,66 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v2 + +import ( + v2 "k8s.io/api/autoscaling/v2" +) + +// HPAScalingRulesApplyConfiguration represents an declarative configuration of the HPAScalingRules type for use +// with apply. +type HPAScalingRulesApplyConfiguration struct { + StabilizationWindowSeconds *int32 `json:"stabilizationWindowSeconds,omitempty"` + SelectPolicy *v2.ScalingPolicySelect `json:"selectPolicy,omitempty"` + Policies []HPAScalingPolicyApplyConfiguration `json:"policies,omitempty"` +} + +// HPAScalingRulesApplyConfiguration constructs an declarative configuration of the HPAScalingRules type for use with +// apply. 
+func HPAScalingRules() *HPAScalingRulesApplyConfiguration { + return &HPAScalingRulesApplyConfiguration{} +} + +// WithStabilizationWindowSeconds sets the StabilizationWindowSeconds field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the StabilizationWindowSeconds field is set to the value of the last call. +func (b *HPAScalingRulesApplyConfiguration) WithStabilizationWindowSeconds(value int32) *HPAScalingRulesApplyConfiguration { + b.StabilizationWindowSeconds = &value + return b +} + +// WithSelectPolicy sets the SelectPolicy field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the SelectPolicy field is set to the value of the last call. +func (b *HPAScalingRulesApplyConfiguration) WithSelectPolicy(value v2.ScalingPolicySelect) *HPAScalingRulesApplyConfiguration { + b.SelectPolicy = &value + return b +} + +// WithPolicies adds the given value to the Policies field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Policies field. +func (b *HPAScalingRulesApplyConfiguration) WithPolicies(values ...*HPAScalingPolicyApplyConfiguration) *HPAScalingRulesApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithPolicies") + } + b.Policies = append(b.Policies, *values[i]) + } + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/metricidentifier.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/metricidentifier.go new file mode 100644 index 0000000000000..312ad3ddd6a2b --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/metricidentifier.go @@ -0,0 +1,52 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v2 + +import ( + v1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// MetricIdentifierApplyConfiguration represents an declarative configuration of the MetricIdentifier type for use +// with apply. +type MetricIdentifierApplyConfiguration struct { + Name *string `json:"name,omitempty"` + Selector *v1.LabelSelectorApplyConfiguration `json:"selector,omitempty"` +} + +// MetricIdentifierApplyConfiguration constructs an declarative configuration of the MetricIdentifier type for use with +// apply. +func MetricIdentifier() *MetricIdentifierApplyConfiguration { + return &MetricIdentifierApplyConfiguration{} +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. 
+// If called multiple times, the Name field is set to the value of the last call. +func (b *MetricIdentifierApplyConfiguration) WithName(value string) *MetricIdentifierApplyConfiguration { + b.Name = &value + return b +} + +// WithSelector sets the Selector field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Selector field is set to the value of the last call. +func (b *MetricIdentifierApplyConfiguration) WithSelector(value *v1.LabelSelectorApplyConfiguration) *MetricIdentifierApplyConfiguration { + b.Selector = value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/metricspec.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/metricspec.go new file mode 100644 index 0000000000000..094ead6c16d6d --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/metricspec.go @@ -0,0 +1,88 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v2 + +import ( + v2 "k8s.io/api/autoscaling/v2" +) + +// MetricSpecApplyConfiguration represents an declarative configuration of the MetricSpec type for use +// with apply. +type MetricSpecApplyConfiguration struct { + Type *v2.MetricSourceType `json:"type,omitempty"` + Object *ObjectMetricSourceApplyConfiguration `json:"object,omitempty"` + Pods *PodsMetricSourceApplyConfiguration `json:"pods,omitempty"` + Resource *ResourceMetricSourceApplyConfiguration `json:"resource,omitempty"` + ContainerResource *ContainerResourceMetricSourceApplyConfiguration `json:"containerResource,omitempty"` + External *ExternalMetricSourceApplyConfiguration `json:"external,omitempty"` +} + +// MetricSpecApplyConfiguration constructs an declarative configuration of the MetricSpec type for use with +// apply. +func MetricSpec() *MetricSpecApplyConfiguration { + return &MetricSpecApplyConfiguration{} +} + +// WithType sets the Type field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Type field is set to the value of the last call. +func (b *MetricSpecApplyConfiguration) WithType(value v2.MetricSourceType) *MetricSpecApplyConfiguration { + b.Type = &value + return b +} + +// WithObject sets the Object field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Object field is set to the value of the last call. 
+func (b *MetricSpecApplyConfiguration) WithObject(value *ObjectMetricSourceApplyConfiguration) *MetricSpecApplyConfiguration { + b.Object = value + return b +} + +// WithPods sets the Pods field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Pods field is set to the value of the last call. +func (b *MetricSpecApplyConfiguration) WithPods(value *PodsMetricSourceApplyConfiguration) *MetricSpecApplyConfiguration { + b.Pods = value + return b +} + +// WithResource sets the Resource field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Resource field is set to the value of the last call. +func (b *MetricSpecApplyConfiguration) WithResource(value *ResourceMetricSourceApplyConfiguration) *MetricSpecApplyConfiguration { + b.Resource = value + return b +} + +// WithContainerResource sets the ContainerResource field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ContainerResource field is set to the value of the last call. +func (b *MetricSpecApplyConfiguration) WithContainerResource(value *ContainerResourceMetricSourceApplyConfiguration) *MetricSpecApplyConfiguration { + b.ContainerResource = value + return b +} + +// WithExternal sets the External field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the External field is set to the value of the last call. +func (b *MetricSpecApplyConfiguration) WithExternal(value *ExternalMetricSourceApplyConfiguration) *MetricSpecApplyConfiguration { + b.External = value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/metricstatus.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/metricstatus.go new file mode 100644 index 0000000000000..c65ad446f0f3a --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/metricstatus.go @@ -0,0 +1,88 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v2 + +import ( + v2 "k8s.io/api/autoscaling/v2" +) + +// MetricStatusApplyConfiguration represents an declarative configuration of the MetricStatus type for use +// with apply. 
+type MetricStatusApplyConfiguration struct { + Type *v2.MetricSourceType `json:"type,omitempty"` + Object *ObjectMetricStatusApplyConfiguration `json:"object,omitempty"` + Pods *PodsMetricStatusApplyConfiguration `json:"pods,omitempty"` + Resource *ResourceMetricStatusApplyConfiguration `json:"resource,omitempty"` + ContainerResource *ContainerResourceMetricStatusApplyConfiguration `json:"containerResource,omitempty"` + External *ExternalMetricStatusApplyConfiguration `json:"external,omitempty"` +} + +// MetricStatusApplyConfiguration constructs an declarative configuration of the MetricStatus type for use with +// apply. +func MetricStatus() *MetricStatusApplyConfiguration { + return &MetricStatusApplyConfiguration{} +} + +// WithType sets the Type field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Type field is set to the value of the last call. +func (b *MetricStatusApplyConfiguration) WithType(value v2.MetricSourceType) *MetricStatusApplyConfiguration { + b.Type = &value + return b +} + +// WithObject sets the Object field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Object field is set to the value of the last call. +func (b *MetricStatusApplyConfiguration) WithObject(value *ObjectMetricStatusApplyConfiguration) *MetricStatusApplyConfiguration { + b.Object = value + return b +} + +// WithPods sets the Pods field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Pods field is set to the value of the last call. +func (b *MetricStatusApplyConfiguration) WithPods(value *PodsMetricStatusApplyConfiguration) *MetricStatusApplyConfiguration { + b.Pods = value + return b +} + +// WithResource sets the Resource field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Resource field is set to the value of the last call. +func (b *MetricStatusApplyConfiguration) WithResource(value *ResourceMetricStatusApplyConfiguration) *MetricStatusApplyConfiguration { + b.Resource = value + return b +} + +// WithContainerResource sets the ContainerResource field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ContainerResource field is set to the value of the last call. +func (b *MetricStatusApplyConfiguration) WithContainerResource(value *ContainerResourceMetricStatusApplyConfiguration) *MetricStatusApplyConfiguration { + b.ContainerResource = value + return b +} + +// WithExternal sets the External field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the External field is set to the value of the last call. 
+func (b *MetricStatusApplyConfiguration) WithExternal(value *ExternalMetricStatusApplyConfiguration) *MetricStatusApplyConfiguration { + b.External = value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/metrictarget.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/metrictarget.go new file mode 100644 index 0000000000000..f301e4d2be25d --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/metrictarget.go @@ -0,0 +1,71 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v2 + +import ( + v2 "k8s.io/api/autoscaling/v2" + resource "k8s.io/apimachinery/pkg/api/resource" +) + +// MetricTargetApplyConfiguration represents an declarative configuration of the MetricTarget type for use +// with apply. +type MetricTargetApplyConfiguration struct { + Type *v2.MetricTargetType `json:"type,omitempty"` + Value *resource.Quantity `json:"value,omitempty"` + AverageValue *resource.Quantity `json:"averageValue,omitempty"` + AverageUtilization *int32 `json:"averageUtilization,omitempty"` +} + +// MetricTargetApplyConfiguration constructs an declarative configuration of the MetricTarget type for use with +// apply. +func MetricTarget() *MetricTargetApplyConfiguration { + return &MetricTargetApplyConfiguration{} +} + +// WithType sets the Type field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Type field is set to the value of the last call. +func (b *MetricTargetApplyConfiguration) WithType(value v2.MetricTargetType) *MetricTargetApplyConfiguration { + b.Type = &value + return b +} + +// WithValue sets the Value field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Value field is set to the value of the last call. +func (b *MetricTargetApplyConfiguration) WithValue(value resource.Quantity) *MetricTargetApplyConfiguration { + b.Value = &value + return b +} + +// WithAverageValue sets the AverageValue field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the AverageValue field is set to the value of the last call. +func (b *MetricTargetApplyConfiguration) WithAverageValue(value resource.Quantity) *MetricTargetApplyConfiguration { + b.AverageValue = &value + return b +} + +// WithAverageUtilization sets the AverageUtilization field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the AverageUtilization field is set to the value of the last call. 
+func (b *MetricTargetApplyConfiguration) WithAverageUtilization(value int32) *MetricTargetApplyConfiguration { + b.AverageUtilization = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/metricvaluestatus.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/metricvaluestatus.go new file mode 100644 index 0000000000000..e8474b18905ee --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/metricvaluestatus.go @@ -0,0 +1,61 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v2 + +import ( + resource "k8s.io/apimachinery/pkg/api/resource" +) + +// MetricValueStatusApplyConfiguration represents an declarative configuration of the MetricValueStatus type for use +// with apply. +type MetricValueStatusApplyConfiguration struct { + Value *resource.Quantity `json:"value,omitempty"` + AverageValue *resource.Quantity `json:"averageValue,omitempty"` + AverageUtilization *int32 `json:"averageUtilization,omitempty"` +} + +// MetricValueStatusApplyConfiguration constructs an declarative configuration of the MetricValueStatus type for use with +// apply. +func MetricValueStatus() *MetricValueStatusApplyConfiguration { + return &MetricValueStatusApplyConfiguration{} +} + +// WithValue sets the Value field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Value field is set to the value of the last call. +func (b *MetricValueStatusApplyConfiguration) WithValue(value resource.Quantity) *MetricValueStatusApplyConfiguration { + b.Value = &value + return b +} + +// WithAverageValue sets the AverageValue field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the AverageValue field is set to the value of the last call. +func (b *MetricValueStatusApplyConfiguration) WithAverageValue(value resource.Quantity) *MetricValueStatusApplyConfiguration { + b.AverageValue = &value + return b +} + +// WithAverageUtilization sets the AverageUtilization field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the AverageUtilization field is set to the value of the last call. 
+func (b *MetricValueStatusApplyConfiguration) WithAverageUtilization(value int32) *MetricValueStatusApplyConfiguration { + b.AverageUtilization = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/objectmetricsource.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/objectmetricsource.go new file mode 100644 index 0000000000000..a9482565e069a --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/objectmetricsource.go @@ -0,0 +1,57 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v2 + +// ObjectMetricSourceApplyConfiguration represents an declarative configuration of the ObjectMetricSource type for use +// with apply. +type ObjectMetricSourceApplyConfiguration struct { + DescribedObject *CrossVersionObjectReferenceApplyConfiguration `json:"describedObject,omitempty"` + Target *MetricTargetApplyConfiguration `json:"target,omitempty"` + Metric *MetricIdentifierApplyConfiguration `json:"metric,omitempty"` +} + +// ObjectMetricSourceApplyConfiguration constructs an declarative configuration of the ObjectMetricSource type for use with +// apply. +func ObjectMetricSource() *ObjectMetricSourceApplyConfiguration { + return &ObjectMetricSourceApplyConfiguration{} +} + +// WithDescribedObject sets the DescribedObject field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DescribedObject field is set to the value of the last call. +func (b *ObjectMetricSourceApplyConfiguration) WithDescribedObject(value *CrossVersionObjectReferenceApplyConfiguration) *ObjectMetricSourceApplyConfiguration { + b.DescribedObject = value + return b +} + +// WithTarget sets the Target field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Target field is set to the value of the last call. +func (b *ObjectMetricSourceApplyConfiguration) WithTarget(value *MetricTargetApplyConfiguration) *ObjectMetricSourceApplyConfiguration { + b.Target = value + return b +} + +// WithMetric sets the Metric field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Metric field is set to the value of the last call. 
+func (b *ObjectMetricSourceApplyConfiguration) WithMetric(value *MetricIdentifierApplyConfiguration) *ObjectMetricSourceApplyConfiguration { + b.Metric = value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/objectmetricstatus.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/objectmetricstatus.go new file mode 100644 index 0000000000000..70ba43beddf95 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/objectmetricstatus.go @@ -0,0 +1,57 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v2 + +// ObjectMetricStatusApplyConfiguration represents an declarative configuration of the ObjectMetricStatus type for use +// with apply. +type ObjectMetricStatusApplyConfiguration struct { + Metric *MetricIdentifierApplyConfiguration `json:"metric,omitempty"` + Current *MetricValueStatusApplyConfiguration `json:"current,omitempty"` + DescribedObject *CrossVersionObjectReferenceApplyConfiguration `json:"describedObject,omitempty"` +} + +// ObjectMetricStatusApplyConfiguration constructs an declarative configuration of the ObjectMetricStatus type for use with +// apply. +func ObjectMetricStatus() *ObjectMetricStatusApplyConfiguration { + return &ObjectMetricStatusApplyConfiguration{} +} + +// WithMetric sets the Metric field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Metric field is set to the value of the last call. +func (b *ObjectMetricStatusApplyConfiguration) WithMetric(value *MetricIdentifierApplyConfiguration) *ObjectMetricStatusApplyConfiguration { + b.Metric = value + return b +} + +// WithCurrent sets the Current field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Current field is set to the value of the last call. +func (b *ObjectMetricStatusApplyConfiguration) WithCurrent(value *MetricValueStatusApplyConfiguration) *ObjectMetricStatusApplyConfiguration { + b.Current = value + return b +} + +// WithDescribedObject sets the DescribedObject field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DescribedObject field is set to the value of the last call. 
+func (b *ObjectMetricStatusApplyConfiguration) WithDescribedObject(value *CrossVersionObjectReferenceApplyConfiguration) *ObjectMetricStatusApplyConfiguration { + b.DescribedObject = value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/podresourcemetricsource.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/podresourcemetricsource.go new file mode 100644 index 0000000000000..86601cc48a896 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/podresourcemetricsource.go @@ -0,0 +1,52 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v2 + +import ( + v1 "k8s.io/api/core/v1" +) + +// PodResourceMetricSourceApplyConfiguration represents an declarative configuration of the PodResourceMetricSource type for use +// with apply. +type PodResourceMetricSourceApplyConfiguration struct { + Name *v1.ResourceName `json:"name,omitempty"` + Target *MetricTargetApplyConfiguration `json:"target,omitempty"` +} + +// PodResourceMetricSourceApplyConfiguration constructs an declarative configuration of the PodResourceMetricSource type for use with +// apply. +func PodResourceMetricSource() *PodResourceMetricSourceApplyConfiguration { + return &PodResourceMetricSourceApplyConfiguration{} +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *PodResourceMetricSourceApplyConfiguration) WithName(value v1.ResourceName) *PodResourceMetricSourceApplyConfiguration { + b.Name = &value + return b +} + +// WithTarget sets the Target field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Target field is set to the value of the last call. +func (b *PodResourceMetricSourceApplyConfiguration) WithTarget(value *MetricTargetApplyConfiguration) *PodResourceMetricSourceApplyConfiguration { + b.Target = value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/podsmetricsource.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/podsmetricsource.go new file mode 100644 index 0000000000000..0a7a5c2595ed2 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/podsmetricsource.go @@ -0,0 +1,48 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v2 + +// PodsMetricSourceApplyConfiguration represents an declarative configuration of the PodsMetricSource type for use +// with apply. +type PodsMetricSourceApplyConfiguration struct { + Metric *MetricIdentifierApplyConfiguration `json:"metric,omitempty"` + Target *MetricTargetApplyConfiguration `json:"target,omitempty"` +} + +// PodsMetricSourceApplyConfiguration constructs an declarative configuration of the PodsMetricSource type for use with +// apply. +func PodsMetricSource() *PodsMetricSourceApplyConfiguration { + return &PodsMetricSourceApplyConfiguration{} +} + +// WithMetric sets the Metric field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Metric field is set to the value of the last call. +func (b *PodsMetricSourceApplyConfiguration) WithMetric(value *MetricIdentifierApplyConfiguration) *PodsMetricSourceApplyConfiguration { + b.Metric = value + return b +} + +// WithTarget sets the Target field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Target field is set to the value of the last call. +func (b *PodsMetricSourceApplyConfiguration) WithTarget(value *MetricTargetApplyConfiguration) *PodsMetricSourceApplyConfiguration { + b.Target = value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/podsmetricstatus.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/podsmetricstatus.go new file mode 100644 index 0000000000000..865fcc33e30d2 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/podsmetricstatus.go @@ -0,0 +1,48 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v2 + +// PodsMetricStatusApplyConfiguration represents an declarative configuration of the PodsMetricStatus type for use +// with apply. +type PodsMetricStatusApplyConfiguration struct { + Metric *MetricIdentifierApplyConfiguration `json:"metric,omitempty"` + Current *MetricValueStatusApplyConfiguration `json:"current,omitempty"` +} + +// PodsMetricStatusApplyConfiguration constructs an declarative configuration of the PodsMetricStatus type for use with +// apply. 
+func PodsMetricStatus() *PodsMetricStatusApplyConfiguration { + return &PodsMetricStatusApplyConfiguration{} +} + +// WithMetric sets the Metric field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Metric field is set to the value of the last call. +func (b *PodsMetricStatusApplyConfiguration) WithMetric(value *MetricIdentifierApplyConfiguration) *PodsMetricStatusApplyConfiguration { + b.Metric = value + return b +} + +// WithCurrent sets the Current field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Current field is set to the value of the last call. +func (b *PodsMetricStatusApplyConfiguration) WithCurrent(value *MetricValueStatusApplyConfiguration) *PodsMetricStatusApplyConfiguration { + b.Current = value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/resourcemetricsource.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/resourcemetricsource.go new file mode 100644 index 0000000000000..25a065fef6770 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/resourcemetricsource.go @@ -0,0 +1,52 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v2 + +import ( + v1 "k8s.io/api/core/v1" +) + +// ResourceMetricSourceApplyConfiguration represents an declarative configuration of the ResourceMetricSource type for use +// with apply. +type ResourceMetricSourceApplyConfiguration struct { + Name *v1.ResourceName `json:"name,omitempty"` + Target *MetricTargetApplyConfiguration `json:"target,omitempty"` +} + +// ResourceMetricSourceApplyConfiguration constructs an declarative configuration of the ResourceMetricSource type for use with +// apply. +func ResourceMetricSource() *ResourceMetricSourceApplyConfiguration { + return &ResourceMetricSourceApplyConfiguration{} +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *ResourceMetricSourceApplyConfiguration) WithName(value v1.ResourceName) *ResourceMetricSourceApplyConfiguration { + b.Name = &value + return b +} + +// WithTarget sets the Target field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Target field is set to the value of the last call. 
+func (b *ResourceMetricSourceApplyConfiguration) WithTarget(value *MetricTargetApplyConfiguration) *ResourceMetricSourceApplyConfiguration { + b.Target = value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/resourcemetricstatus.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/resourcemetricstatus.go new file mode 100644 index 0000000000000..fb5625afab35e --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/resourcemetricstatus.go @@ -0,0 +1,52 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v2 + +import ( + v1 "k8s.io/api/core/v1" +) + +// ResourceMetricStatusApplyConfiguration represents an declarative configuration of the ResourceMetricStatus type for use +// with apply. +type ResourceMetricStatusApplyConfiguration struct { + Name *v1.ResourceName `json:"name,omitempty"` + Current *MetricValueStatusApplyConfiguration `json:"current,omitempty"` +} + +// ResourceMetricStatusApplyConfiguration constructs an declarative configuration of the ResourceMetricStatus type for use with +// apply. +func ResourceMetricStatus() *ResourceMetricStatusApplyConfiguration { + return &ResourceMetricStatusApplyConfiguration{} +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *ResourceMetricStatusApplyConfiguration) WithName(value v1.ResourceName) *ResourceMetricStatusApplyConfiguration { + b.Name = &value + return b +} + +// WithCurrent sets the Current field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Current field is set to the value of the last call. +func (b *ResourceMetricStatusApplyConfiguration) WithCurrent(value *MetricValueStatusApplyConfiguration) *ResourceMetricStatusApplyConfiguration { + b.Current = value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/horizontalpodautoscaler.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/horizontalpodautoscaler.go index 280ae05095ab7..e2c24646be469 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/horizontalpodautoscaler.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/horizontalpodautoscaler.go @@ -50,7 +50,7 @@ func HorizontalPodAutoscaler(name, namespace string) *HorizontalPodAutoscalerApp // ExtractHorizontalPodAutoscaler extracts the applied configuration owned by fieldManager from // horizontalPodAutoscaler. 
If no managedFields are found in horizontalPodAutoscaler for fieldManager, a // HorizontalPodAutoscalerApplyConfiguration is returned with only the Name, Namespace (if applicable), -// APIVersion and Kind populated. Is is possible that no managed fields were found for because other +// APIVersion and Kind populated. It is possible that no managed fields were found for because other // field managers have taken ownership of all the fields previously owned by fieldManager, or because // the fieldManager never owned fields any fields. // horizontalPodAutoscaler must be a unmodified HorizontalPodAutoscaler API object that was retrieved from the Kubernetes API. @@ -59,8 +59,19 @@ func HorizontalPodAutoscaler(name, namespace string) *HorizontalPodAutoscalerApp // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! func ExtractHorizontalPodAutoscaler(horizontalPodAutoscaler *autoscalingv2beta1.HorizontalPodAutoscaler, fieldManager string) (*HorizontalPodAutoscalerApplyConfiguration, error) { + return extractHorizontalPodAutoscaler(horizontalPodAutoscaler, fieldManager, "") +} + +// ExtractHorizontalPodAutoscalerStatus is the same as ExtractHorizontalPodAutoscaler except +// that it extracts the status subresource applied configuration. +// Experimental! +func ExtractHorizontalPodAutoscalerStatus(horizontalPodAutoscaler *autoscalingv2beta1.HorizontalPodAutoscaler, fieldManager string) (*HorizontalPodAutoscalerApplyConfiguration, error) { + return extractHorizontalPodAutoscaler(horizontalPodAutoscaler, fieldManager, "status") +} + +func extractHorizontalPodAutoscaler(horizontalPodAutoscaler *autoscalingv2beta1.HorizontalPodAutoscaler, fieldManager string, subresource string) (*HorizontalPodAutoscalerApplyConfiguration, error) { b := &HorizontalPodAutoscalerApplyConfiguration{} - err := managedfields.ExtractInto(horizontalPodAutoscaler, internal.Parser().Type("io.k8s.api.autoscaling.v2beta1.HorizontalPodAutoscaler"), fieldManager, b) + err := managedfields.ExtractInto(horizontalPodAutoscaler, internal.Parser().Type("io.k8s.api.autoscaling.v2beta1.HorizontalPodAutoscaler"), fieldManager, b, subresource) if err != nil { return nil, err } diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/horizontalpodautoscaler.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/horizontalpodautoscaler.go index 0ae4a195368dc..381925b23bc69 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/horizontalpodautoscaler.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/horizontalpodautoscaler.go @@ -50,7 +50,7 @@ func HorizontalPodAutoscaler(name, namespace string) *HorizontalPodAutoscalerApp // ExtractHorizontalPodAutoscaler extracts the applied configuration owned by fieldManager from // horizontalPodAutoscaler. If no managedFields are found in horizontalPodAutoscaler for fieldManager, a // HorizontalPodAutoscalerApplyConfiguration is returned with only the Name, Namespace (if applicable), -// APIVersion and Kind populated. Is is possible that no managed fields were found for because other +// APIVersion and Kind populated. It is possible that no managed fields were found for because other // field managers have taken ownership of all the fields previously owned by fieldManager, or because // the fieldManager never owned fields any fields. // horizontalPodAutoscaler must be a unmodified HorizontalPodAutoscaler API object that was retrieved from the Kubernetes API. 
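The generated v2 apply-configuration builders vendored above are meant to be combined by chaining their With* methods and handed to a server-side apply call. The following is a minimal, illustrative sketch only: it assumes the package's HorizontalPodAutoscaler(name, namespace) and CrossVersionObjectReference() constructors (generated in the same package but not shown in these hunks) and a configured *kubernetes.Clientset; the object and field-manager names are invented for the example.

package main

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	autoscalingv2 "k8s.io/client-go/applyconfigurations/autoscaling/v2"
	"k8s.io/client-go/kubernetes"
)

// applyHPA builds a declarative HPA configuration with the chained With*
// builders and submits it via server-side apply.
func applyHPA(ctx context.Context, clientset *kubernetes.Clientset) error {
	hpa := autoscalingv2.HorizontalPodAutoscaler("my-hpa", "default").
		WithSpec(autoscalingv2.HorizontalPodAutoscalerSpec().
			WithScaleTargetRef(autoscalingv2.CrossVersionObjectReference().
				WithAPIVersion("apps/v1").
				WithKind("Deployment").
				WithName("my-app")).
			WithMinReplicas(2).
			WithMaxReplicas(10).
			WithBehavior(autoscalingv2.HorizontalPodAutoscalerBehavior().
				WithScaleDown(autoscalingv2.HPAScalingRules().
					WithStabilizationWindowSeconds(300))))

	// Only the fields set on the apply configuration become owned by this
	// field manager; everything left nil is omitted from the apply request.
	_, err := clientset.AutoscalingV2().HorizontalPodAutoscalers("default").
		Apply(ctx, hpa, metav1.ApplyOptions{FieldManager: "example-manager"})
	return err
}

Because every With* method returns its receiver, the whole desired state reads as a single expression, and fields that are never set stay nil, so they are never claimed from other field managers.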
@@ -59,8 +59,19 @@ func HorizontalPodAutoscaler(name, namespace string) *HorizontalPodAutoscalerApp // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! func ExtractHorizontalPodAutoscaler(horizontalPodAutoscaler *autoscalingv2beta2.HorizontalPodAutoscaler, fieldManager string) (*HorizontalPodAutoscalerApplyConfiguration, error) { + return extractHorizontalPodAutoscaler(horizontalPodAutoscaler, fieldManager, "") +} + +// ExtractHorizontalPodAutoscalerStatus is the same as ExtractHorizontalPodAutoscaler except +// that it extracts the status subresource applied configuration. +// Experimental! +func ExtractHorizontalPodAutoscalerStatus(horizontalPodAutoscaler *autoscalingv2beta2.HorizontalPodAutoscaler, fieldManager string) (*HorizontalPodAutoscalerApplyConfiguration, error) { + return extractHorizontalPodAutoscaler(horizontalPodAutoscaler, fieldManager, "status") +} + +func extractHorizontalPodAutoscaler(horizontalPodAutoscaler *autoscalingv2beta2.HorizontalPodAutoscaler, fieldManager string, subresource string) (*HorizontalPodAutoscalerApplyConfiguration, error) { b := &HorizontalPodAutoscalerApplyConfiguration{} - err := managedfields.ExtractInto(horizontalPodAutoscaler, internal.Parser().Type("io.k8s.api.autoscaling.v2beta2.HorizontalPodAutoscaler"), fieldManager, b) + err := managedfields.ExtractInto(horizontalPodAutoscaler, internal.Parser().Type("io.k8s.api.autoscaling.v2beta2.HorizontalPodAutoscaler"), fieldManager, b, subresource) if err != nil { return nil, err } diff --git a/vendor/k8s.io/client-go/applyconfigurations/batch/v1/cronjob.go b/vendor/k8s.io/client-go/applyconfigurations/batch/v1/cronjob.go index af38708a0a7ad..749163cc30cd2 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/batch/v1/cronjob.go +++ b/vendor/k8s.io/client-go/applyconfigurations/batch/v1/cronjob.go @@ -50,7 +50,7 @@ func CronJob(name, namespace string) *CronJobApplyConfiguration { // ExtractCronJob extracts the applied configuration owned by fieldManager from // cronJob. If no managedFields are found in cronJob for fieldManager, a // CronJobApplyConfiguration is returned with only the Name, Namespace (if applicable), -// APIVersion and Kind populated. Is is possible that no managed fields were found for because other +// APIVersion and Kind populated. It is possible that no managed fields were found for because other // field managers have taken ownership of all the fields previously owned by fieldManager, or because // the fieldManager never owned fields any fields. // cronJob must be a unmodified CronJob API object that was retrieved from the Kubernetes API. @@ -59,8 +59,19 @@ func CronJob(name, namespace string) *CronJobApplyConfiguration { // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! func ExtractCronJob(cronJob *apibatchv1.CronJob, fieldManager string) (*CronJobApplyConfiguration, error) { + return extractCronJob(cronJob, fieldManager, "") +} + +// ExtractCronJobStatus is the same as ExtractCronJob except +// that it extracts the status subresource applied configuration. +// Experimental! 
+func ExtractCronJobStatus(cronJob *apibatchv1.CronJob, fieldManager string) (*CronJobApplyConfiguration, error) { + return extractCronJob(cronJob, fieldManager, "status") +} + +func extractCronJob(cronJob *apibatchv1.CronJob, fieldManager string, subresource string) (*CronJobApplyConfiguration, error) { b := &CronJobApplyConfiguration{} - err := managedfields.ExtractInto(cronJob, internal.Parser().Type("io.k8s.api.batch.v1.CronJob"), fieldManager, b) + err := managedfields.ExtractInto(cronJob, internal.Parser().Type("io.k8s.api.batch.v1.CronJob"), fieldManager, b, subresource) if err != nil { return nil, err } diff --git a/vendor/k8s.io/client-go/applyconfigurations/batch/v1/job.go b/vendor/k8s.io/client-go/applyconfigurations/batch/v1/job.go index 0c9bcdb75152c..bb84a58b02d01 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/batch/v1/job.go +++ b/vendor/k8s.io/client-go/applyconfigurations/batch/v1/job.go @@ -50,7 +50,7 @@ func Job(name, namespace string) *JobApplyConfiguration { // ExtractJob extracts the applied configuration owned by fieldManager from // job. If no managedFields are found in job for fieldManager, a // JobApplyConfiguration is returned with only the Name, Namespace (if applicable), -// APIVersion and Kind populated. Is is possible that no managed fields were found for because other +// APIVersion and Kind populated. It is possible that no managed fields were found for because other // field managers have taken ownership of all the fields previously owned by fieldManager, or because // the fieldManager never owned fields any fields. // job must be a unmodified Job API object that was retrieved from the Kubernetes API. @@ -59,8 +59,19 @@ func Job(name, namespace string) *JobApplyConfiguration { // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! func ExtractJob(job *apibatchv1.Job, fieldManager string) (*JobApplyConfiguration, error) { + return extractJob(job, fieldManager, "") +} + +// ExtractJobStatus is the same as ExtractJob except +// that it extracts the status subresource applied configuration. +// Experimental! +func ExtractJobStatus(job *apibatchv1.Job, fieldManager string) (*JobApplyConfiguration, error) { + return extractJob(job, fieldManager, "status") +} + +func extractJob(job *apibatchv1.Job, fieldManager string, subresource string) (*JobApplyConfiguration, error) { b := &JobApplyConfiguration{} - err := managedfields.ExtractInto(job, internal.Parser().Type("io.k8s.api.batch.v1.Job"), fieldManager, b) + err := managedfields.ExtractInto(job, internal.Parser().Type("io.k8s.api.batch.v1.Job"), fieldManager, b, subresource) if err != nil { return nil, err } diff --git a/vendor/k8s.io/client-go/applyconfigurations/batch/v1/jobstatus.go b/vendor/k8s.io/client-go/applyconfigurations/batch/v1/jobstatus.go index e59d49cf1aa64..a36d5d0ae11d0 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/batch/v1/jobstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/batch/v1/jobstatus.go @@ -25,13 +25,15 @@ import ( // JobStatusApplyConfiguration represents an declarative configuration of the JobStatus type for use // with apply. 
type JobStatusApplyConfiguration struct { - Conditions []JobConditionApplyConfiguration `json:"conditions,omitempty"` - StartTime *metav1.Time `json:"startTime,omitempty"` - CompletionTime *metav1.Time `json:"completionTime,omitempty"` - Active *int32 `json:"active,omitempty"` - Succeeded *int32 `json:"succeeded,omitempty"` - Failed *int32 `json:"failed,omitempty"` - CompletedIndexes *string `json:"completedIndexes,omitempty"` + Conditions []JobConditionApplyConfiguration `json:"conditions,omitempty"` + StartTime *metav1.Time `json:"startTime,omitempty"` + CompletionTime *metav1.Time `json:"completionTime,omitempty"` + Active *int32 `json:"active,omitempty"` + Succeeded *int32 `json:"succeeded,omitempty"` + Failed *int32 `json:"failed,omitempty"` + CompletedIndexes *string `json:"completedIndexes,omitempty"` + UncountedTerminatedPods *UncountedTerminatedPodsApplyConfiguration `json:"uncountedTerminatedPods,omitempty"` + Ready *int32 `json:"ready,omitempty"` } // JobStatusApplyConfiguration constructs an declarative configuration of the JobStatus type for use with @@ -100,3 +102,19 @@ func (b *JobStatusApplyConfiguration) WithCompletedIndexes(value string) *JobSta b.CompletedIndexes = &value return b } + +// WithUncountedTerminatedPods sets the UncountedTerminatedPods field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UncountedTerminatedPods field is set to the value of the last call. +func (b *JobStatusApplyConfiguration) WithUncountedTerminatedPods(value *UncountedTerminatedPodsApplyConfiguration) *JobStatusApplyConfiguration { + b.UncountedTerminatedPods = value + return b +} + +// WithReady sets the Ready field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Ready field is set to the value of the last call. +func (b *JobStatusApplyConfiguration) WithReady(value int32) *JobStatusApplyConfiguration { + b.Ready = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/batch/v1/uncountedterminatedpods.go b/vendor/k8s.io/client-go/applyconfigurations/batch/v1/uncountedterminatedpods.go new file mode 100644 index 0000000000000..1409303fff3ac --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/batch/v1/uncountedterminatedpods.go @@ -0,0 +1,56 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + types "k8s.io/apimachinery/pkg/types" +) + +// UncountedTerminatedPodsApplyConfiguration represents an declarative configuration of the UncountedTerminatedPods type for use +// with apply. 
+type UncountedTerminatedPodsApplyConfiguration struct { + Succeeded []types.UID `json:"succeeded,omitempty"` + Failed []types.UID `json:"failed,omitempty"` +} + +// UncountedTerminatedPodsApplyConfiguration constructs an declarative configuration of the UncountedTerminatedPods type for use with +// apply. +func UncountedTerminatedPods() *UncountedTerminatedPodsApplyConfiguration { + return &UncountedTerminatedPodsApplyConfiguration{} +} + +// WithSucceeded adds the given value to the Succeeded field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Succeeded field. +func (b *UncountedTerminatedPodsApplyConfiguration) WithSucceeded(values ...types.UID) *UncountedTerminatedPodsApplyConfiguration { + for i := range values { + b.Succeeded = append(b.Succeeded, values[i]) + } + return b +} + +// WithFailed adds the given value to the Failed field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Failed field. +func (b *UncountedTerminatedPodsApplyConfiguration) WithFailed(values ...types.UID) *UncountedTerminatedPodsApplyConfiguration { + for i := range values { + b.Failed = append(b.Failed, values[i]) + } + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/batch/v1beta1/cronjob.go b/vendor/k8s.io/client-go/applyconfigurations/batch/v1beta1/cronjob.go index ddcef76fd2597..8b16bc55c2041 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/batch/v1beta1/cronjob.go +++ b/vendor/k8s.io/client-go/applyconfigurations/batch/v1beta1/cronjob.go @@ -50,7 +50,7 @@ func CronJob(name, namespace string) *CronJobApplyConfiguration { // ExtractCronJob extracts the applied configuration owned by fieldManager from // cronJob. If no managedFields are found in cronJob for fieldManager, a // CronJobApplyConfiguration is returned with only the Name, Namespace (if applicable), -// APIVersion and Kind populated. Is is possible that no managed fields were found for because other +// APIVersion and Kind populated. It is possible that no managed fields were found for because other // field managers have taken ownership of all the fields previously owned by fieldManager, or because // the fieldManager never owned fields any fields. // cronJob must be a unmodified CronJob API object that was retrieved from the Kubernetes API. @@ -59,8 +59,19 @@ func CronJob(name, namespace string) *CronJobApplyConfiguration { // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! func ExtractCronJob(cronJob *batchv1beta1.CronJob, fieldManager string) (*CronJobApplyConfiguration, error) { + return extractCronJob(cronJob, fieldManager, "") +} + +// ExtractCronJobStatus is the same as ExtractCronJob except +// that it extracts the status subresource applied configuration. +// Experimental! 
+func ExtractCronJobStatus(cronJob *batchv1beta1.CronJob, fieldManager string) (*CronJobApplyConfiguration, error) { + return extractCronJob(cronJob, fieldManager, "status") +} + +func extractCronJob(cronJob *batchv1beta1.CronJob, fieldManager string, subresource string) (*CronJobApplyConfiguration, error) { b := &CronJobApplyConfiguration{} - err := managedfields.ExtractInto(cronJob, internal.Parser().Type("io.k8s.api.batch.v1beta1.CronJob"), fieldManager, b) + err := managedfields.ExtractInto(cronJob, internal.Parser().Type("io.k8s.api.batch.v1beta1.CronJob"), fieldManager, b, subresource) if err != nil { return nil, err } diff --git a/vendor/k8s.io/client-go/applyconfigurations/certificates/v1/certificatesigningrequest.go b/vendor/k8s.io/client-go/applyconfigurations/certificates/v1/certificatesigningrequest.go index 99c710a0fb164..9d46541b74887 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/certificates/v1/certificatesigningrequest.go +++ b/vendor/k8s.io/client-go/applyconfigurations/certificates/v1/certificatesigningrequest.go @@ -49,7 +49,7 @@ func CertificateSigningRequest(name string) *CertificateSigningRequestApplyConfi // ExtractCertificateSigningRequest extracts the applied configuration owned by fieldManager from // certificateSigningRequest. If no managedFields are found in certificateSigningRequest for fieldManager, a // CertificateSigningRequestApplyConfiguration is returned with only the Name, Namespace (if applicable), -// APIVersion and Kind populated. Is is possible that no managed fields were found for because other +// APIVersion and Kind populated. It is possible that no managed fields were found for because other // field managers have taken ownership of all the fields previously owned by fieldManager, or because // the fieldManager never owned fields any fields. // certificateSigningRequest must be a unmodified CertificateSigningRequest API object that was retrieved from the Kubernetes API. @@ -58,8 +58,19 @@ func CertificateSigningRequest(name string) *CertificateSigningRequestApplyConfi // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! func ExtractCertificateSigningRequest(certificateSigningRequest *apicertificatesv1.CertificateSigningRequest, fieldManager string) (*CertificateSigningRequestApplyConfiguration, error) { + return extractCertificateSigningRequest(certificateSigningRequest, fieldManager, "") +} + +// ExtractCertificateSigningRequestStatus is the same as ExtractCertificateSigningRequest except +// that it extracts the status subresource applied configuration. +// Experimental! 
+func ExtractCertificateSigningRequestStatus(certificateSigningRequest *apicertificatesv1.CertificateSigningRequest, fieldManager string) (*CertificateSigningRequestApplyConfiguration, error) { + return extractCertificateSigningRequest(certificateSigningRequest, fieldManager, "status") +} + +func extractCertificateSigningRequest(certificateSigningRequest *apicertificatesv1.CertificateSigningRequest, fieldManager string, subresource string) (*CertificateSigningRequestApplyConfiguration, error) { b := &CertificateSigningRequestApplyConfiguration{} - err := managedfields.ExtractInto(certificateSigningRequest, internal.Parser().Type("io.k8s.api.certificates.v1.CertificateSigningRequest"), fieldManager, b) + err := managedfields.ExtractInto(certificateSigningRequest, internal.Parser().Type("io.k8s.api.certificates.v1.CertificateSigningRequest"), fieldManager, b, subresource) if err != nil { return nil, err } diff --git a/vendor/k8s.io/client-go/applyconfigurations/certificates/v1/certificatesigningrequestspec.go b/vendor/k8s.io/client-go/applyconfigurations/certificates/v1/certificatesigningrequestspec.go index 7c4d2c98e2ddc..81ca214a9dc6e 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/certificates/v1/certificatesigningrequestspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/certificates/v1/certificatesigningrequestspec.go @@ -25,13 +25,14 @@ import ( // CertificateSigningRequestSpecApplyConfiguration represents an declarative configuration of the CertificateSigningRequestSpec type for use // with apply. type CertificateSigningRequestSpecApplyConfiguration struct { - Request []byte `json:"request,omitempty"` - SignerName *string `json:"signerName,omitempty"` - Usages []v1.KeyUsage `json:"usages,omitempty"` - Username *string `json:"username,omitempty"` - UID *string `json:"uid,omitempty"` - Groups []string `json:"groups,omitempty"` - Extra map[string]v1.ExtraValue `json:"extra,omitempty"` + Request []byte `json:"request,omitempty"` + SignerName *string `json:"signerName,omitempty"` + ExpirationSeconds *int32 `json:"expirationSeconds,omitempty"` + Usages []v1.KeyUsage `json:"usages,omitempty"` + Username *string `json:"username,omitempty"` + UID *string `json:"uid,omitempty"` + Groups []string `json:"groups,omitempty"` + Extra map[string]v1.ExtraValue `json:"extra,omitempty"` } // CertificateSigningRequestSpecApplyConfiguration constructs an declarative configuration of the CertificateSigningRequestSpec type for use with @@ -58,6 +59,14 @@ func (b *CertificateSigningRequestSpecApplyConfiguration) WithSignerName(value s return b } +// WithExpirationSeconds sets the ExpirationSeconds field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ExpirationSeconds field is set to the value of the last call. +func (b *CertificateSigningRequestSpecApplyConfiguration) WithExpirationSeconds(value int32) *CertificateSigningRequestSpecApplyConfiguration { + b.ExpirationSeconds = &value + return b +} + // WithUsages adds the given value to the Usages field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the Usages field. 
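The new WithExpirationSeconds setter slots into the generated builder chain like any other scalar field. Below is a minimal sketch of requesting a short-lived client certificate with it, assuming the typed clientset from the same client-go version (whose generated CSR client exposes Apply); csrPEM is a hypothetical PEM-encoded request produced elsewhere, and "example-csr" and "example-manager" are placeholder names:

    package csrdemo

    import (
    	"context"

    	certv1 "k8s.io/api/certificates/v1"
    	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    	certv1ac "k8s.io/client-go/applyconfigurations/certificates/v1"
    	"k8s.io/client-go/kubernetes"
    )

    // applyCSR server-side-applies a client-auth CSR whose issued certificate
    // should expire roughly one hour after signing.
    func applyCSR(ctx context.Context, cs kubernetes.Interface, csrPEM []byte) error {
    	csr := certv1ac.CertificateSigningRequest("example-csr").
    		WithSpec(certv1ac.CertificateSigningRequestSpec().
    			WithRequest(csrPEM...).
    			WithSignerName("kubernetes.io/kube-apiserver-client").
    			WithExpirationSeconds(3600). // the new optional field from this diff
    			WithUsages(certv1.UsageClientAuth))

    	_, err := cs.CertificatesV1().CertificateSigningRequests().
    		Apply(ctx, csr, metav1.ApplyOptions{FieldManager: "example-manager"})
    	return err
    }

Because ExpirationSeconds is a pointer field serialized with omitempty, skipping the call leaves the lifetime entirely to the signer's default.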
diff --git a/vendor/k8s.io/client-go/applyconfigurations/certificates/v1beta1/certificatesigningrequest.go b/vendor/k8s.io/client-go/applyconfigurations/certificates/v1beta1/certificatesigningrequest.go
index 920b5319a9072..907b81983e8d4 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/certificates/v1beta1/certificatesigningrequest.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/certificates/v1beta1/certificatesigningrequest.go
@@ -49,7 +49,7 @@ func CertificateSigningRequest(name string) *CertificateSigningRequestApplyConfi
 // ExtractCertificateSigningRequest extracts the applied configuration owned by fieldManager from
 // certificateSigningRequest. If no managedFields are found in certificateSigningRequest for fieldManager, a
 // CertificateSigningRequestApplyConfiguration is returned with only the Name, Namespace (if applicable),
-// APIVersion and Kind populated. Is is possible that no managed fields were found for because other
+// APIVersion and Kind populated. It is possible that no managed fields were found for because other
 // field managers have taken ownership of all the fields previously owned by fieldManager, or because
 // the fieldManager never owned fields any fields.
 // certificateSigningRequest must be a unmodified CertificateSigningRequest API object that was retrieved from the Kubernetes API.
@@ -58,8 +58,19 @@ func CertificateSigningRequest(name string) *CertificateSigningRequestApplyConfi
 // applied if another fieldManager has updated or force applied any of the previously applied fields.
 // Experimental!
 func ExtractCertificateSigningRequest(certificateSigningRequest *certificatesv1beta1.CertificateSigningRequest, fieldManager string) (*CertificateSigningRequestApplyConfiguration, error) {
+	return extractCertificateSigningRequest(certificateSigningRequest, fieldManager, "")
+}
+
+// ExtractCertificateSigningRequestStatus is the same as ExtractCertificateSigningRequest except
+// that it extracts the status subresource applied configuration.
+// Experimental!
+func ExtractCertificateSigningRequestStatus(certificateSigningRequest *certificatesv1beta1.CertificateSigningRequest, fieldManager string) (*CertificateSigningRequestApplyConfiguration, error) {
+	return extractCertificateSigningRequest(certificateSigningRequest, fieldManager, "status")
+}
+
+func extractCertificateSigningRequest(certificateSigningRequest *certificatesv1beta1.CertificateSigningRequest, fieldManager string, subresource string) (*CertificateSigningRequestApplyConfiguration, error) {
 	b := &CertificateSigningRequestApplyConfiguration{}
-	err := managedfields.ExtractInto(certificateSigningRequest, internal.Parser().Type("io.k8s.api.certificates.v1beta1.CertificateSigningRequest"), fieldManager, b)
+	err := managedfields.ExtractInto(certificateSigningRequest, internal.Parser().Type("io.k8s.api.certificates.v1beta1.CertificateSigningRequest"), fieldManager, b, subresource)
 	if err != nil {
 		return nil, err
 	}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/certificates/v1beta1/certificatesigningrequestspec.go b/vendor/k8s.io/client-go/applyconfigurations/certificates/v1beta1/certificatesigningrequestspec.go
index 73ea58e5ec659..9554b1f400a61 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/certificates/v1beta1/certificatesigningrequestspec.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/certificates/v1beta1/certificatesigningrequestspec.go
@@ -25,13 +25,14 @@ import (
 // CertificateSigningRequestSpecApplyConfiguration represents an declarative configuration of the CertificateSigningRequestSpec type for use
 // with apply.
 type CertificateSigningRequestSpecApplyConfiguration struct {
-	Request    []byte                        `json:"request,omitempty"`
-	SignerName *string                       `json:"signerName,omitempty"`
-	Usages     []v1beta1.KeyUsage            `json:"usages,omitempty"`
-	Username   *string                       `json:"username,omitempty"`
-	UID        *string                       `json:"uid,omitempty"`
-	Groups     []string                      `json:"groups,omitempty"`
-	Extra      map[string]v1beta1.ExtraValue `json:"extra,omitempty"`
+	Request           []byte                        `json:"request,omitempty"`
+	SignerName        *string                       `json:"signerName,omitempty"`
+	ExpirationSeconds *int32                        `json:"expirationSeconds,omitempty"`
+	Usages            []v1beta1.KeyUsage            `json:"usages,omitempty"`
+	Username          *string                       `json:"username,omitempty"`
+	UID               *string                       `json:"uid,omitempty"`
+	Groups            []string                      `json:"groups,omitempty"`
+	Extra             map[string]v1beta1.ExtraValue `json:"extra,omitempty"`
 }

 // CertificateSigningRequestSpecApplyConfiguration constructs an declarative configuration of the CertificateSigningRequestSpec type for use with
@@ -58,6 +59,14 @@ func (b *CertificateSigningRequestSpecApplyConfiguration) WithSignerName(value s
 	return b
 }

+// WithExpirationSeconds sets the ExpirationSeconds field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the ExpirationSeconds field is set to the value of the last call.
+func (b *CertificateSigningRequestSpecApplyConfiguration) WithExpirationSeconds(value int32) *CertificateSigningRequestSpecApplyConfiguration {
+	b.ExpirationSeconds = &value
+	return b
+}
+
 // WithUsages adds the given value to the Usages field in the declarative configuration
 // and returns the receiver, so that objects can be build by chaining "With" function invocations.
 // If called multiple times, values provided by each call will be appended to the Usages field.
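The Extract<Kind>Status variants thread the new "status" subresource argument through managedfields.ExtractInto, so a controller that owns only status fields can rebuild exactly the apply configuration it previously applied to the status subresource. A minimal sketch of that extract/modify/apply-status loop, again with a placeholder "example-manager" field manager and assuming the typed client's generated ApplyStatus method from this client-go version:

    package csrdemo

    import (
    	"context"

    	certv1 "k8s.io/api/certificates/v1"
    	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    	certv1ac "k8s.io/client-go/applyconfigurations/certificates/v1"
    	"k8s.io/client-go/kubernetes"
    )

    // denyCSR rebuilds the status apply configuration owned by "example-manager"
    // and applies a Denied condition back through the status subresource.
    func denyCSR(ctx context.Context, cs kubernetes.Interface, name string) error {
    	got, err := cs.CertificatesV1().CertificateSigningRequests().Get(ctx, name, metav1.GetOptions{})
    	if err != nil {
    		return err
    	}

    	// The Status variant passes "status" through to ExtractInto, keeping
    	// spec ownership out of the extracted configuration.
    	csr, err := certv1ac.ExtractCertificateSigningRequestStatus(got, "example-manager")
    	if err != nil {
    		return err
    	}
    	csr.WithStatus(certv1ac.CertificateSigningRequestStatus().
    		WithConditions(certv1ac.CertificateSigningRequestCondition().
    			WithType(certv1.CertificateDenied).
    			WithStatus("True").
    			WithReason("ExamplePolicy").
    			WithMessage("denied by example policy")))

    	_, err = cs.CertificatesV1().CertificateSigningRequests().
    		ApplyStatus(ctx, csr, metav1.ApplyOptions{FieldManager: "example-manager", Force: true})
    	return err
    }

Force is set because a status-owning controller is the canonical owner of the fields it applies; a conflict here means another manager took them over and should be overridden.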
diff --git a/vendor/k8s.io/client-go/applyconfigurations/coordination/v1/lease.go b/vendor/k8s.io/client-go/applyconfigurations/coordination/v1/lease.go index ad552f2a5ead7..fcaddb663b497 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/coordination/v1/lease.go +++ b/vendor/k8s.io/client-go/applyconfigurations/coordination/v1/lease.go @@ -49,7 +49,7 @@ func Lease(name, namespace string) *LeaseApplyConfiguration { // ExtractLease extracts the applied configuration owned by fieldManager from // lease. If no managedFields are found in lease for fieldManager, a // LeaseApplyConfiguration is returned with only the Name, Namespace (if applicable), -// APIVersion and Kind populated. Is is possible that no managed fields were found for because other +// APIVersion and Kind populated. It is possible that no managed fields were found for because other // field managers have taken ownership of all the fields previously owned by fieldManager, or because // the fieldManager never owned fields any fields. // lease must be a unmodified Lease API object that was retrieved from the Kubernetes API. @@ -58,8 +58,19 @@ func Lease(name, namespace string) *LeaseApplyConfiguration { // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! func ExtractLease(lease *apicoordinationv1.Lease, fieldManager string) (*LeaseApplyConfiguration, error) { + return extractLease(lease, fieldManager, "") +} + +// ExtractLeaseStatus is the same as ExtractLease except +// that it extracts the status subresource applied configuration. +// Experimental! +func ExtractLeaseStatus(lease *apicoordinationv1.Lease, fieldManager string) (*LeaseApplyConfiguration, error) { + return extractLease(lease, fieldManager, "status") +} + +func extractLease(lease *apicoordinationv1.Lease, fieldManager string, subresource string) (*LeaseApplyConfiguration, error) { b := &LeaseApplyConfiguration{} - err := managedfields.ExtractInto(lease, internal.Parser().Type("io.k8s.api.coordination.v1.Lease"), fieldManager, b) + err := managedfields.ExtractInto(lease, internal.Parser().Type("io.k8s.api.coordination.v1.Lease"), fieldManager, b, subresource) if err != nil { return nil, err } diff --git a/vendor/k8s.io/client-go/applyconfigurations/coordination/v1beta1/lease.go b/vendor/k8s.io/client-go/applyconfigurations/coordination/v1beta1/lease.go index 9093cfc543447..f63ddc29eece8 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/coordination/v1beta1/lease.go +++ b/vendor/k8s.io/client-go/applyconfigurations/coordination/v1beta1/lease.go @@ -49,7 +49,7 @@ func Lease(name, namespace string) *LeaseApplyConfiguration { // ExtractLease extracts the applied configuration owned by fieldManager from // lease. If no managedFields are found in lease for fieldManager, a // LeaseApplyConfiguration is returned with only the Name, Namespace (if applicable), -// APIVersion and Kind populated. Is is possible that no managed fields were found for because other +// APIVersion and Kind populated. It is possible that no managed fields were found for because other // field managers have taken ownership of all the fields previously owned by fieldManager, or because // the fieldManager never owned fields any fields. // lease must be a unmodified Lease API object that was retrieved from the Kubernetes API. @@ -58,8 +58,19 @@ func Lease(name, namespace string) *LeaseApplyConfiguration { // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! 
func ExtractLease(lease *coordinationv1beta1.Lease, fieldManager string) (*LeaseApplyConfiguration, error) { + return extractLease(lease, fieldManager, "") +} + +// ExtractLeaseStatus is the same as ExtractLease except +// that it extracts the status subresource applied configuration. +// Experimental! +func ExtractLeaseStatus(lease *coordinationv1beta1.Lease, fieldManager string) (*LeaseApplyConfiguration, error) { + return extractLease(lease, fieldManager, "status") +} + +func extractLease(lease *coordinationv1beta1.Lease, fieldManager string, subresource string) (*LeaseApplyConfiguration, error) { b := &LeaseApplyConfiguration{} - err := managedfields.ExtractInto(lease, internal.Parser().Type("io.k8s.api.coordination.v1beta1.Lease"), fieldManager, b) + err := managedfields.ExtractInto(lease, internal.Parser().Type("io.k8s.api.coordination.v1beta1.Lease"), fieldManager, b, subresource) if err != nil { return nil, err } diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/componentstatus.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/componentstatus.go index 9328bdd760268..6983a689b2f4a 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/componentstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/componentstatus.go @@ -48,7 +48,7 @@ func ComponentStatus(name string) *ComponentStatusApplyConfiguration { // ExtractComponentStatus extracts the applied configuration owned by fieldManager from // componentStatus. If no managedFields are found in componentStatus for fieldManager, a // ComponentStatusApplyConfiguration is returned with only the Name, Namespace (if applicable), -// APIVersion and Kind populated. Is is possible that no managed fields were found for because other +// APIVersion and Kind populated. It is possible that no managed fields were found for because other // field managers have taken ownership of all the fields previously owned by fieldManager, or because // the fieldManager never owned fields any fields. // componentStatus must be a unmodified ComponentStatus API object that was retrieved from the Kubernetes API. @@ -57,8 +57,19 @@ func ComponentStatus(name string) *ComponentStatusApplyConfiguration { // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! func ExtractComponentStatus(componentStatus *apicorev1.ComponentStatus, fieldManager string) (*ComponentStatusApplyConfiguration, error) { + return extractComponentStatus(componentStatus, fieldManager, "") +} + +// ExtractComponentStatusStatus is the same as ExtractComponentStatus except +// that it extracts the status subresource applied configuration. +// Experimental! 
+func ExtractComponentStatusStatus(componentStatus *apicorev1.ComponentStatus, fieldManager string) (*ComponentStatusApplyConfiguration, error) { + return extractComponentStatus(componentStatus, fieldManager, "status") +} + +func extractComponentStatus(componentStatus *apicorev1.ComponentStatus, fieldManager string, subresource string) (*ComponentStatusApplyConfiguration, error) { b := &ComponentStatusApplyConfiguration{} - err := managedfields.ExtractInto(componentStatus, internal.Parser().Type("io.k8s.api.core.v1.ComponentStatus"), fieldManager, b) + err := managedfields.ExtractInto(componentStatus, internal.Parser().Type("io.k8s.api.core.v1.ComponentStatus"), fieldManager, b, subresource) if err != nil { return nil, err } diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/configmap.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/configmap.go index b60f981b05ca1..0664c18498b21 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/configmap.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/configmap.go @@ -51,7 +51,7 @@ func ConfigMap(name, namespace string) *ConfigMapApplyConfiguration { // ExtractConfigMap extracts the applied configuration owned by fieldManager from // configMap. If no managedFields are found in configMap for fieldManager, a // ConfigMapApplyConfiguration is returned with only the Name, Namespace (if applicable), -// APIVersion and Kind populated. Is is possible that no managed fields were found for because other +// APIVersion and Kind populated. It is possible that no managed fields were found for because other // field managers have taken ownership of all the fields previously owned by fieldManager, or because // the fieldManager never owned fields any fields. // configMap must be a unmodified ConfigMap API object that was retrieved from the Kubernetes API. @@ -60,8 +60,19 @@ func ConfigMap(name, namespace string) *ConfigMapApplyConfiguration { // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! func ExtractConfigMap(configMap *corev1.ConfigMap, fieldManager string) (*ConfigMapApplyConfiguration, error) { + return extractConfigMap(configMap, fieldManager, "") +} + +// ExtractConfigMapStatus is the same as ExtractConfigMap except +// that it extracts the status subresource applied configuration. +// Experimental! +func ExtractConfigMapStatus(configMap *corev1.ConfigMap, fieldManager string) (*ConfigMapApplyConfiguration, error) { + return extractConfigMap(configMap, fieldManager, "status") +} + +func extractConfigMap(configMap *corev1.ConfigMap, fieldManager string, subresource string) (*ConfigMapApplyConfiguration, error) { b := &ConfigMapApplyConfiguration{} - err := managedfields.ExtractInto(configMap, internal.Parser().Type("io.k8s.api.core.v1.ConfigMap"), fieldManager, b) + err := managedfields.ExtractInto(configMap, internal.Parser().Type("io.k8s.api.core.v1.ConfigMap"), fieldManager, b, subresource) if err != nil { return nil, err } diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/endpoints.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/endpoints.go index 31a541c75a297..b3b302fe26125 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/endpoints.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/endpoints.go @@ -49,7 +49,7 @@ func Endpoints(name, namespace string) *EndpointsApplyConfiguration { // ExtractEndpoints extracts the applied configuration owned by fieldManager from // endpoints. 
If no managedFields are found in endpoints for fieldManager, a // EndpointsApplyConfiguration is returned with only the Name, Namespace (if applicable), -// APIVersion and Kind populated. Is is possible that no managed fields were found for because other +// APIVersion and Kind populated. It is possible that no managed fields were found for because other // field managers have taken ownership of all the fields previously owned by fieldManager, or because // the fieldManager never owned fields any fields. // endpoints must be a unmodified Endpoints API object that was retrieved from the Kubernetes API. @@ -58,8 +58,19 @@ func Endpoints(name, namespace string) *EndpointsApplyConfiguration { // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! func ExtractEndpoints(endpoints *apicorev1.Endpoints, fieldManager string) (*EndpointsApplyConfiguration, error) { + return extractEndpoints(endpoints, fieldManager, "") +} + +// ExtractEndpointsStatus is the same as ExtractEndpoints except +// that it extracts the status subresource applied configuration. +// Experimental! +func ExtractEndpointsStatus(endpoints *apicorev1.Endpoints, fieldManager string) (*EndpointsApplyConfiguration, error) { + return extractEndpoints(endpoints, fieldManager, "status") +} + +func extractEndpoints(endpoints *apicorev1.Endpoints, fieldManager string, subresource string) (*EndpointsApplyConfiguration, error) { b := &EndpointsApplyConfiguration{} - err := managedfields.ExtractInto(endpoints, internal.Parser().Type("io.k8s.api.core.v1.Endpoints"), fieldManager, b) + err := managedfields.ExtractInto(endpoints, internal.Parser().Type("io.k8s.api.core.v1.Endpoints"), fieldManager, b, subresource) if err != nil { return nil, err } diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/event.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/event.go index bd39a0fb08318..3a0c536945867 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/event.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/event.go @@ -62,7 +62,7 @@ func Event(name, namespace string) *EventApplyConfiguration { // ExtractEvent extracts the applied configuration owned by fieldManager from // event. If no managedFields are found in event for fieldManager, a // EventApplyConfiguration is returned with only the Name, Namespace (if applicable), -// APIVersion and Kind populated. Is is possible that no managed fields were found for because other +// APIVersion and Kind populated. It is possible that no managed fields were found for because other // field managers have taken ownership of all the fields previously owned by fieldManager, or because // the fieldManager never owned fields any fields. // event must be a unmodified Event API object that was retrieved from the Kubernetes API. @@ -71,8 +71,19 @@ func Event(name, namespace string) *EventApplyConfiguration { // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! func ExtractEvent(event *apicorev1.Event, fieldManager string) (*EventApplyConfiguration, error) { + return extractEvent(event, fieldManager, "") +} + +// ExtractEventStatus is the same as ExtractEvent except +// that it extracts the status subresource applied configuration. +// Experimental! 
+func ExtractEventStatus(event *apicorev1.Event, fieldManager string) (*EventApplyConfiguration, error) { + return extractEvent(event, fieldManager, "status") +} + +func extractEvent(event *apicorev1.Event, fieldManager string, subresource string) (*EventApplyConfiguration, error) { b := &EventApplyConfiguration{} - err := managedfields.ExtractInto(event, internal.Parser().Type("io.k8s.api.core.v1.Event"), fieldManager, b) + err := managedfields.ExtractInto(event, internal.Parser().Type("io.k8s.api.core.v1.Event"), fieldManager, b, subresource) if err != nil { return nil, err } diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/grpcaction.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/grpcaction.go new file mode 100644 index 0000000000000..f94e55937ab31 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/grpcaction.go @@ -0,0 +1,48 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// GRPCActionApplyConfiguration represents an declarative configuration of the GRPCAction type for use +// with apply. +type GRPCActionApplyConfiguration struct { + Port *int32 `json:"port,omitempty"` + Service *string `json:"service,omitempty"` +} + +// GRPCActionApplyConfiguration constructs an declarative configuration of the GRPCAction type for use with +// apply. +func GRPCAction() *GRPCActionApplyConfiguration { + return &GRPCActionApplyConfiguration{} +} + +// WithPort sets the Port field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Port field is set to the value of the last call. +func (b *GRPCActionApplyConfiguration) WithPort(value int32) *GRPCActionApplyConfiguration { + b.Port = &value + return b +} + +// WithService sets the Service field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Service field is set to the value of the last call. +func (b *GRPCActionApplyConfiguration) WithService(value string) *GRPCActionApplyConfiguration { + b.Service = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/lifecycle.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/lifecycle.go index ab37b6677b3ed..db9abf8af71c9 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/lifecycle.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/lifecycle.go @@ -21,8 +21,8 @@ package v1 // LifecycleApplyConfiguration represents an declarative configuration of the Lifecycle type for use // with apply. 
type LifecycleApplyConfiguration struct { - PostStart *HandlerApplyConfiguration `json:"postStart,omitempty"` - PreStop *HandlerApplyConfiguration `json:"preStop,omitempty"` + PostStart *LifecycleHandlerApplyConfiguration `json:"postStart,omitempty"` + PreStop *LifecycleHandlerApplyConfiguration `json:"preStop,omitempty"` } // LifecycleApplyConfiguration constructs an declarative configuration of the Lifecycle type for use with @@ -34,7 +34,7 @@ func Lifecycle() *LifecycleApplyConfiguration { // WithPostStart sets the PostStart field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the PostStart field is set to the value of the last call. -func (b *LifecycleApplyConfiguration) WithPostStart(value *HandlerApplyConfiguration) *LifecycleApplyConfiguration { +func (b *LifecycleApplyConfiguration) WithPostStart(value *LifecycleHandlerApplyConfiguration) *LifecycleApplyConfiguration { b.PostStart = value return b } @@ -42,7 +42,7 @@ func (b *LifecycleApplyConfiguration) WithPostStart(value *HandlerApplyConfigura // WithPreStop sets the PreStop field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the PreStop field is set to the value of the last call. -func (b *LifecycleApplyConfiguration) WithPreStop(value *HandlerApplyConfiguration) *LifecycleApplyConfiguration { +func (b *LifecycleApplyConfiguration) WithPreStop(value *LifecycleHandlerApplyConfiguration) *LifecycleApplyConfiguration { b.PreStop = value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/handler.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/lifecyclehandler.go similarity index 69% rename from vendor/k8s.io/client-go/applyconfigurations/core/v1/handler.go rename to vendor/k8s.io/client-go/applyconfigurations/core/v1/lifecyclehandler.go index fbf1511ccc594..6e373dd4ed17f 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/handler.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/lifecyclehandler.go @@ -18,24 +18,24 @@ limitations under the License. package v1 -// HandlerApplyConfiguration represents an declarative configuration of the Handler type for use +// LifecycleHandlerApplyConfiguration represents an declarative configuration of the LifecycleHandler type for use // with apply. -type HandlerApplyConfiguration struct { +type LifecycleHandlerApplyConfiguration struct { Exec *ExecActionApplyConfiguration `json:"exec,omitempty"` HTTPGet *HTTPGetActionApplyConfiguration `json:"httpGet,omitempty"` TCPSocket *TCPSocketActionApplyConfiguration `json:"tcpSocket,omitempty"` } -// HandlerApplyConfiguration constructs an declarative configuration of the Handler type for use with +// LifecycleHandlerApplyConfiguration constructs an declarative configuration of the LifecycleHandler type for use with // apply. -func Handler() *HandlerApplyConfiguration { - return &HandlerApplyConfiguration{} +func LifecycleHandler() *LifecycleHandlerApplyConfiguration { + return &LifecycleHandlerApplyConfiguration{} } // WithExec sets the Exec field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Exec field is set to the value of the last call. 
-func (b *HandlerApplyConfiguration) WithExec(value *ExecActionApplyConfiguration) *HandlerApplyConfiguration { +func (b *LifecycleHandlerApplyConfiguration) WithExec(value *ExecActionApplyConfiguration) *LifecycleHandlerApplyConfiguration { b.Exec = value return b } @@ -43,7 +43,7 @@ func (b *HandlerApplyConfiguration) WithExec(value *ExecActionApplyConfiguration // WithHTTPGet sets the HTTPGet field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the HTTPGet field is set to the value of the last call. -func (b *HandlerApplyConfiguration) WithHTTPGet(value *HTTPGetActionApplyConfiguration) *HandlerApplyConfiguration { +func (b *LifecycleHandlerApplyConfiguration) WithHTTPGet(value *HTTPGetActionApplyConfiguration) *LifecycleHandlerApplyConfiguration { b.HTTPGet = value return b } @@ -51,7 +51,7 @@ func (b *HandlerApplyConfiguration) WithHTTPGet(value *HTTPGetActionApplyConfigu // WithTCPSocket sets the TCPSocket field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the TCPSocket field is set to the value of the last call. -func (b *HandlerApplyConfiguration) WithTCPSocket(value *TCPSocketActionApplyConfiguration) *HandlerApplyConfiguration { +func (b *LifecycleHandlerApplyConfiguration) WithTCPSocket(value *TCPSocketActionApplyConfiguration) *LifecycleHandlerApplyConfiguration { b.TCPSocket = value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/limitrange.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/limitrange.go index 920b527f5168d..03207b8ec1b51 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/limitrange.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/limitrange.go @@ -49,7 +49,7 @@ func LimitRange(name, namespace string) *LimitRangeApplyConfiguration { // ExtractLimitRange extracts the applied configuration owned by fieldManager from // limitRange. If no managedFields are found in limitRange for fieldManager, a // LimitRangeApplyConfiguration is returned with only the Name, Namespace (if applicable), -// APIVersion and Kind populated. Is is possible that no managed fields were found for because other +// APIVersion and Kind populated. It is possible that no managed fields were found for because other // field managers have taken ownership of all the fields previously owned by fieldManager, or because // the fieldManager never owned fields any fields. // limitRange must be a unmodified LimitRange API object that was retrieved from the Kubernetes API. @@ -58,8 +58,19 @@ func LimitRange(name, namespace string) *LimitRangeApplyConfiguration { // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! func ExtractLimitRange(limitRange *apicorev1.LimitRange, fieldManager string) (*LimitRangeApplyConfiguration, error) { + return extractLimitRange(limitRange, fieldManager, "") +} + +// ExtractLimitRangeStatus is the same as ExtractLimitRange except +// that it extracts the status subresource applied configuration. +// Experimental! 
+func ExtractLimitRangeStatus(limitRange *apicorev1.LimitRange, fieldManager string) (*LimitRangeApplyConfiguration, error) { + return extractLimitRange(limitRange, fieldManager, "status") +} + +func extractLimitRange(limitRange *apicorev1.LimitRange, fieldManager string, subresource string) (*LimitRangeApplyConfiguration, error) { b := &LimitRangeApplyConfiguration{} - err := managedfields.ExtractInto(limitRange, internal.Parser().Type("io.k8s.api.core.v1.LimitRange"), fieldManager, b) + err := managedfields.ExtractInto(limitRange, internal.Parser().Type("io.k8s.api.core.v1.LimitRange"), fieldManager, b, subresource) if err != nil { return nil, err } diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/namespace.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/namespace.go index 4a829f404d149..ec29bcfd99eab 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/namespace.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/namespace.go @@ -49,7 +49,7 @@ func Namespace(name string) *NamespaceApplyConfiguration { // ExtractNamespace extracts the applied configuration owned by fieldManager from // namespace. If no managedFields are found in namespace for fieldManager, a // NamespaceApplyConfiguration is returned with only the Name, Namespace (if applicable), -// APIVersion and Kind populated. Is is possible that no managed fields were found for because other +// APIVersion and Kind populated. It is possible that no managed fields were found for because other // field managers have taken ownership of all the fields previously owned by fieldManager, or because // the fieldManager never owned fields any fields. // namespace must be a unmodified Namespace API object that was retrieved from the Kubernetes API. @@ -58,8 +58,19 @@ func Namespace(name string) *NamespaceApplyConfiguration { // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! func ExtractNamespace(namespace *apicorev1.Namespace, fieldManager string) (*NamespaceApplyConfiguration, error) { + return extractNamespace(namespace, fieldManager, "") +} + +// ExtractNamespaceStatus is the same as ExtractNamespace except +// that it extracts the status subresource applied configuration. +// Experimental! +func ExtractNamespaceStatus(namespace *apicorev1.Namespace, fieldManager string) (*NamespaceApplyConfiguration, error) { + return extractNamespace(namespace, fieldManager, "status") +} + +func extractNamespace(namespace *apicorev1.Namespace, fieldManager string, subresource string) (*NamespaceApplyConfiguration, error) { b := &NamespaceApplyConfiguration{} - err := managedfields.ExtractInto(namespace, internal.Parser().Type("io.k8s.api.core.v1.Namespace"), fieldManager, b) + err := managedfields.ExtractInto(namespace, internal.Parser().Type("io.k8s.api.core.v1.Namespace"), fieldManager, b, subresource) if err != nil { return nil, err } diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/node.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/node.go index 4f90da59f6c59..b26e9f499c3f3 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/node.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/node.go @@ -49,7 +49,7 @@ func Node(name string) *NodeApplyConfiguration { // ExtractNode extracts the applied configuration owned by fieldManager from // node. 
If no managedFields are found in node for fieldManager, a // NodeApplyConfiguration is returned with only the Name, Namespace (if applicable), -// APIVersion and Kind populated. Is is possible that no managed fields were found for because other +// APIVersion and Kind populated. It is possible that no managed fields were found for because other // field managers have taken ownership of all the fields previously owned by fieldManager, or because // the fieldManager never owned fields any fields. // node must be a unmodified Node API object that was retrieved from the Kubernetes API. @@ -58,8 +58,19 @@ func Node(name string) *NodeApplyConfiguration { // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! func ExtractNode(node *apicorev1.Node, fieldManager string) (*NodeApplyConfiguration, error) { + return extractNode(node, fieldManager, "") +} + +// ExtractNodeStatus is the same as ExtractNode except +// that it extracts the status subresource applied configuration. +// Experimental! +func ExtractNodeStatus(node *apicorev1.Node, fieldManager string) (*NodeApplyConfiguration, error) { + return extractNode(node, fieldManager, "status") +} + +func extractNode(node *apicorev1.Node, fieldManager string, subresource string) (*NodeApplyConfiguration, error) { b := &NodeApplyConfiguration{} - err := managedfields.ExtractInto(node, internal.Parser().Type("io.k8s.api.core.v1.Node"), fieldManager, b) + err := managedfields.ExtractInto(node, internal.Parser().Type("io.k8s.api.core.v1.Node"), fieldManager, b, subresource) if err != nil { return nil, err } diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolume.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolume.go index a5df345e15bba..dcef6020f828e 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolume.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolume.go @@ -49,7 +49,7 @@ func PersistentVolume(name string) *PersistentVolumeApplyConfiguration { // ExtractPersistentVolume extracts the applied configuration owned by fieldManager from // persistentVolume. If no managedFields are found in persistentVolume for fieldManager, a // PersistentVolumeApplyConfiguration is returned with only the Name, Namespace (if applicable), -// APIVersion and Kind populated. Is is possible that no managed fields were found for because other +// APIVersion and Kind populated. It is possible that no managed fields were found for because other // field managers have taken ownership of all the fields previously owned by fieldManager, or because // the fieldManager never owned fields any fields. // persistentVolume must be a unmodified PersistentVolume API object that was retrieved from the Kubernetes API. @@ -58,8 +58,19 @@ func PersistentVolume(name string) *PersistentVolumeApplyConfiguration { // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! func ExtractPersistentVolume(persistentVolume *apicorev1.PersistentVolume, fieldManager string) (*PersistentVolumeApplyConfiguration, error) { + return extractPersistentVolume(persistentVolume, fieldManager, "") +} + +// ExtractPersistentVolumeStatus is the same as ExtractPersistentVolume except +// that it extracts the status subresource applied configuration. +// Experimental! 
+func ExtractPersistentVolumeStatus(persistentVolume *apicorev1.PersistentVolume, fieldManager string) (*PersistentVolumeApplyConfiguration, error) { + return extractPersistentVolume(persistentVolume, fieldManager, "status") +} + +func extractPersistentVolume(persistentVolume *apicorev1.PersistentVolume, fieldManager string, subresource string) (*PersistentVolumeApplyConfiguration, error) { b := &PersistentVolumeApplyConfiguration{} - err := managedfields.ExtractInto(persistentVolume, internal.Parser().Type("io.k8s.api.core.v1.PersistentVolume"), fieldManager, b) + err := managedfields.ExtractInto(persistentVolume, internal.Parser().Type("io.k8s.api.core.v1.PersistentVolume"), fieldManager, b, subresource) if err != nil { return nil, err } diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaim.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaim.go index 229b5d48164ce..8ed20fa29c7e1 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaim.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaim.go @@ -50,7 +50,7 @@ func PersistentVolumeClaim(name, namespace string) *PersistentVolumeClaimApplyCo // ExtractPersistentVolumeClaim extracts the applied configuration owned by fieldManager from // persistentVolumeClaim. If no managedFields are found in persistentVolumeClaim for fieldManager, a // PersistentVolumeClaimApplyConfiguration is returned with only the Name, Namespace (if applicable), -// APIVersion and Kind populated. Is is possible that no managed fields were found for because other +// APIVersion and Kind populated. It is possible that no managed fields were found for because other // field managers have taken ownership of all the fields previously owned by fieldManager, or because // the fieldManager never owned fields any fields. // persistentVolumeClaim must be a unmodified PersistentVolumeClaim API object that was retrieved from the Kubernetes API. @@ -59,8 +59,19 @@ func PersistentVolumeClaim(name, namespace string) *PersistentVolumeClaimApplyCo // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! func ExtractPersistentVolumeClaim(persistentVolumeClaim *apicorev1.PersistentVolumeClaim, fieldManager string) (*PersistentVolumeClaimApplyConfiguration, error) { + return extractPersistentVolumeClaim(persistentVolumeClaim, fieldManager, "") +} + +// ExtractPersistentVolumeClaimStatus is the same as ExtractPersistentVolumeClaim except +// that it extracts the status subresource applied configuration. +// Experimental! 
+func ExtractPersistentVolumeClaimStatus(persistentVolumeClaim *apicorev1.PersistentVolumeClaim, fieldManager string) (*PersistentVolumeClaimApplyConfiguration, error) { + return extractPersistentVolumeClaim(persistentVolumeClaim, fieldManager, "status") +} + +func extractPersistentVolumeClaim(persistentVolumeClaim *apicorev1.PersistentVolumeClaim, fieldManager string, subresource string) (*PersistentVolumeClaimApplyConfiguration, error) { b := &PersistentVolumeClaimApplyConfiguration{} - err := managedfields.ExtractInto(persistentVolumeClaim, internal.Parser().Type("io.k8s.api.core.v1.PersistentVolumeClaim"), fieldManager, b) + err := managedfields.ExtractInto(persistentVolumeClaim, internal.Parser().Type("io.k8s.api.core.v1.PersistentVolumeClaim"), fieldManager, b, subresource) if err != nil { return nil, err } diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaimspec.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaimspec.go index ac4d64c7114bf..e22d04b0d59cf 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaimspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaimspec.go @@ -33,6 +33,7 @@ type PersistentVolumeClaimSpecApplyConfiguration struct { StorageClassName *string `json:"storageClassName,omitempty"` VolumeMode *v1.PersistentVolumeMode `json:"volumeMode,omitempty"` DataSource *TypedLocalObjectReferenceApplyConfiguration `json:"dataSource,omitempty"` + DataSourceRef *TypedLocalObjectReferenceApplyConfiguration `json:"dataSourceRef,omitempty"` } // PersistentVolumeClaimSpecApplyConfiguration constructs an declarative configuration of the PersistentVolumeClaimSpec type for use with @@ -98,3 +99,11 @@ func (b *PersistentVolumeClaimSpecApplyConfiguration) WithDataSource(value *Type b.DataSource = value return b } + +// WithDataSourceRef sets the DataSourceRef field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DataSourceRef field is set to the value of the last call. +func (b *PersistentVolumeClaimSpecApplyConfiguration) WithDataSourceRef(value *TypedLocalObjectReferenceApplyConfiguration) *PersistentVolumeClaimSpecApplyConfiguration { + b.DataSourceRef = value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaimstatus.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaimstatus.go index 711651e0bcb7d..4c38d89f57399 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaimstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaimstatus.go @@ -25,10 +25,12 @@ import ( // PersistentVolumeClaimStatusApplyConfiguration represents an declarative configuration of the PersistentVolumeClaimStatus type for use // with apply. 
type PersistentVolumeClaimStatusApplyConfiguration struct { - Phase *v1.PersistentVolumeClaimPhase `json:"phase,omitempty"` - AccessModes []v1.PersistentVolumeAccessMode `json:"accessModes,omitempty"` - Capacity *v1.ResourceList `json:"capacity,omitempty"` - Conditions []PersistentVolumeClaimConditionApplyConfiguration `json:"conditions,omitempty"` + Phase *v1.PersistentVolumeClaimPhase `json:"phase,omitempty"` + AccessModes []v1.PersistentVolumeAccessMode `json:"accessModes,omitempty"` + Capacity *v1.ResourceList `json:"capacity,omitempty"` + Conditions []PersistentVolumeClaimConditionApplyConfiguration `json:"conditions,omitempty"` + AllocatedResources *v1.ResourceList `json:"allocatedResources,omitempty"` + ResizeStatus *v1.PersistentVolumeClaimResizeStatus `json:"resizeStatus,omitempty"` } // PersistentVolumeClaimStatusApplyConfiguration constructs an declarative configuration of the PersistentVolumeClaimStatus type for use with @@ -75,3 +77,19 @@ func (b *PersistentVolumeClaimStatusApplyConfiguration) WithConditions(values .. } return b } + +// WithAllocatedResources sets the AllocatedResources field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the AllocatedResources field is set to the value of the last call. +func (b *PersistentVolumeClaimStatusApplyConfiguration) WithAllocatedResources(value v1.ResourceList) *PersistentVolumeClaimStatusApplyConfiguration { + b.AllocatedResources = &value + return b +} + +// WithResizeStatus sets the ResizeStatus field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResizeStatus field is set to the value of the last call. +func (b *PersistentVolumeClaimStatusApplyConfiguration) WithResizeStatus(value v1.PersistentVolumeClaimResizeStatus) *PersistentVolumeClaimStatusApplyConfiguration { + b.ResizeStatus = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/pod.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/pod.go index 4783713929a53..c3649829a7850 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/pod.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/pod.go @@ -50,7 +50,7 @@ func Pod(name, namespace string) *PodApplyConfiguration { // ExtractPod extracts the applied configuration owned by fieldManager from // pod. If no managedFields are found in pod for fieldManager, a // PodApplyConfiguration is returned with only the Name, Namespace (if applicable), -// APIVersion and Kind populated. Is is possible that no managed fields were found for because other +// APIVersion and Kind populated. It is possible that no managed fields were found for because other // field managers have taken ownership of all the fields previously owned by fieldManager, or because // the fieldManager never owned fields any fields. // pod must be a unmodified Pod API object that was retrieved from the Kubernetes API. @@ -59,8 +59,19 @@ func Pod(name, namespace string) *PodApplyConfiguration { // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! 
func ExtractPod(pod *apicorev1.Pod, fieldManager string) (*PodApplyConfiguration, error) { + return extractPod(pod, fieldManager, "") +} + +// ExtractPodStatus is the same as ExtractPod except +// that it extracts the status subresource applied configuration. +// Experimental! +func ExtractPodStatus(pod *apicorev1.Pod, fieldManager string) (*PodApplyConfiguration, error) { + return extractPod(pod, fieldManager, "status") +} + +func extractPod(pod *apicorev1.Pod, fieldManager string, subresource string) (*PodApplyConfiguration, error) { b := &PodApplyConfiguration{} - err := managedfields.ExtractInto(pod, internal.Parser().Type("io.k8s.api.core.v1.Pod"), fieldManager, b) + err := managedfields.ExtractInto(pod, internal.Parser().Type("io.k8s.api.core.v1.Pod"), fieldManager, b, subresource) if err != nil { return nil, err } diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/podos.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/podos.go new file mode 100644 index 0000000000000..a5315d636b620 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/podos.go @@ -0,0 +1,43 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "k8s.io/api/core/v1" +) + +// PodOSApplyConfiguration represents an declarative configuration of the PodOS type for use +// with apply. +type PodOSApplyConfiguration struct { + Name *v1.OSName `json:"name,omitempty"` +} + +// PodOSApplyConfiguration constructs an declarative configuration of the PodOS type for use with +// apply. +func PodOS() *PodOSApplyConfiguration { + return &PodOSApplyConfiguration{} +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. 
+func (b *PodOSApplyConfiguration) WithName(value v1.OSName) *PodOSApplyConfiguration { + b.Name = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/podspec.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/podspec.go index d1c9ea9cb6221..015859e9a3886 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/podspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/podspec.go @@ -60,6 +60,7 @@ type PodSpecApplyConfiguration struct { Overhead *corev1.ResourceList `json:"overhead,omitempty"` TopologySpreadConstraints []TopologySpreadConstraintApplyConfiguration `json:"topologySpreadConstraints,omitempty"` SetHostnameAsFQDN *bool `json:"setHostnameAsFQDN,omitempty"` + OS *PodOSApplyConfiguration `json:"os,omitempty"` } // PodSpecApplyConfiguration constructs an declarative configuration of the PodSpec type for use with @@ -398,3 +399,11 @@ func (b *PodSpecApplyConfiguration) WithSetHostnameAsFQDN(value bool) *PodSpecAp b.SetHostnameAsFQDN = &value return b } + +// WithOS sets the OS field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the OS field is set to the value of the last call. +func (b *PodSpecApplyConfiguration) WithOS(value *PodOSApplyConfiguration) *PodSpecApplyConfiguration { + b.OS = value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/podtemplate.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/podtemplate.go index 64263882a82c0..1460977c0c910 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/podtemplate.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/podtemplate.go @@ -49,7 +49,7 @@ func PodTemplate(name, namespace string) *PodTemplateApplyConfiguration { // ExtractPodTemplate extracts the applied configuration owned by fieldManager from // podTemplate. If no managedFields are found in podTemplate for fieldManager, a // PodTemplateApplyConfiguration is returned with only the Name, Namespace (if applicable), -// APIVersion and Kind populated. Is is possible that no managed fields were found for because other +// APIVersion and Kind populated. It is possible that no managed fields were found for because other // field managers have taken ownership of all the fields previously owned by fieldManager, or because // the fieldManager never owned fields any fields. // podTemplate must be a unmodified PodTemplate API object that was retrieved from the Kubernetes API. @@ -58,8 +58,19 @@ func PodTemplate(name, namespace string) *PodTemplateApplyConfiguration { // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! func ExtractPodTemplate(podTemplate *apicorev1.PodTemplate, fieldManager string) (*PodTemplateApplyConfiguration, error) { + return extractPodTemplate(podTemplate, fieldManager, "") +} + +// ExtractPodTemplateStatus is the same as ExtractPodTemplate except +// that it extracts the status subresource applied configuration. +// Experimental! 
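// With the PodOS apply configuration and the WithOS builder above in place,
// an os field can be declared alongside the rest of a pod spec by chaining.
// A hypothetical sketch; the pod, container, and image names are illustrative:

package example

import (
	corev1 "k8s.io/api/core/v1"
	applycorev1 "k8s.io/client-go/applyconfigurations/core/v1"
)

// linuxPodApply declares spec.os.name=linux on a minimal pod.
func linuxPodApply() *applycorev1.PodApplyConfiguration {
	return applycorev1.Pod("demo", "default").
		WithSpec(applycorev1.PodSpec().
			WithOS(applycorev1.PodOS().WithName(corev1.Linux)).
			WithContainers(applycorev1.Container().
				WithName("app").
				WithImage("nginx:1.21")))
}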
+func ExtractPodTemplateStatus(podTemplate *apicorev1.PodTemplate, fieldManager string) (*PodTemplateApplyConfiguration, error) { + return extractPodTemplate(podTemplate, fieldManager, "status") +} + +func extractPodTemplate(podTemplate *apicorev1.PodTemplate, fieldManager string, subresource string) (*PodTemplateApplyConfiguration, error) { b := &PodTemplateApplyConfiguration{} - err := managedfields.ExtractInto(podTemplate, internal.Parser().Type("io.k8s.api.core.v1.PodTemplate"), fieldManager, b) + err := managedfields.ExtractInto(podTemplate, internal.Parser().Type("io.k8s.api.core.v1.PodTemplate"), fieldManager, b, subresource) if err != nil { return nil, err } diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/probe.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/probe.go index f87adcd5f32ab..10730557a0ee8 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/probe.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/probe.go @@ -21,13 +21,13 @@ package v1 // ProbeApplyConfiguration represents an declarative configuration of the Probe type for use // with apply. type ProbeApplyConfiguration struct { - HandlerApplyConfiguration `json:",inline"` - InitialDelaySeconds *int32 `json:"initialDelaySeconds,omitempty"` - TimeoutSeconds *int32 `json:"timeoutSeconds,omitempty"` - PeriodSeconds *int32 `json:"periodSeconds,omitempty"` - SuccessThreshold *int32 `json:"successThreshold,omitempty"` - FailureThreshold *int32 `json:"failureThreshold,omitempty"` - TerminationGracePeriodSeconds *int64 `json:"terminationGracePeriodSeconds,omitempty"` + ProbeHandlerApplyConfiguration `json:",inline"` + InitialDelaySeconds *int32 `json:"initialDelaySeconds,omitempty"` + TimeoutSeconds *int32 `json:"timeoutSeconds,omitempty"` + PeriodSeconds *int32 `json:"periodSeconds,omitempty"` + SuccessThreshold *int32 `json:"successThreshold,omitempty"` + FailureThreshold *int32 `json:"failureThreshold,omitempty"` + TerminationGracePeriodSeconds *int64 `json:"terminationGracePeriodSeconds,omitempty"` } // ProbeApplyConfiguration constructs an declarative configuration of the Probe type for use with @@ -60,6 +60,14 @@ func (b *ProbeApplyConfiguration) WithTCPSocket(value *TCPSocketActionApplyConfi return b } +// WithGRPC sets the GRPC field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GRPC field is set to the value of the last call. +func (b *ProbeApplyConfiguration) WithGRPC(value *GRPCActionApplyConfiguration) *ProbeApplyConfiguration { + b.GRPC = value + return b +} + // WithInitialDelaySeconds sets the InitialDelaySeconds field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the InitialDelaySeconds field is set to the value of the last call. diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/probehandler.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/probehandler.go new file mode 100644 index 0000000000000..54f3344ac70d0 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/probehandler.go @@ -0,0 +1,66 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// ProbeHandlerApplyConfiguration represents an declarative configuration of the ProbeHandler type for use +// with apply. +type ProbeHandlerApplyConfiguration struct { + Exec *ExecActionApplyConfiguration `json:"exec,omitempty"` + HTTPGet *HTTPGetActionApplyConfiguration `json:"httpGet,omitempty"` + TCPSocket *TCPSocketActionApplyConfiguration `json:"tcpSocket,omitempty"` + GRPC *GRPCActionApplyConfiguration `json:"grpc,omitempty"` +} + +// ProbeHandlerApplyConfiguration constructs an declarative configuration of the ProbeHandler type for use with +// apply. +func ProbeHandler() *ProbeHandlerApplyConfiguration { + return &ProbeHandlerApplyConfiguration{} +} + +// WithExec sets the Exec field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Exec field is set to the value of the last call. +func (b *ProbeHandlerApplyConfiguration) WithExec(value *ExecActionApplyConfiguration) *ProbeHandlerApplyConfiguration { + b.Exec = value + return b +} + +// WithHTTPGet sets the HTTPGet field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the HTTPGet field is set to the value of the last call. +func (b *ProbeHandlerApplyConfiguration) WithHTTPGet(value *HTTPGetActionApplyConfiguration) *ProbeHandlerApplyConfiguration { + b.HTTPGet = value + return b +} + +// WithTCPSocket sets the TCPSocket field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the TCPSocket field is set to the value of the last call. +func (b *ProbeHandlerApplyConfiguration) WithTCPSocket(value *TCPSocketActionApplyConfiguration) *ProbeHandlerApplyConfiguration { + b.TCPSocket = value + return b +} + +// WithGRPC sets the GRPC field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GRPC field is set to the value of the last call. +func (b *ProbeHandlerApplyConfiguration) WithGRPC(value *GRPCActionApplyConfiguration) *ProbeHandlerApplyConfiguration { + b.GRPC = value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/replicationcontroller.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/replicationcontroller.go index f9b243a65e98b..6dd6ae267592f 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/replicationcontroller.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/replicationcontroller.go @@ -50,7 +50,7 @@ func ReplicationController(name, namespace string) *ReplicationControllerApplyCo // ExtractReplicationController extracts the applied configuration owned by fieldManager from // replicationController. 
If no managedFields are found in replicationController for fieldManager, a // ReplicationControllerApplyConfiguration is returned with only the Name, Namespace (if applicable), -// APIVersion and Kind populated. Is is possible that no managed fields were found for because other +// APIVersion and Kind populated. It is possible that no managed fields were found for because other // field managers have taken ownership of all the fields previously owned by fieldManager, or because // the fieldManager never owned fields any fields. // replicationController must be a unmodified ReplicationController API object that was retrieved from the Kubernetes API. @@ -59,8 +59,19 @@ func ReplicationController(name, namespace string) *ReplicationControllerApplyCo // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! func ExtractReplicationController(replicationController *apicorev1.ReplicationController, fieldManager string) (*ReplicationControllerApplyConfiguration, error) { + return extractReplicationController(replicationController, fieldManager, "") +} + +// ExtractReplicationControllerStatus is the same as ExtractReplicationController except +// that it extracts the status subresource applied configuration. +// Experimental! +func ExtractReplicationControllerStatus(replicationController *apicorev1.ReplicationController, fieldManager string) (*ReplicationControllerApplyConfiguration, error) { + return extractReplicationController(replicationController, fieldManager, "status") +} + +func extractReplicationController(replicationController *apicorev1.ReplicationController, fieldManager string, subresource string) (*ReplicationControllerApplyConfiguration, error) { b := &ReplicationControllerApplyConfiguration{} - err := managedfields.ExtractInto(replicationController, internal.Parser().Type("io.k8s.api.core.v1.ReplicationController"), fieldManager, b) + err := managedfields.ExtractInto(replicationController, internal.Parser().Type("io.k8s.api.core.v1.ReplicationController"), fieldManager, b, subresource) if err != nil { return nil, err } diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourcequota.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourcequota.go index a0c4f0c0a2b07..5cfb1988b12ba 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourcequota.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourcequota.go @@ -50,7 +50,7 @@ func ResourceQuota(name, namespace string) *ResourceQuotaApplyConfiguration { // ExtractResourceQuota extracts the applied configuration owned by fieldManager from // resourceQuota. If no managedFields are found in resourceQuota for fieldManager, a // ResourceQuotaApplyConfiguration is returned with only the Name, Namespace (if applicable), -// APIVersion and Kind populated. Is is possible that no managed fields were found for because other +// APIVersion and Kind populated. It is possible that no managed fields were found for because other // field managers have taken ownership of all the fields previously owned by fieldManager, or because // the fieldManager never owned fields any fields. // resourceQuota must be a unmodified ResourceQuota API object that was retrieved from the Kubernetes API. @@ -59,8 +59,19 @@ func ResourceQuota(name, namespace string) *ResourceQuotaApplyConfiguration { // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! 
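// Because ProbeApplyConfiguration now inlines ProbeHandlerApplyConfiguration
// (see the probe.go and probehandler.go hunks above), the new gRPC action is
// reachable directly from a probe. A hedged sketch, assuming the GRPCAction
// apply configuration generated elsewhere in this dependency bump; the port,
// service, and timing values are illustrative:

package example

import (
	applycorev1 "k8s.io/client-go/applyconfigurations/core/v1"
)

// grpcLivenessProbe declares a gRPC health-check probe.
func grpcLivenessProbe() *applycorev1.ProbeApplyConfiguration {
	return applycorev1.Probe().
		WithGRPC(applycorev1.GRPCAction().
			WithPort(9090).
			WithService("liveness")).
		WithInitialDelaySeconds(5).
		WithPeriodSeconds(10)
}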
func ExtractResourceQuota(resourceQuota *apicorev1.ResourceQuota, fieldManager string) (*ResourceQuotaApplyConfiguration, error) { + return extractResourceQuota(resourceQuota, fieldManager, "") +} + +// ExtractResourceQuotaStatus is the same as ExtractResourceQuota except +// that it extracts the status subresource applied configuration. +// Experimental! +func ExtractResourceQuotaStatus(resourceQuota *apicorev1.ResourceQuota, fieldManager string) (*ResourceQuotaApplyConfiguration, error) { + return extractResourceQuota(resourceQuota, fieldManager, "status") +} + +func extractResourceQuota(resourceQuota *apicorev1.ResourceQuota, fieldManager string, subresource string) (*ResourceQuotaApplyConfiguration, error) { b := &ResourceQuotaApplyConfiguration{} - err := managedfields.ExtractInto(resourceQuota, internal.Parser().Type("io.k8s.api.core.v1.ResourceQuota"), fieldManager, b) + err := managedfields.ExtractInto(resourceQuota, internal.Parser().Type("io.k8s.api.core.v1.ResourceQuota"), fieldManager, b, subresource) if err != nil { return nil, err } diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/secret.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/secret.go index 7fd2aa9360f03..00d789f4163e6 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/secret.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/secret.go @@ -52,7 +52,7 @@ func Secret(name, namespace string) *SecretApplyConfiguration { // ExtractSecret extracts the applied configuration owned by fieldManager from // secret. If no managedFields are found in secret for fieldManager, a // SecretApplyConfiguration is returned with only the Name, Namespace (if applicable), -// APIVersion and Kind populated. Is is possible that no managed fields were found for because other +// APIVersion and Kind populated. It is possible that no managed fields were found for because other // field managers have taken ownership of all the fields previously owned by fieldManager, or because // the fieldManager never owned fields any fields. // secret must be a unmodified Secret API object that was retrieved from the Kubernetes API. @@ -61,8 +61,19 @@ func Secret(name, namespace string) *SecretApplyConfiguration { // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! func ExtractSecret(secret *corev1.Secret, fieldManager string) (*SecretApplyConfiguration, error) { + return extractSecret(secret, fieldManager, "") +} + +// ExtractSecretStatus is the same as ExtractSecret except +// that it extracts the status subresource applied configuration. +// Experimental! 
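// Each Extract<Kind>Status helper introduced in these hunks pairs with the
// typed clients' ApplyStatus methods: extract the status fields this manager
// owns, mutate them, and re-apply. A minimal sketch of that round trip; the
// clientset wiring and the "quota-controller" field manager are assumptions
// for illustration, not part of this patch:

package example

import (
	"context"

	apicorev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	applycorev1 "k8s.io/client-go/applyconfigurations/core/v1"
	"k8s.io/client-go/kubernetes"
)

// reapplyQuotaStatus re-applies only the status fields owned by this manager.
func reapplyQuotaStatus(ctx context.Context, cs kubernetes.Interface, quota *apicorev1.ResourceQuota) error {
	ac, err := applycorev1.ExtractResourceQuotaStatus(quota, "quota-controller")
	if err != nil {
		return err
	}
	// ... mutate ac.Status here before re-applying ...
	_, err = cs.CoreV1().ResourceQuotas(quota.Namespace).
		ApplyStatus(ctx, ac, metav1.ApplyOptions{FieldManager: "quota-controller", Force: true})
	return err
}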
+func ExtractSecretStatus(secret *corev1.Secret, fieldManager string) (*SecretApplyConfiguration, error) { + return extractSecret(secret, fieldManager, "status") +} + +func extractSecret(secret *corev1.Secret, fieldManager string, subresource string) (*SecretApplyConfiguration, error) { b := &SecretApplyConfiguration{} - err := managedfields.ExtractInto(secret, internal.Parser().Type("io.k8s.api.core.v1.Secret"), fieldManager, b) + err := managedfields.ExtractInto(secret, internal.Parser().Type("io.k8s.api.core.v1.Secret"), fieldManager, b, subresource) if err != nil { return nil, err } diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/service.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/service.go index 38c439559328e..31b47311fff90 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/service.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/service.go @@ -50,7 +50,7 @@ func Service(name, namespace string) *ServiceApplyConfiguration { // ExtractService extracts the applied configuration owned by fieldManager from // service. If no managedFields are found in service for fieldManager, a // ServiceApplyConfiguration is returned with only the Name, Namespace (if applicable), -// APIVersion and Kind populated. Is is possible that no managed fields were found for because other +// APIVersion and Kind populated. It is possible that no managed fields were found for because other // field managers have taken ownership of all the fields previously owned by fieldManager, or because // the fieldManager never owned fields any fields. // service must be a unmodified Service API object that was retrieved from the Kubernetes API. @@ -59,8 +59,19 @@ func Service(name, namespace string) *ServiceApplyConfiguration { // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! func ExtractService(service *apicorev1.Service, fieldManager string) (*ServiceApplyConfiguration, error) { + return extractService(service, fieldManager, "") +} + +// ExtractServiceStatus is the same as ExtractService except +// that it extracts the status subresource applied configuration. +// Experimental! +func ExtractServiceStatus(service *apicorev1.Service, fieldManager string) (*ServiceApplyConfiguration, error) { + return extractService(service, fieldManager, "status") +} + +func extractService(service *apicorev1.Service, fieldManager string, subresource string) (*ServiceApplyConfiguration, error) { b := &ServiceApplyConfiguration{} - err := managedfields.ExtractInto(service, internal.Parser().Type("io.k8s.api.core.v1.Service"), fieldManager, b) + err := managedfields.ExtractInto(service, internal.Parser().Type("io.k8s.api.core.v1.Service"), fieldManager, b, subresource) if err != nil { return nil, err } diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/serviceaccount.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/serviceaccount.go index 46983e4a7b425..459d025ebbf6d 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/serviceaccount.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/serviceaccount.go @@ -51,7 +51,7 @@ func ServiceAccount(name, namespace string) *ServiceAccountApplyConfiguration { // ExtractServiceAccount extracts the applied configuration owned by fieldManager from // serviceAccount. 
If no managedFields are found in serviceAccount for fieldManager, a // ServiceAccountApplyConfiguration is returned with only the Name, Namespace (if applicable), -// APIVersion and Kind populated. Is is possible that no managed fields were found for because other +// APIVersion and Kind populated. It is possible that no managed fields were found for because other // field managers have taken ownership of all the fields previously owned by fieldManager, or because // the fieldManager never owned fields any fields. // serviceAccount must be a unmodified ServiceAccount API object that was retrieved from the Kubernetes API. @@ -60,8 +60,19 @@ func ServiceAccount(name, namespace string) *ServiceAccountApplyConfiguration { // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! func ExtractServiceAccount(serviceAccount *apicorev1.ServiceAccount, fieldManager string) (*ServiceAccountApplyConfiguration, error) { + return extractServiceAccount(serviceAccount, fieldManager, "") +} + +// ExtractServiceAccountStatus is the same as ExtractServiceAccount except +// that it extracts the status subresource applied configuration. +// Experimental! +func ExtractServiceAccountStatus(serviceAccount *apicorev1.ServiceAccount, fieldManager string) (*ServiceAccountApplyConfiguration, error) { + return extractServiceAccount(serviceAccount, fieldManager, "status") +} + +func extractServiceAccount(serviceAccount *apicorev1.ServiceAccount, fieldManager string, subresource string) (*ServiceAccountApplyConfiguration, error) { b := &ServiceAccountApplyConfiguration{} - err := managedfields.ExtractInto(serviceAccount, internal.Parser().Type("io.k8s.api.core.v1.ServiceAccount"), fieldManager, b) + err := managedfields.ExtractInto(serviceAccount, internal.Parser().Type("io.k8s.api.core.v1.ServiceAccount"), fieldManager, b, subresource) if err != nil { return nil, err } diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/servicespec.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/servicespec.go index 99361cecf0672..856cd4f9d820e 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/servicespec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/servicespec.go @@ -39,7 +39,6 @@ type ServiceSpecApplyConfiguration struct { HealthCheckNodePort *int32 `json:"healthCheckNodePort,omitempty"` PublishNotReadyAddresses *bool `json:"publishNotReadyAddresses,omitempty"` SessionAffinityConfig *SessionAffinityConfigApplyConfiguration `json:"sessionAffinityConfig,omitempty"` - TopologyKeys []string `json:"topologyKeys,omitempty"` IPFamilies []corev1.IPFamily `json:"ipFamilies,omitempty"` IPFamilyPolicy *corev1.IPFamilyPolicyType `json:"ipFamilyPolicy,omitempty"` AllocateLoadBalancerNodePorts *bool `json:"allocateLoadBalancerNodePorts,omitempty"` @@ -182,16 +181,6 @@ func (b *ServiceSpecApplyConfiguration) WithSessionAffinityConfig(value *Session return b } -// WithTopologyKeys adds the given value to the TopologyKeys field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the TopologyKeys field. 
-func (b *ServiceSpecApplyConfiguration) WithTopologyKeys(values ...string) *ServiceSpecApplyConfiguration { - for i := range values { - b.TopologyKeys = append(b.TopologyKeys, values[i]) - } - return b -} - // WithIPFamilies adds the given value to the IPFamilies field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the IPFamilies field. diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/windowssecuritycontextoptions.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/windowssecuritycontextoptions.go index 2442063c4ecd6..20692e01466d9 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/windowssecuritycontextoptions.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/windowssecuritycontextoptions.go @@ -24,6 +24,7 @@ type WindowsSecurityContextOptionsApplyConfiguration struct { GMSACredentialSpecName *string `json:"gmsaCredentialSpecName,omitempty"` GMSACredentialSpec *string `json:"gmsaCredentialSpec,omitempty"` RunAsUserName *string `json:"runAsUserName,omitempty"` + HostProcess *bool `json:"hostProcess,omitempty"` } // WindowsSecurityContextOptionsApplyConfiguration constructs an declarative configuration of the WindowsSecurityContextOptions type for use with @@ -55,3 +56,11 @@ func (b *WindowsSecurityContextOptionsApplyConfiguration) WithRunAsUserName(valu b.RunAsUserName = &value return b } + +// WithHostProcess sets the HostProcess field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the HostProcess field is set to the value of the last call. +func (b *WindowsSecurityContextOptionsApplyConfiguration) WithHostProcess(value bool) *WindowsSecurityContextOptionsApplyConfiguration { + b.HostProcess = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/discovery/v1/endpointslice.go b/vendor/k8s.io/client-go/applyconfigurations/discovery/v1/endpointslice.go index 681013f0415e7..6feaab25dc6c1 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/discovery/v1/endpointslice.go +++ b/vendor/k8s.io/client-go/applyconfigurations/discovery/v1/endpointslice.go @@ -51,7 +51,7 @@ func EndpointSlice(name, namespace string) *EndpointSliceApplyConfiguration { // ExtractEndpointSlice extracts the applied configuration owned by fieldManager from // endpointSlice. If no managedFields are found in endpointSlice for fieldManager, a // EndpointSliceApplyConfiguration is returned with only the Name, Namespace (if applicable), -// APIVersion and Kind populated. Is is possible that no managed fields were found for because other +// APIVersion and Kind populated. It is possible that no managed fields were found for because other // field managers have taken ownership of all the fields previously owned by fieldManager, or because // the fieldManager never owned fields any fields. // endpointSlice must be a unmodified EndpointSlice API object that was retrieved from the Kubernetes API. @@ -60,8 +60,19 @@ func EndpointSlice(name, namespace string) *EndpointSliceApplyConfiguration { // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! 
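// Two changes land in the service and Windows hunks above: the WithTopologyKeys
// builder disappears along with the removed ServiceSpec.TopologyKeys field, and
// WindowsSecurityContextOptions gains a HostProcess flag. A hypothetical sketch
// of the new builder; the user name is illustrative:

package example

import (
	applycorev1 "k8s.io/client-go/applyconfigurations/core/v1"
)

// hostProcessOptions declares a Windows HostProcess security context.
func hostProcessOptions() *applycorev1.WindowsSecurityContextOptionsApplyConfiguration {
	return applycorev1.WindowsSecurityContextOptions().
		WithHostProcess(true).
		WithRunAsUserName(`NT AUTHORITY\SYSTEM`)
}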
func ExtractEndpointSlice(endpointSlice *discoveryv1.EndpointSlice, fieldManager string) (*EndpointSliceApplyConfiguration, error) { + return extractEndpointSlice(endpointSlice, fieldManager, "") +} + +// ExtractEndpointSliceStatus is the same as ExtractEndpointSlice except +// that it extracts the status subresource applied configuration. +// Experimental! +func ExtractEndpointSliceStatus(endpointSlice *discoveryv1.EndpointSlice, fieldManager string) (*EndpointSliceApplyConfiguration, error) { + return extractEndpointSlice(endpointSlice, fieldManager, "status") +} + +func extractEndpointSlice(endpointSlice *discoveryv1.EndpointSlice, fieldManager string, subresource string) (*EndpointSliceApplyConfiguration, error) { b := &EndpointSliceApplyConfiguration{} - err := managedfields.ExtractInto(endpointSlice, internal.Parser().Type("io.k8s.api.discovery.v1.EndpointSlice"), fieldManager, b) + err := managedfields.ExtractInto(endpointSlice, internal.Parser().Type("io.k8s.api.discovery.v1.EndpointSlice"), fieldManager, b, subresource) if err != nil { return nil, err } diff --git a/vendor/k8s.io/client-go/applyconfigurations/discovery/v1beta1/endpointslice.go b/vendor/k8s.io/client-go/applyconfigurations/discovery/v1beta1/endpointslice.go index a96708225a74b..bacc1134db3f1 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/discovery/v1beta1/endpointslice.go +++ b/vendor/k8s.io/client-go/applyconfigurations/discovery/v1beta1/endpointslice.go @@ -51,7 +51,7 @@ func EndpointSlice(name, namespace string) *EndpointSliceApplyConfiguration { // ExtractEndpointSlice extracts the applied configuration owned by fieldManager from // endpointSlice. If no managedFields are found in endpointSlice for fieldManager, a // EndpointSliceApplyConfiguration is returned with only the Name, Namespace (if applicable), -// APIVersion and Kind populated. Is is possible that no managed fields were found for because other +// APIVersion and Kind populated. It is possible that no managed fields were found for because other // field managers have taken ownership of all the fields previously owned by fieldManager, or because // the fieldManager never owned fields any fields. // endpointSlice must be a unmodified EndpointSlice API object that was retrieved from the Kubernetes API. @@ -60,8 +60,19 @@ func EndpointSlice(name, namespace string) *EndpointSliceApplyConfiguration { // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! func ExtractEndpointSlice(endpointSlice *v1beta1.EndpointSlice, fieldManager string) (*EndpointSliceApplyConfiguration, error) { + return extractEndpointSlice(endpointSlice, fieldManager, "") +} + +// ExtractEndpointSliceStatus is the same as ExtractEndpointSlice except +// that it extracts the status subresource applied configuration. +// Experimental! 
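// Worth noting: the generator emits an Extract<Kind>Status variant for every
// type uniformly, including kinds such as Secret, ServiceAccount, PodTemplate,
// and EndpointSlice that expose no status subresource. For those kinds no
// managedFields entry carries the "status" subresource marker, so the Status
// variant yields an apply configuration with only the identity fields
// populated.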
+func ExtractEndpointSliceStatus(endpointSlice *v1beta1.EndpointSlice, fieldManager string) (*EndpointSliceApplyConfiguration, error) { + return extractEndpointSlice(endpointSlice, fieldManager, "status") +} + +func extractEndpointSlice(endpointSlice *v1beta1.EndpointSlice, fieldManager string, subresource string) (*EndpointSliceApplyConfiguration, error) { b := &EndpointSliceApplyConfiguration{} - err := managedfields.ExtractInto(endpointSlice, internal.Parser().Type("io.k8s.api.discovery.v1beta1.EndpointSlice"), fieldManager, b) + err := managedfields.ExtractInto(endpointSlice, internal.Parser().Type("io.k8s.api.discovery.v1beta1.EndpointSlice"), fieldManager, b, subresource) if err != nil { return nil, err } diff --git a/vendor/k8s.io/client-go/applyconfigurations/events/v1/event.go b/vendor/k8s.io/client-go/applyconfigurations/events/v1/event.go index 860fff586ce57..19cc9e0adc5e1 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/events/v1/event.go +++ b/vendor/k8s.io/client-go/applyconfigurations/events/v1/event.go @@ -63,7 +63,7 @@ func Event(name, namespace string) *EventApplyConfiguration { // ExtractEvent extracts the applied configuration owned by fieldManager from // event. If no managedFields are found in event for fieldManager, a // EventApplyConfiguration is returned with only the Name, Namespace (if applicable), -// APIVersion and Kind populated. Is is possible that no managed fields were found for because other +// APIVersion and Kind populated. It is possible that no managed fields were found for because other // field managers have taken ownership of all the fields previously owned by fieldManager, or because // the fieldManager never owned fields any fields. // event must be a unmodified Event API object that was retrieved from the Kubernetes API. @@ -72,8 +72,19 @@ func Event(name, namespace string) *EventApplyConfiguration { // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! func ExtractEvent(event *apieventsv1.Event, fieldManager string) (*EventApplyConfiguration, error) { + return extractEvent(event, fieldManager, "") +} + +// ExtractEventStatus is the same as ExtractEvent except +// that it extracts the status subresource applied configuration. +// Experimental! +func ExtractEventStatus(event *apieventsv1.Event, fieldManager string) (*EventApplyConfiguration, error) { + return extractEvent(event, fieldManager, "status") +} + +func extractEvent(event *apieventsv1.Event, fieldManager string, subresource string) (*EventApplyConfiguration, error) { b := &EventApplyConfiguration{} - err := managedfields.ExtractInto(event, internal.Parser().Type("io.k8s.api.events.v1.Event"), fieldManager, b) + err := managedfields.ExtractInto(event, internal.Parser().Type("io.k8s.api.events.v1.Event"), fieldManager, b, subresource) if err != nil { return nil, err } diff --git a/vendor/k8s.io/client-go/applyconfigurations/events/v1beta1/event.go b/vendor/k8s.io/client-go/applyconfigurations/events/v1beta1/event.go index 65057f957b4fb..f02bdd2b988b0 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/events/v1beta1/event.go +++ b/vendor/k8s.io/client-go/applyconfigurations/events/v1beta1/event.go @@ -63,7 +63,7 @@ func Event(name, namespace string) *EventApplyConfiguration { // ExtractEvent extracts the applied configuration owned by fieldManager from // event. 
If no managedFields are found in event for fieldManager, a // EventApplyConfiguration is returned with only the Name, Namespace (if applicable), -// APIVersion and Kind populated. Is is possible that no managed fields were found for because other +// APIVersion and Kind populated. It is possible that no managed fields were found for because other // field managers have taken ownership of all the fields previously owned by fieldManager, or because // the fieldManager never owned fields any fields. // event must be a unmodified Event API object that was retrieved from the Kubernetes API. @@ -72,8 +72,19 @@ func Event(name, namespace string) *EventApplyConfiguration { // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! func ExtractEvent(event *eventsv1beta1.Event, fieldManager string) (*EventApplyConfiguration, error) { + return extractEvent(event, fieldManager, "") +} + +// ExtractEventStatus is the same as ExtractEvent except +// that it extracts the status subresource applied configuration. +// Experimental! +func ExtractEventStatus(event *eventsv1beta1.Event, fieldManager string) (*EventApplyConfiguration, error) { + return extractEvent(event, fieldManager, "status") +} + +func extractEvent(event *eventsv1beta1.Event, fieldManager string, subresource string) (*EventApplyConfiguration, error) { b := &EventApplyConfiguration{} - err := managedfields.ExtractInto(event, internal.Parser().Type("io.k8s.api.events.v1beta1.Event"), fieldManager, b) + err := managedfields.ExtractInto(event, internal.Parser().Type("io.k8s.api.events.v1beta1.Event"), fieldManager, b, subresource) if err != nil { return nil, err } diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/daemonset.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/daemonset.go index 09777e4340d42..1455873258e35 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/daemonset.go +++ b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/daemonset.go @@ -50,7 +50,7 @@ func DaemonSet(name, namespace string) *DaemonSetApplyConfiguration { // ExtractDaemonSet extracts the applied configuration owned by fieldManager from // daemonSet. If no managedFields are found in daemonSet for fieldManager, a // DaemonSetApplyConfiguration is returned with only the Name, Namespace (if applicable), -// APIVersion and Kind populated. Is is possible that no managed fields were found for because other +// APIVersion and Kind populated. It is possible that no managed fields were found for because other // field managers have taken ownership of all the fields previously owned by fieldManager, or because // the fieldManager never owned fields any fields. // daemonSet must be a unmodified DaemonSet API object that was retrieved from the Kubernetes API. @@ -59,8 +59,19 @@ func DaemonSet(name, namespace string) *DaemonSetApplyConfiguration { // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! func ExtractDaemonSet(daemonSet *extensionsv1beta1.DaemonSet, fieldManager string) (*DaemonSetApplyConfiguration, error) { + return extractDaemonSet(daemonSet, fieldManager, "") +} + +// ExtractDaemonSetStatus is the same as ExtractDaemonSet except +// that it extracts the status subresource applied configuration. +// Experimental! 
+func ExtractDaemonSetStatus(daemonSet *extensionsv1beta1.DaemonSet, fieldManager string) (*DaemonSetApplyConfiguration, error) { + return extractDaemonSet(daemonSet, fieldManager, "status") +} + +func extractDaemonSet(daemonSet *extensionsv1beta1.DaemonSet, fieldManager string, subresource string) (*DaemonSetApplyConfiguration, error) { b := &DaemonSetApplyConfiguration{} - err := managedfields.ExtractInto(daemonSet, internal.Parser().Type("io.k8s.api.extensions.v1beta1.DaemonSet"), fieldManager, b) + err := managedfields.ExtractInto(daemonSet, internal.Parser().Type("io.k8s.api.extensions.v1beta1.DaemonSet"), fieldManager, b, subresource) if err != nil { return nil, err } diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/deployment.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/deployment.go index cc9d8fdc3af4b..e64e4ca380989 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/deployment.go +++ b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/deployment.go @@ -50,7 +50,7 @@ func Deployment(name, namespace string) *DeploymentApplyConfiguration { // ExtractDeployment extracts the applied configuration owned by fieldManager from // deployment. If no managedFields are found in deployment for fieldManager, a // DeploymentApplyConfiguration is returned with only the Name, Namespace (if applicable), -// APIVersion and Kind populated. Is is possible that no managed fields were found for because other +// APIVersion and Kind populated. It is possible that no managed fields were found for because other // field managers have taken ownership of all the fields previously owned by fieldManager, or because // the fieldManager never owned fields any fields. // deployment must be a unmodified Deployment API object that was retrieved from the Kubernetes API. @@ -59,8 +59,19 @@ func Deployment(name, namespace string) *DeploymentApplyConfiguration { // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! func ExtractDeployment(deployment *extensionsv1beta1.Deployment, fieldManager string) (*DeploymentApplyConfiguration, error) { + return extractDeployment(deployment, fieldManager, "") +} + +// ExtractDeploymentStatus is the same as ExtractDeployment except +// that it extracts the status subresource applied configuration. +// Experimental! 
+func ExtractDeploymentStatus(deployment *extensionsv1beta1.Deployment, fieldManager string) (*DeploymentApplyConfiguration, error) { + return extractDeployment(deployment, fieldManager, "status") +} + +func extractDeployment(deployment *extensionsv1beta1.Deployment, fieldManager string, subresource string) (*DeploymentApplyConfiguration, error) { b := &DeploymentApplyConfiguration{} - err := managedfields.ExtractInto(deployment, internal.Parser().Type("io.k8s.api.extensions.v1beta1.Deployment"), fieldManager, b) + err := managedfields.ExtractInto(deployment, internal.Parser().Type("io.k8s.api.extensions.v1beta1.Deployment"), fieldManager, b, subresource) if err != nil { return nil, err } diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingress.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingress.go index ac30106667b36..df4fbc2cd18a5 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingress.go +++ b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingress.go @@ -50,7 +50,7 @@ func Ingress(name, namespace string) *IngressApplyConfiguration { // ExtractIngress extracts the applied configuration owned by fieldManager from // ingress. If no managedFields are found in ingress for fieldManager, a // IngressApplyConfiguration is returned with only the Name, Namespace (if applicable), -// APIVersion and Kind populated. Is is possible that no managed fields were found for because other +// APIVersion and Kind populated. It is possible that no managed fields were found for because other // field managers have taken ownership of all the fields previously owned by fieldManager, or because // the fieldManager never owned fields any fields. // ingress must be a unmodified Ingress API object that was retrieved from the Kubernetes API. @@ -59,8 +59,19 @@ func Ingress(name, namespace string) *IngressApplyConfiguration { // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! func ExtractIngress(ingress *extensionsv1beta1.Ingress, fieldManager string) (*IngressApplyConfiguration, error) { + return extractIngress(ingress, fieldManager, "") +} + +// ExtractIngressStatus is the same as ExtractIngress except +// that it extracts the status subresource applied configuration. +// Experimental! 
+func ExtractIngressStatus(ingress *extensionsv1beta1.Ingress, fieldManager string) (*IngressApplyConfiguration, error) { + return extractIngress(ingress, fieldManager, "status") +} + +func extractIngress(ingress *extensionsv1beta1.Ingress, fieldManager string, subresource string) (*IngressApplyConfiguration, error) { b := &IngressApplyConfiguration{} - err := managedfields.ExtractInto(ingress, internal.Parser().Type("io.k8s.api.extensions.v1beta1.Ingress"), fieldManager, b) + err := managedfields.ExtractInto(ingress, internal.Parser().Type("io.k8s.api.extensions.v1beta1.Ingress"), fieldManager, b, subresource) if err != nil { return nil, err } diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/networkpolicy.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/networkpolicy.go index 0b25c9c966fd9..9652925160418 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/networkpolicy.go +++ b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/networkpolicy.go @@ -49,7 +49,7 @@ func NetworkPolicy(name, namespace string) *NetworkPolicyApplyConfiguration { // ExtractNetworkPolicy extracts the applied configuration owned by fieldManager from // networkPolicy. If no managedFields are found in networkPolicy for fieldManager, a // NetworkPolicyApplyConfiguration is returned with only the Name, Namespace (if applicable), -// APIVersion and Kind populated. Is is possible that no managed fields were found for because other +// APIVersion and Kind populated. It is possible that no managed fields were found for because other // field managers have taken ownership of all the fields previously owned by fieldManager, or because // the fieldManager never owned fields any fields. // networkPolicy must be a unmodified NetworkPolicy API object that was retrieved from the Kubernetes API. @@ -58,8 +58,19 @@ func NetworkPolicy(name, namespace string) *NetworkPolicyApplyConfiguration { // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! func ExtractNetworkPolicy(networkPolicy *extensionsv1beta1.NetworkPolicy, fieldManager string) (*NetworkPolicyApplyConfiguration, error) { + return extractNetworkPolicy(networkPolicy, fieldManager, "") +} + +// ExtractNetworkPolicyStatus is the same as ExtractNetworkPolicy except +// that it extracts the status subresource applied configuration. +// Experimental! 
+func ExtractNetworkPolicyStatus(networkPolicy *extensionsv1beta1.NetworkPolicy, fieldManager string) (*NetworkPolicyApplyConfiguration, error) { + return extractNetworkPolicy(networkPolicy, fieldManager, "status") +} + +func extractNetworkPolicy(networkPolicy *extensionsv1beta1.NetworkPolicy, fieldManager string, subresource string) (*NetworkPolicyApplyConfiguration, error) { b := &NetworkPolicyApplyConfiguration{} - err := managedfields.ExtractInto(networkPolicy, internal.Parser().Type("io.k8s.api.extensions.v1beta1.NetworkPolicy"), fieldManager, b) + err := managedfields.ExtractInto(networkPolicy, internal.Parser().Type("io.k8s.api.extensions.v1beta1.NetworkPolicy"), fieldManager, b, subresource) if err != nil { return nil, err } diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/podsecuritypolicy.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/podsecuritypolicy.go index e2c8d8f81134a..cceec69f995a7 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/podsecuritypolicy.go +++ b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/podsecuritypolicy.go @@ -48,7 +48,7 @@ func PodSecurityPolicy(name string) *PodSecurityPolicyApplyConfiguration { // ExtractPodSecurityPolicy extracts the applied configuration owned by fieldManager from // podSecurityPolicy. If no managedFields are found in podSecurityPolicy for fieldManager, a // PodSecurityPolicyApplyConfiguration is returned with only the Name, Namespace (if applicable), -// APIVersion and Kind populated. Is is possible that no managed fields were found for because other +// APIVersion and Kind populated. It is possible that no managed fields were found for because other // field managers have taken ownership of all the fields previously owned by fieldManager, or because // the fieldManager never owned fields any fields. // podSecurityPolicy must be a unmodified PodSecurityPolicy API object that was retrieved from the Kubernetes API. @@ -57,8 +57,19 @@ func PodSecurityPolicy(name string) *PodSecurityPolicyApplyConfiguration { // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! func ExtractPodSecurityPolicy(podSecurityPolicy *extensionsv1beta1.PodSecurityPolicy, fieldManager string) (*PodSecurityPolicyApplyConfiguration, error) { + return extractPodSecurityPolicy(podSecurityPolicy, fieldManager, "") +} + +// ExtractPodSecurityPolicyStatus is the same as ExtractPodSecurityPolicy except +// that it extracts the status subresource applied configuration. +// Experimental! 
+func ExtractPodSecurityPolicyStatus(podSecurityPolicy *extensionsv1beta1.PodSecurityPolicy, fieldManager string) (*PodSecurityPolicyApplyConfiguration, error) { + return extractPodSecurityPolicy(podSecurityPolicy, fieldManager, "status") +} + +func extractPodSecurityPolicy(podSecurityPolicy *extensionsv1beta1.PodSecurityPolicy, fieldManager string, subresource string) (*PodSecurityPolicyApplyConfiguration, error) { b := &PodSecurityPolicyApplyConfiguration{} - err := managedfields.ExtractInto(podSecurityPolicy, internal.Parser().Type("io.k8s.api.extensions.v1beta1.PodSecurityPolicy"), fieldManager, b) + err := managedfields.ExtractInto(podSecurityPolicy, internal.Parser().Type("io.k8s.api.extensions.v1beta1.PodSecurityPolicy"), fieldManager, b, subresource) if err != nil { return nil, err } diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/replicaset.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/replicaset.go index dc7e7da78e212..b57cefc9da683 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/replicaset.go +++ b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/replicaset.go @@ -50,7 +50,7 @@ func ReplicaSet(name, namespace string) *ReplicaSetApplyConfiguration { // ExtractReplicaSet extracts the applied configuration owned by fieldManager from // replicaSet. If no managedFields are found in replicaSet for fieldManager, a // ReplicaSetApplyConfiguration is returned with only the Name, Namespace (if applicable), -// APIVersion and Kind populated. Is is possible that no managed fields were found for because other +// APIVersion and Kind populated. It is possible that no managed fields were found for because other // field managers have taken ownership of all the fields previously owned by fieldManager, or because // the fieldManager never owned fields any fields. // replicaSet must be a unmodified ReplicaSet API object that was retrieved from the Kubernetes API. @@ -59,8 +59,19 @@ func ReplicaSet(name, namespace string) *ReplicaSetApplyConfiguration { // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! func ExtractReplicaSet(replicaSet *extensionsv1beta1.ReplicaSet, fieldManager string) (*ReplicaSetApplyConfiguration, error) { + return extractReplicaSet(replicaSet, fieldManager, "") +} + +// ExtractReplicaSetStatus is the same as ExtractReplicaSet except +// that it extracts the status subresource applied configuration. +// Experimental! 
+func ExtractReplicaSetStatus(replicaSet *extensionsv1beta1.ReplicaSet, fieldManager string) (*ReplicaSetApplyConfiguration, error) { + return extractReplicaSet(replicaSet, fieldManager, "status") +} + +func extractReplicaSet(replicaSet *extensionsv1beta1.ReplicaSet, fieldManager string, subresource string) (*ReplicaSetApplyConfiguration, error) { b := &ReplicaSetApplyConfiguration{} - err := managedfields.ExtractInto(replicaSet, internal.Parser().Type("io.k8s.api.extensions.v1beta1.ReplicaSet"), fieldManager, b) + err := managedfields.ExtractInto(replicaSet, internal.Parser().Type("io.k8s.api.extensions.v1beta1.ReplicaSet"), fieldManager, b, subresource) if err != nil { return nil, err } diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/scale.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/scale.go new file mode 100644 index 0000000000000..701145825dab4 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/scale.go @@ -0,0 +1,233 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta1 + +import ( + v1beta1 "k8s.io/api/extensions/v1beta1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + v1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// ScaleApplyConfiguration represents an declarative configuration of the Scale type for use +// with apply. +type ScaleApplyConfiguration struct { + v1.TypeMetaApplyConfiguration `json:",inline"` + *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *v1beta1.ScaleSpec `json:"spec,omitempty"` + Status *v1beta1.ScaleStatus `json:"status,omitempty"` +} + +// ScaleApplyConfiguration constructs an declarative configuration of the Scale type for use with +// apply. +func Scale() *ScaleApplyConfiguration { + return &ScaleApplyConfiguration{} +} + +// WithKind sets the Kind field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Kind field is set to the value of the last call. +func (b *ScaleApplyConfiguration) WithKind(value string) *ScaleApplyConfiguration { + b.Kind = &value + return b +} + +// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the APIVersion field is set to the value of the last call. +func (b *ScaleApplyConfiguration) WithAPIVersion(value string) *ScaleApplyConfiguration { + b.APIVersion = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. 
+func (b *ScaleApplyConfiguration) WithName(value string) *ScaleApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Name = &value + return b +} + +// WithGenerateName sets the GenerateName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GenerateName field is set to the value of the last call. +func (b *ScaleApplyConfiguration) WithGenerateName(value string) *ScaleApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.GenerateName = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. +func (b *ScaleApplyConfiguration) WithNamespace(value string) *ScaleApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Namespace = &value + return b +} + +// WithSelfLink sets the SelfLink field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the SelfLink field is set to the value of the last call. +func (b *ScaleApplyConfiguration) WithSelfLink(value string) *ScaleApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.SelfLink = &value + return b +} + +// WithUID sets the UID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UID field is set to the value of the last call. +func (b *ScaleApplyConfiguration) WithUID(value types.UID) *ScaleApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.UID = &value + return b +} + +// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourceVersion field is set to the value of the last call. +func (b *ScaleApplyConfiguration) WithResourceVersion(value string) *ScaleApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ResourceVersion = &value + return b +} + +// WithGeneration sets the Generation field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Generation field is set to the value of the last call. +func (b *ScaleApplyConfiguration) WithGeneration(value int64) *ScaleApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Generation = &value + return b +} + +// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CreationTimestamp field is set to the value of the last call. 
+func (b *ScaleApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ScaleApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.CreationTimestamp = &value + return b +} + +// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionTimestamp field is set to the value of the last call. +func (b *ScaleApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ScaleApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.DeletionTimestamp = &value + return b +} + +// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. +func (b *ScaleApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ScaleApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.DeletionGracePeriodSeconds = &value + return b +} + +// WithLabels puts the entries into the Labels field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Labels field, +// overwriting an existing map entries in Labels field with the same key. +func (b *ScaleApplyConfiguration) WithLabels(entries map[string]string) *ScaleApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.Labels == nil && len(entries) > 0 { + b.Labels = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.Labels[k] = v + } + return b +} + +// WithAnnotations puts the entries into the Annotations field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Annotations field, +// overwriting an existing map entries in Annotations field with the same key. +func (b *ScaleApplyConfiguration) WithAnnotations(entries map[string]string) *ScaleApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.Annotations == nil && len(entries) > 0 { + b.Annotations = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.Annotations[k] = v + } + return b +} + +// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the OwnerReferences field. +func (b *ScaleApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *ScaleApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + if values[i] == nil { + panic("nil value passed to WithOwnerReferences") + } + b.OwnerReferences = append(b.OwnerReferences, *values[i]) + } + return b +} + +// WithFinalizers adds the given value to the Finalizers field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. 
+// If called multiple times, values provided by each call will be appended to the Finalizers field. +func (b *ScaleApplyConfiguration) WithFinalizers(values ...string) *ScaleApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + b.Finalizers = append(b.Finalizers, values[i]) + } + return b +} + +// WithClusterName sets the ClusterName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ClusterName field is set to the value of the last call. +func (b *ScaleApplyConfiguration) WithClusterName(value string) *ScaleApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ClusterName = &value + return b +} + +func (b *ScaleApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { + if b.ObjectMetaApplyConfiguration == nil { + b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + } +} + +// WithSpec sets the Spec field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Spec field is set to the value of the last call. +func (b *ScaleApplyConfiguration) WithSpec(value v1beta1.ScaleSpec) *ScaleApplyConfiguration { + b.Spec = &value + return b +} + +// WithStatus sets the Status field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Status field is set to the value of the last call. +func (b *ScaleApplyConfiguration) WithStatus(value v1beta1.ScaleStatus) *ScaleApplyConfiguration { + b.Status = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/flowschema.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/flowschema.go index 76107d2d5902c..2a76cf32eb6ac 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/flowschema.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/flowschema.go @@ -49,7 +49,7 @@ func FlowSchema(name string) *FlowSchemaApplyConfiguration { // ExtractFlowSchema extracts the applied configuration owned by fieldManager from // flowSchema. If no managedFields are found in flowSchema for fieldManager, a // FlowSchemaApplyConfiguration is returned with only the Name, Namespace (if applicable), -// APIVersion and Kind populated. Is is possible that no managed fields were found for because other +// APIVersion and Kind populated. It is possible that no managed fields were found for because other // field managers have taken ownership of all the fields previously owned by fieldManager, or because // the fieldManager never owned fields any fields. // flowSchema must be a unmodified FlowSchema API object that was retrieved from the Kubernetes API. @@ -58,8 +58,19 @@ func FlowSchema(name string) *FlowSchemaApplyConfiguration { // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! func ExtractFlowSchema(flowSchema *flowcontrolv1alpha1.FlowSchema, fieldManager string) (*FlowSchemaApplyConfiguration, error) { + return extractFlowSchema(flowSchema, fieldManager, "") +} + +// ExtractFlowSchemaStatus is the same as ExtractFlowSchema except +// that it extracts the status subresource applied configuration. +// Experimental! 
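// A minimal usage sketch of the status-subresource extract flow introduced in
// this patch, assuming the generated typed client of this client-go generation
// exposes ApplyStatus; the function name, clientset wiring, and the
// "my-controller" field manager are illustrative, not part of the vendored code:
//
//	import (
//		"context"
//
//		metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
//		flowcontrolapply "k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1"
//		"k8s.io/client-go/kubernetes"
//	)
//
//	func bumpFlowSchemaStatus(ctx context.Context, cs kubernetes.Interface, name string) error {
//		// Fetch the live object, then extract only the status fields that
//		// "my-controller" currently owns.
//		fs, err := cs.FlowcontrolV1alpha1().FlowSchemas().Get(ctx, name, metav1.GetOptions{})
//		if err != nil {
//			return err
//		}
//		ac, err := flowcontrolapply.ExtractFlowSchemaStatus(fs, "my-controller")
//		if err != nil {
//			return err
//		}
//		// Mutate the owned status fields here, then re-apply just the status.
//		_, err = cs.FlowcontrolV1alpha1().FlowSchemas().ApplyStatus(ctx, ac, metav1.ApplyOptions{FieldManager: "my-controller"})
//		return err
//	}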
+func ExtractFlowSchemaStatus(flowSchema *flowcontrolv1alpha1.FlowSchema, fieldManager string) (*FlowSchemaApplyConfiguration, error) { + return extractFlowSchema(flowSchema, fieldManager, "status") +} + +func extractFlowSchema(flowSchema *flowcontrolv1alpha1.FlowSchema, fieldManager string, subresource string) (*FlowSchemaApplyConfiguration, error) { b := &FlowSchemaApplyConfiguration{} - err := managedfields.ExtractInto(flowSchema, internal.Parser().Type("io.k8s.api.flowcontrol.v1alpha1.FlowSchema"), fieldManager, b) + err := managedfields.ExtractInto(flowSchema, internal.Parser().Type("io.k8s.api.flowcontrol.v1alpha1.FlowSchema"), fieldManager, b, subresource) if err != nil { return nil, err } diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/prioritylevelconfiguration.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/prioritylevelconfiguration.go index 5f497ac786a20..4f36afe53caca 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/prioritylevelconfiguration.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/prioritylevelconfiguration.go @@ -49,7 +49,7 @@ func PriorityLevelConfiguration(name string) *PriorityLevelConfigurationApplyCon // ExtractPriorityLevelConfiguration extracts the applied configuration owned by fieldManager from // priorityLevelConfiguration. If no managedFields are found in priorityLevelConfiguration for fieldManager, a // PriorityLevelConfigurationApplyConfiguration is returned with only the Name, Namespace (if applicable), -// APIVersion and Kind populated. Is is possible that no managed fields were found for because other +// APIVersion and Kind populated. It is possible that no managed fields were found for because other // field managers have taken ownership of all the fields previously owned by fieldManager, or because // the fieldManager never owned fields any fields. // priorityLevelConfiguration must be a unmodified PriorityLevelConfiguration API object that was retrieved from the Kubernetes API. @@ -58,8 +58,19 @@ func PriorityLevelConfiguration(name string) *PriorityLevelConfigurationApplyCon // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! func ExtractPriorityLevelConfiguration(priorityLevelConfiguration *flowcontrolv1alpha1.PriorityLevelConfiguration, fieldManager string) (*PriorityLevelConfigurationApplyConfiguration, error) { + return extractPriorityLevelConfiguration(priorityLevelConfiguration, fieldManager, "") +} + +// ExtractPriorityLevelConfigurationStatus is the same as ExtractPriorityLevelConfiguration except +// that it extracts the status subresource applied configuration. +// Experimental! 
+func ExtractPriorityLevelConfigurationStatus(priorityLevelConfiguration *flowcontrolv1alpha1.PriorityLevelConfiguration, fieldManager string) (*PriorityLevelConfigurationApplyConfiguration, error) { + return extractPriorityLevelConfiguration(priorityLevelConfiguration, fieldManager, "status") +} + +func extractPriorityLevelConfiguration(priorityLevelConfiguration *flowcontrolv1alpha1.PriorityLevelConfiguration, fieldManager string, subresource string) (*PriorityLevelConfigurationApplyConfiguration, error) { b := &PriorityLevelConfigurationApplyConfiguration{} - err := managedfields.ExtractInto(priorityLevelConfiguration, internal.Parser().Type("io.k8s.api.flowcontrol.v1alpha1.PriorityLevelConfiguration"), fieldManager, b) + err := managedfields.ExtractInto(priorityLevelConfiguration, internal.Parser().Type("io.k8s.api.flowcontrol.v1alpha1.PriorityLevelConfiguration"), fieldManager, b, subresource) if err != nil { return nil, err } diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/flowschema.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/flowschema.go index 2c23ff949c7b0..794ff25a7b2c7 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/flowschema.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/flowschema.go @@ -49,7 +49,7 @@ func FlowSchema(name string) *FlowSchemaApplyConfiguration { // ExtractFlowSchema extracts the applied configuration owned by fieldManager from // flowSchema. If no managedFields are found in flowSchema for fieldManager, a // FlowSchemaApplyConfiguration is returned with only the Name, Namespace (if applicable), -// APIVersion and Kind populated. Is is possible that no managed fields were found for because other +// APIVersion and Kind populated. It is possible that no managed fields were found for because other // field managers have taken ownership of all the fields previously owned by fieldManager, or because // the fieldManager never owned fields any fields. // flowSchema must be a unmodified FlowSchema API object that was retrieved from the Kubernetes API. @@ -58,8 +58,19 @@ func FlowSchema(name string) *FlowSchemaApplyConfiguration { // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! func ExtractFlowSchema(flowSchema *flowcontrolv1beta1.FlowSchema, fieldManager string) (*FlowSchemaApplyConfiguration, error) { + return extractFlowSchema(flowSchema, fieldManager, "") +} + +// ExtractFlowSchemaStatus is the same as ExtractFlowSchema except +// that it extracts the status subresource applied configuration. +// Experimental! 
+func ExtractFlowSchemaStatus(flowSchema *flowcontrolv1beta1.FlowSchema, fieldManager string) (*FlowSchemaApplyConfiguration, error) { + return extractFlowSchema(flowSchema, fieldManager, "status") +} + +func extractFlowSchema(flowSchema *flowcontrolv1beta1.FlowSchema, fieldManager string, subresource string) (*FlowSchemaApplyConfiguration, error) { b := &FlowSchemaApplyConfiguration{} - err := managedfields.ExtractInto(flowSchema, internal.Parser().Type("io.k8s.api.flowcontrol.v1beta1.FlowSchema"), fieldManager, b) + err := managedfields.ExtractInto(flowSchema, internal.Parser().Type("io.k8s.api.flowcontrol.v1beta1.FlowSchema"), fieldManager, b, subresource) if err != nil { return nil, err } diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/prioritylevelconfiguration.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/prioritylevelconfiguration.go index 57f118ad82d90..57d1cd397de01 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/prioritylevelconfiguration.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/prioritylevelconfiguration.go @@ -49,7 +49,7 @@ func PriorityLevelConfiguration(name string) *PriorityLevelConfigurationApplyCon // ExtractPriorityLevelConfiguration extracts the applied configuration owned by fieldManager from // priorityLevelConfiguration. If no managedFields are found in priorityLevelConfiguration for fieldManager, a // PriorityLevelConfigurationApplyConfiguration is returned with only the Name, Namespace (if applicable), -// APIVersion and Kind populated. Is is possible that no managed fields were found for because other +// APIVersion and Kind populated. It is possible that no managed fields were found for because other // field managers have taken ownership of all the fields previously owned by fieldManager, or because // the fieldManager never owned fields any fields. // priorityLevelConfiguration must be a unmodified PriorityLevelConfiguration API object that was retrieved from the Kubernetes API. @@ -58,8 +58,19 @@ func PriorityLevelConfiguration(name string) *PriorityLevelConfigurationApplyCon // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! func ExtractPriorityLevelConfiguration(priorityLevelConfiguration *flowcontrolv1beta1.PriorityLevelConfiguration, fieldManager string) (*PriorityLevelConfigurationApplyConfiguration, error) { + return extractPriorityLevelConfiguration(priorityLevelConfiguration, fieldManager, "") +} + +// ExtractPriorityLevelConfigurationStatus is the same as ExtractPriorityLevelConfiguration except +// that it extracts the status subresource applied configuration. +// Experimental! 
+func ExtractPriorityLevelConfigurationStatus(priorityLevelConfiguration *flowcontrolv1beta1.PriorityLevelConfiguration, fieldManager string) (*PriorityLevelConfigurationApplyConfiguration, error) { + return extractPriorityLevelConfiguration(priorityLevelConfiguration, fieldManager, "status") +} + +func extractPriorityLevelConfiguration(priorityLevelConfiguration *flowcontrolv1beta1.PriorityLevelConfiguration, fieldManager string, subresource string) (*PriorityLevelConfigurationApplyConfiguration, error) { b := &PriorityLevelConfigurationApplyConfiguration{} - err := managedfields.ExtractInto(priorityLevelConfiguration, internal.Parser().Type("io.k8s.api.flowcontrol.v1beta1.PriorityLevelConfiguration"), fieldManager, b) + err := managedfields.ExtractInto(priorityLevelConfiguration, internal.Parser().Type("io.k8s.api.flowcontrol.v1beta1.PriorityLevelConfiguration"), fieldManager, b, subresource) if err != nil { return nil, err } diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/flowdistinguishermethod.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/flowdistinguishermethod.go new file mode 100644 index 0000000000000..924f966d487ad --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/flowdistinguishermethod.go @@ -0,0 +1,43 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta2 + +import ( + v1beta2 "k8s.io/api/flowcontrol/v1beta2" +) + +// FlowDistinguisherMethodApplyConfiguration represents an declarative configuration of the FlowDistinguisherMethod type for use +// with apply. +type FlowDistinguisherMethodApplyConfiguration struct { + Type *v1beta2.FlowDistinguisherMethodType `json:"type,omitempty"` +} + +// FlowDistinguisherMethodApplyConfiguration constructs an declarative configuration of the FlowDistinguisherMethod type for use with +// apply. +func FlowDistinguisherMethod() *FlowDistinguisherMethodApplyConfiguration { + return &FlowDistinguisherMethodApplyConfiguration{} +} + +// WithType sets the Type field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Type field is set to the value of the last call. +func (b *FlowDistinguisherMethodApplyConfiguration) WithType(value v1beta2.FlowDistinguisherMethodType) *FlowDistinguisherMethodApplyConfiguration { + b.Type = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/flowschema.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/flowschema.go new file mode 100644 index 0000000000000..323d7241d31bd --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/flowschema.go @@ -0,0 +1,274 @@ +/* +Copyright The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta2 + +import ( + flowcontrolv1beta2 "k8s.io/api/flowcontrol/v1beta2" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + managedfields "k8s.io/apimachinery/pkg/util/managedfields" + internal "k8s.io/client-go/applyconfigurations/internal" + v1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// FlowSchemaApplyConfiguration represents an declarative configuration of the FlowSchema type for use +// with apply. +type FlowSchemaApplyConfiguration struct { + v1.TypeMetaApplyConfiguration `json:",inline"` + *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *FlowSchemaSpecApplyConfiguration `json:"spec,omitempty"` + Status *FlowSchemaStatusApplyConfiguration `json:"status,omitempty"` +} + +// FlowSchema constructs an declarative configuration of the FlowSchema type for use with +// apply. +func FlowSchema(name string) *FlowSchemaApplyConfiguration { + b := &FlowSchemaApplyConfiguration{} + b.WithName(name) + b.WithKind("FlowSchema") + b.WithAPIVersion("flowcontrol.apiserver.k8s.io/v1beta2") + return b +} + +// ExtractFlowSchema extracts the applied configuration owned by fieldManager from +// flowSchema. If no managedFields are found in flowSchema for fieldManager, a +// FlowSchemaApplyConfiguration is returned with only the Name, Namespace (if applicable), +// APIVersion and Kind populated. It is possible that no managed fields were found for because other +// field managers have taken ownership of all the fields previously owned by fieldManager, or because +// the fieldManager never owned fields any fields. +// flowSchema must be a unmodified FlowSchema API object that was retrieved from the Kubernetes API. +// ExtractFlowSchema provides a way to perform a extract/modify-in-place/apply workflow. +// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously +// applied if another fieldManager has updated or force applied any of the previously applied fields. +// Experimental! +func ExtractFlowSchema(flowSchema *flowcontrolv1beta2.FlowSchema, fieldManager string) (*FlowSchemaApplyConfiguration, error) { + return extractFlowSchema(flowSchema, fieldManager, "") +} + +// ExtractFlowSchemaStatus is the same as ExtractFlowSchema except +// that it extracts the status subresource applied configuration. +// Experimental! 
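// A short sketch of driving the new v1beta2 builders end to end with
// server-side apply, assuming a standard kubernetes.Interface clientset (cs)
// and a context (ctx); flowcontrolapply aliases
// k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2, and the schema
// name, labels, precedence, and field manager are illustrative:
//
//	fs := flowcontrolapply.FlowSchema("example").
//		WithLabels(map[string]string{"team": "platform"}).
//		WithSpec(flowcontrolapply.FlowSchemaSpec().
//			WithMatchingPrecedence(1000).
//			WithPriorityLevelConfiguration(
//				flowcontrolapply.PriorityLevelConfigurationReference().
//					WithName("workload-low")))
//	_, err := cs.FlowcontrolV1beta2().FlowSchemas().
//		Apply(ctx, fs, metav1.ApplyOptions{FieldManager: "my-controller", Force: true})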
+func ExtractFlowSchemaStatus(flowSchema *flowcontrolv1beta2.FlowSchema, fieldManager string) (*FlowSchemaApplyConfiguration, error) { + return extractFlowSchema(flowSchema, fieldManager, "status") +} + +func extractFlowSchema(flowSchema *flowcontrolv1beta2.FlowSchema, fieldManager string, subresource string) (*FlowSchemaApplyConfiguration, error) { + b := &FlowSchemaApplyConfiguration{} + err := managedfields.ExtractInto(flowSchema, internal.Parser().Type("io.k8s.api.flowcontrol.v1beta2.FlowSchema"), fieldManager, b, subresource) + if err != nil { + return nil, err + } + b.WithName(flowSchema.Name) + + b.WithKind("FlowSchema") + b.WithAPIVersion("flowcontrol.apiserver.k8s.io/v1beta2") + return b, nil +} + +// WithKind sets the Kind field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Kind field is set to the value of the last call. +func (b *FlowSchemaApplyConfiguration) WithKind(value string) *FlowSchemaApplyConfiguration { + b.Kind = &value + return b +} + +// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the APIVersion field is set to the value of the last call. +func (b *FlowSchemaApplyConfiguration) WithAPIVersion(value string) *FlowSchemaApplyConfiguration { + b.APIVersion = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *FlowSchemaApplyConfiguration) WithName(value string) *FlowSchemaApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Name = &value + return b +} + +// WithGenerateName sets the GenerateName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GenerateName field is set to the value of the last call. +func (b *FlowSchemaApplyConfiguration) WithGenerateName(value string) *FlowSchemaApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.GenerateName = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. +func (b *FlowSchemaApplyConfiguration) WithNamespace(value string) *FlowSchemaApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Namespace = &value + return b +} + +// WithSelfLink sets the SelfLink field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the SelfLink field is set to the value of the last call. 
+func (b *FlowSchemaApplyConfiguration) WithSelfLink(value string) *FlowSchemaApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.SelfLink = &value + return b +} + +// WithUID sets the UID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UID field is set to the value of the last call. +func (b *FlowSchemaApplyConfiguration) WithUID(value types.UID) *FlowSchemaApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.UID = &value + return b +} + +// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourceVersion field is set to the value of the last call. +func (b *FlowSchemaApplyConfiguration) WithResourceVersion(value string) *FlowSchemaApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ResourceVersion = &value + return b +} + +// WithGeneration sets the Generation field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Generation field is set to the value of the last call. +func (b *FlowSchemaApplyConfiguration) WithGeneration(value int64) *FlowSchemaApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Generation = &value + return b +} + +// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CreationTimestamp field is set to the value of the last call. +func (b *FlowSchemaApplyConfiguration) WithCreationTimestamp(value metav1.Time) *FlowSchemaApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.CreationTimestamp = &value + return b +} + +// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionTimestamp field is set to the value of the last call. +func (b *FlowSchemaApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *FlowSchemaApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.DeletionTimestamp = &value + return b +} + +// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. +func (b *FlowSchemaApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *FlowSchemaApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.DeletionGracePeriodSeconds = &value + return b +} + +// WithLabels puts the entries into the Labels field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Labels field, +// overwriting an existing map entries in Labels field with the same key. 
+func (b *FlowSchemaApplyConfiguration) WithLabels(entries map[string]string) *FlowSchemaApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.Labels == nil && len(entries) > 0 { + b.Labels = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.Labels[k] = v + } + return b +} + +// WithAnnotations puts the entries into the Annotations field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Annotations field, +// overwriting an existing map entries in Annotations field with the same key. +func (b *FlowSchemaApplyConfiguration) WithAnnotations(entries map[string]string) *FlowSchemaApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.Annotations == nil && len(entries) > 0 { + b.Annotations = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.Annotations[k] = v + } + return b +} + +// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the OwnerReferences field. +func (b *FlowSchemaApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *FlowSchemaApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + if values[i] == nil { + panic("nil value passed to WithOwnerReferences") + } + b.OwnerReferences = append(b.OwnerReferences, *values[i]) + } + return b +} + +// WithFinalizers adds the given value to the Finalizers field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Finalizers field. +func (b *FlowSchemaApplyConfiguration) WithFinalizers(values ...string) *FlowSchemaApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + b.Finalizers = append(b.Finalizers, values[i]) + } + return b +} + +// WithClusterName sets the ClusterName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ClusterName field is set to the value of the last call. +func (b *FlowSchemaApplyConfiguration) WithClusterName(value string) *FlowSchemaApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ClusterName = &value + return b +} + +func (b *FlowSchemaApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { + if b.ObjectMetaApplyConfiguration == nil { + b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + } +} + +// WithSpec sets the Spec field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Spec field is set to the value of the last call. 
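// Worth noting: the map-valued setters above merge rather than replace, so
// repeated calls accumulate entries and the last write wins per key. A tiny
// sketch under the same flowcontrolapply alias (label values are made up):
//
//	fs := flowcontrolapply.FlowSchema("demo").
//		WithLabels(map[string]string{"tier": "a", "owner": "x"}).
//		WithLabels(map[string]string{"tier": "b"})
//	// fs.Labels == map[string]string{"tier": "b", "owner": "x"}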
+func (b *FlowSchemaApplyConfiguration) WithSpec(value *FlowSchemaSpecApplyConfiguration) *FlowSchemaApplyConfiguration { + b.Spec = value + return b +} + +// WithStatus sets the Status field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Status field is set to the value of the last call. +func (b *FlowSchemaApplyConfiguration) WithStatus(value *FlowSchemaStatusApplyConfiguration) *FlowSchemaApplyConfiguration { + b.Status = value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/flowschemacondition.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/flowschemacondition.go new file mode 100644 index 0000000000000..04dfcbf11a909 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/flowschemacondition.go @@ -0,0 +1,80 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta2 + +import ( + v1beta2 "k8s.io/api/flowcontrol/v1beta2" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// FlowSchemaConditionApplyConfiguration represents an declarative configuration of the FlowSchemaCondition type for use +// with apply. +type FlowSchemaConditionApplyConfiguration struct { + Type *v1beta2.FlowSchemaConditionType `json:"type,omitempty"` + Status *v1beta2.ConditionStatus `json:"status,omitempty"` + LastTransitionTime *v1.Time `json:"lastTransitionTime,omitempty"` + Reason *string `json:"reason,omitempty"` + Message *string `json:"message,omitempty"` +} + +// FlowSchemaConditionApplyConfiguration constructs an declarative configuration of the FlowSchemaCondition type for use with +// apply. +func FlowSchemaCondition() *FlowSchemaConditionApplyConfiguration { + return &FlowSchemaConditionApplyConfiguration{} +} + +// WithType sets the Type field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Type field is set to the value of the last call. +func (b *FlowSchemaConditionApplyConfiguration) WithType(value v1beta2.FlowSchemaConditionType) *FlowSchemaConditionApplyConfiguration { + b.Type = &value + return b +} + +// WithStatus sets the Status field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Status field is set to the value of the last call. +func (b *FlowSchemaConditionApplyConfiguration) WithStatus(value v1beta2.ConditionStatus) *FlowSchemaConditionApplyConfiguration { + b.Status = &value + return b +} + +// WithLastTransitionTime sets the LastTransitionTime field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. 
+// If called multiple times, the LastTransitionTime field is set to the value of the last call. +func (b *FlowSchemaConditionApplyConfiguration) WithLastTransitionTime(value v1.Time) *FlowSchemaConditionApplyConfiguration { + b.LastTransitionTime = &value + return b +} + +// WithReason sets the Reason field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Reason field is set to the value of the last call. +func (b *FlowSchemaConditionApplyConfiguration) WithReason(value string) *FlowSchemaConditionApplyConfiguration { + b.Reason = &value + return b +} + +// WithMessage sets the Message field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Message field is set to the value of the last call. +func (b *FlowSchemaConditionApplyConfiguration) WithMessage(value string) *FlowSchemaConditionApplyConfiguration { + b.Message = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/flowschemaspec.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/flowschemaspec.go new file mode 100644 index 0000000000000..a5477e2768f99 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/flowschemaspec.go @@ -0,0 +1,71 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta2 + +// FlowSchemaSpecApplyConfiguration represents an declarative configuration of the FlowSchemaSpec type for use +// with apply. +type FlowSchemaSpecApplyConfiguration struct { + PriorityLevelConfiguration *PriorityLevelConfigurationReferenceApplyConfiguration `json:"priorityLevelConfiguration,omitempty"` + MatchingPrecedence *int32 `json:"matchingPrecedence,omitempty"` + DistinguisherMethod *FlowDistinguisherMethodApplyConfiguration `json:"distinguisherMethod,omitempty"` + Rules []PolicyRulesWithSubjectsApplyConfiguration `json:"rules,omitempty"` +} + +// FlowSchemaSpecApplyConfiguration constructs an declarative configuration of the FlowSchemaSpec type for use with +// apply. +func FlowSchemaSpec() *FlowSchemaSpecApplyConfiguration { + return &FlowSchemaSpecApplyConfiguration{} +} + +// WithPriorityLevelConfiguration sets the PriorityLevelConfiguration field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the PriorityLevelConfiguration field is set to the value of the last call. 
+func (b *FlowSchemaSpecApplyConfiguration) WithPriorityLevelConfiguration(value *PriorityLevelConfigurationReferenceApplyConfiguration) *FlowSchemaSpecApplyConfiguration { + b.PriorityLevelConfiguration = value + return b +} + +// WithMatchingPrecedence sets the MatchingPrecedence field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the MatchingPrecedence field is set to the value of the last call. +func (b *FlowSchemaSpecApplyConfiguration) WithMatchingPrecedence(value int32) *FlowSchemaSpecApplyConfiguration { + b.MatchingPrecedence = &value + return b +} + +// WithDistinguisherMethod sets the DistinguisherMethod field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DistinguisherMethod field is set to the value of the last call. +func (b *FlowSchemaSpecApplyConfiguration) WithDistinguisherMethod(value *FlowDistinguisherMethodApplyConfiguration) *FlowSchemaSpecApplyConfiguration { + b.DistinguisherMethod = value + return b +} + +// WithRules adds the given value to the Rules field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Rules field. +func (b *FlowSchemaSpecApplyConfiguration) WithRules(values ...*PolicyRulesWithSubjectsApplyConfiguration) *FlowSchemaSpecApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithRules") + } + b.Rules = append(b.Rules, *values[i]) + } + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/flowschemastatus.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/flowschemastatus.go new file mode 100644 index 0000000000000..67c5be2cbe570 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/flowschemastatus.go @@ -0,0 +1,44 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta2 + +// FlowSchemaStatusApplyConfiguration represents an declarative configuration of the FlowSchemaStatus type for use +// with apply. +type FlowSchemaStatusApplyConfiguration struct { + Conditions []FlowSchemaConditionApplyConfiguration `json:"conditions,omitempty"` +} + +// FlowSchemaStatusApplyConfiguration constructs an declarative configuration of the FlowSchemaStatus type for use with +// apply. +func FlowSchemaStatus() *FlowSchemaStatusApplyConfiguration { + return &FlowSchemaStatusApplyConfiguration{} +} + +// WithConditions adds the given value to the Conditions field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. 
+// If called multiple times, values provided by each call will be appended to the Conditions field. +func (b *FlowSchemaStatusApplyConfiguration) WithConditions(values ...*FlowSchemaConditionApplyConfiguration) *FlowSchemaStatusApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithConditions") + } + b.Conditions = append(b.Conditions, *values[i]) + } + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/groupsubject.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/groupsubject.go new file mode 100644 index 0000000000000..b670f2cfd97d3 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/groupsubject.go @@ -0,0 +1,39 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta2 + +// GroupSubjectApplyConfiguration represents an declarative configuration of the GroupSubject type for use +// with apply. +type GroupSubjectApplyConfiguration struct { + Name *string `json:"name,omitempty"` +} + +// GroupSubjectApplyConfiguration constructs an declarative configuration of the GroupSubject type for use with +// apply. +func GroupSubject() *GroupSubjectApplyConfiguration { + return &GroupSubjectApplyConfiguration{} +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *GroupSubjectApplyConfiguration) WithName(value string) *GroupSubjectApplyConfiguration { + b.Name = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/limitedprioritylevelconfiguration.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/limitedprioritylevelconfiguration.go new file mode 100644 index 0000000000000..e25f7f6b06c50 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/limitedprioritylevelconfiguration.go @@ -0,0 +1,48 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta2 + +// LimitedPriorityLevelConfigurationApplyConfiguration represents an declarative configuration of the LimitedPriorityLevelConfiguration type for use +// with apply. 
+type LimitedPriorityLevelConfigurationApplyConfiguration struct { + AssuredConcurrencyShares *int32 `json:"assuredConcurrencyShares,omitempty"` + LimitResponse *LimitResponseApplyConfiguration `json:"limitResponse,omitempty"` +} + +// LimitedPriorityLevelConfigurationApplyConfiguration constructs an declarative configuration of the LimitedPriorityLevelConfiguration type for use with +// apply. +func LimitedPriorityLevelConfiguration() *LimitedPriorityLevelConfigurationApplyConfiguration { + return &LimitedPriorityLevelConfigurationApplyConfiguration{} +} + +// WithAssuredConcurrencyShares sets the AssuredConcurrencyShares field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the AssuredConcurrencyShares field is set to the value of the last call. +func (b *LimitedPriorityLevelConfigurationApplyConfiguration) WithAssuredConcurrencyShares(value int32) *LimitedPriorityLevelConfigurationApplyConfiguration { + b.AssuredConcurrencyShares = &value + return b +} + +// WithLimitResponse sets the LimitResponse field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the LimitResponse field is set to the value of the last call. +func (b *LimitedPriorityLevelConfigurationApplyConfiguration) WithLimitResponse(value *LimitResponseApplyConfiguration) *LimitedPriorityLevelConfigurationApplyConfiguration { + b.LimitResponse = value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/limitresponse.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/limitresponse.go new file mode 100644 index 0000000000000..a9b7661fb23e3 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/limitresponse.go @@ -0,0 +1,52 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta2 + +import ( + v1beta2 "k8s.io/api/flowcontrol/v1beta2" +) + +// LimitResponseApplyConfiguration represents an declarative configuration of the LimitResponse type for use +// with apply. +type LimitResponseApplyConfiguration struct { + Type *v1beta2.LimitResponseType `json:"type,omitempty"` + Queuing *QueuingConfigurationApplyConfiguration `json:"queuing,omitempty"` +} + +// LimitResponseApplyConfiguration constructs an declarative configuration of the LimitResponse type for use with +// apply. +func LimitResponse() *LimitResponseApplyConfiguration { + return &LimitResponseApplyConfiguration{} +} + +// WithType sets the Type field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Type field is set to the value of the last call. 
+func (b *LimitResponseApplyConfiguration) WithType(value v1beta2.LimitResponseType) *LimitResponseApplyConfiguration { + b.Type = &value + return b +} + +// WithQueuing sets the Queuing field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Queuing field is set to the value of the last call. +func (b *LimitResponseApplyConfiguration) WithQueuing(value *QueuingConfigurationApplyConfiguration) *LimitResponseApplyConfiguration { + b.Queuing = value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/nonresourcepolicyrule.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/nonresourcepolicyrule.go new file mode 100644 index 0000000000000..cb8ba0afd65ae --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/nonresourcepolicyrule.go @@ -0,0 +1,52 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta2 + +// NonResourcePolicyRuleApplyConfiguration represents an declarative configuration of the NonResourcePolicyRule type for use +// with apply. +type NonResourcePolicyRuleApplyConfiguration struct { + Verbs []string `json:"verbs,omitempty"` + NonResourceURLs []string `json:"nonResourceURLs,omitempty"` +} + +// NonResourcePolicyRuleApplyConfiguration constructs an declarative configuration of the NonResourcePolicyRule type for use with +// apply. +func NonResourcePolicyRule() *NonResourcePolicyRuleApplyConfiguration { + return &NonResourcePolicyRuleApplyConfiguration{} +} + +// WithVerbs adds the given value to the Verbs field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Verbs field. +func (b *NonResourcePolicyRuleApplyConfiguration) WithVerbs(values ...string) *NonResourcePolicyRuleApplyConfiguration { + for i := range values { + b.Verbs = append(b.Verbs, values[i]) + } + return b +} + +// WithNonResourceURLs adds the given value to the NonResourceURLs field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the NonResourceURLs field. 
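// The slice-valued With* helpers, by contrast, append across calls, and the
// pointer-slice variants panic on nil entries (see WithRules above and
// PolicyRulesWithSubjects below). A brief sketch composing the rule builders
// from this package; the verbs and URLs are illustrative:
//
//	healthz := flowcontrolapply.NonResourcePolicyRule().
//		WithVerbs("get").
//		WithNonResourceURLs("/healthz", "/readyz")
//	rules := flowcontrolapply.PolicyRulesWithSubjects().
//		WithNonResourceRules(healthz)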
+func (b *NonResourcePolicyRuleApplyConfiguration) WithNonResourceURLs(values ...string) *NonResourcePolicyRuleApplyConfiguration { + for i := range values { + b.NonResourceURLs = append(b.NonResourceURLs, values[i]) + } + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/policyruleswithsubjects.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/policyruleswithsubjects.go new file mode 100644 index 0000000000000..179c3979db10b --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/policyruleswithsubjects.go @@ -0,0 +1,72 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta2 + +// PolicyRulesWithSubjectsApplyConfiguration represents an declarative configuration of the PolicyRulesWithSubjects type for use +// with apply. +type PolicyRulesWithSubjectsApplyConfiguration struct { + Subjects []SubjectApplyConfiguration `json:"subjects,omitempty"` + ResourceRules []ResourcePolicyRuleApplyConfiguration `json:"resourceRules,omitempty"` + NonResourceRules []NonResourcePolicyRuleApplyConfiguration `json:"nonResourceRules,omitempty"` +} + +// PolicyRulesWithSubjectsApplyConfiguration constructs an declarative configuration of the PolicyRulesWithSubjects type for use with +// apply. +func PolicyRulesWithSubjects() *PolicyRulesWithSubjectsApplyConfiguration { + return &PolicyRulesWithSubjectsApplyConfiguration{} +} + +// WithSubjects adds the given value to the Subjects field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Subjects field. +func (b *PolicyRulesWithSubjectsApplyConfiguration) WithSubjects(values ...*SubjectApplyConfiguration) *PolicyRulesWithSubjectsApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithSubjects") + } + b.Subjects = append(b.Subjects, *values[i]) + } + return b +} + +// WithResourceRules adds the given value to the ResourceRules field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the ResourceRules field. +func (b *PolicyRulesWithSubjectsApplyConfiguration) WithResourceRules(values ...*ResourcePolicyRuleApplyConfiguration) *PolicyRulesWithSubjectsApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithResourceRules") + } + b.ResourceRules = append(b.ResourceRules, *values[i]) + } + return b +} + +// WithNonResourceRules adds the given value to the NonResourceRules field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. 
+// If called multiple times, values provided by each call will be appended to the NonResourceRules field. +func (b *PolicyRulesWithSubjectsApplyConfiguration) WithNonResourceRules(values ...*NonResourcePolicyRuleApplyConfiguration) *PolicyRulesWithSubjectsApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithNonResourceRules") + } + b.NonResourceRules = append(b.NonResourceRules, *values[i]) + } + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/prioritylevelconfiguration.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/prioritylevelconfiguration.go new file mode 100644 index 0000000000000..4ac11bba65bea --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/prioritylevelconfiguration.go @@ -0,0 +1,274 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta2 + +import ( + flowcontrolv1beta2 "k8s.io/api/flowcontrol/v1beta2" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + managedfields "k8s.io/apimachinery/pkg/util/managedfields" + internal "k8s.io/client-go/applyconfigurations/internal" + v1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// PriorityLevelConfigurationApplyConfiguration represents an declarative configuration of the PriorityLevelConfiguration type for use +// with apply. +type PriorityLevelConfigurationApplyConfiguration struct { + v1.TypeMetaApplyConfiguration `json:",inline"` + *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *PriorityLevelConfigurationSpecApplyConfiguration `json:"spec,omitempty"` + Status *PriorityLevelConfigurationStatusApplyConfiguration `json:"status,omitempty"` +} + +// PriorityLevelConfiguration constructs an declarative configuration of the PriorityLevelConfiguration type for use with +// apply. +func PriorityLevelConfiguration(name string) *PriorityLevelConfigurationApplyConfiguration { + b := &PriorityLevelConfigurationApplyConfiguration{} + b.WithName(name) + b.WithKind("PriorityLevelConfiguration") + b.WithAPIVersion("flowcontrol.apiserver.k8s.io/v1beta2") + return b +} + +// ExtractPriorityLevelConfiguration extracts the applied configuration owned by fieldManager from +// priorityLevelConfiguration. If no managedFields are found in priorityLevelConfiguration for fieldManager, a +// PriorityLevelConfigurationApplyConfiguration is returned with only the Name, Namespace (if applicable), +// APIVersion and Kind populated. It is possible that no managed fields were found for because other +// field managers have taken ownership of all the fields previously owned by fieldManager, or because +// the fieldManager never owned fields any fields. +// priorityLevelConfiguration must be a unmodified PriorityLevelConfiguration API object that was retrieved from the Kubernetes API. 
+// ExtractPriorityLevelConfiguration provides a way to perform an extract/modify-in-place/apply workflow.
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
+// applied if another fieldManager has updated or force applied any of the previously applied fields.
+// Experimental!
+func ExtractPriorityLevelConfiguration(priorityLevelConfiguration *flowcontrolv1beta2.PriorityLevelConfiguration, fieldManager string) (*PriorityLevelConfigurationApplyConfiguration, error) {
+	return extractPriorityLevelConfiguration(priorityLevelConfiguration, fieldManager, "")
+}
+
+// ExtractPriorityLevelConfigurationStatus is the same as ExtractPriorityLevelConfiguration except
+// that it extracts the status subresource applied configuration.
+// Experimental!
+func ExtractPriorityLevelConfigurationStatus(priorityLevelConfiguration *flowcontrolv1beta2.PriorityLevelConfiguration, fieldManager string) (*PriorityLevelConfigurationApplyConfiguration, error) {
+	return extractPriorityLevelConfiguration(priorityLevelConfiguration, fieldManager, "status")
+}
+
+func extractPriorityLevelConfiguration(priorityLevelConfiguration *flowcontrolv1beta2.PriorityLevelConfiguration, fieldManager string, subresource string) (*PriorityLevelConfigurationApplyConfiguration, error) {
+	b := &PriorityLevelConfigurationApplyConfiguration{}
+	err := managedfields.ExtractInto(priorityLevelConfiguration, internal.Parser().Type("io.k8s.api.flowcontrol.v1beta2.PriorityLevelConfiguration"), fieldManager, b, subresource)
+	if err != nil {
+		return nil, err
+	}
+	b.WithName(priorityLevelConfiguration.Name)
+
+	b.WithKind("PriorityLevelConfiguration")
+	b.WithAPIVersion("flowcontrol.apiserver.k8s.io/v1beta2")
+	return b, nil
+}
+
+// WithKind sets the Kind field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Kind field is set to the value of the last call.
+func (b *PriorityLevelConfigurationApplyConfiguration) WithKind(value string) *PriorityLevelConfigurationApplyConfiguration {
+	b.Kind = &value
+	return b
+}
+
+// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the APIVersion field is set to the value of the last call.
+func (b *PriorityLevelConfigurationApplyConfiguration) WithAPIVersion(value string) *PriorityLevelConfigurationApplyConfiguration {
+	b.APIVersion = &value
+	return b
+}
+
+// WithName sets the Name field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Name field is set to the value of the last call.
+func (b *PriorityLevelConfigurationApplyConfiguration) WithName(value string) *PriorityLevelConfigurationApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	b.Name = &value
+	return b
+}
+
+// WithGenerateName sets the GenerateName field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the GenerateName field is set to the value of the last call.
+func (b *PriorityLevelConfigurationApplyConfiguration) WithGenerateName(value string) *PriorityLevelConfigurationApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.GenerateName = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. +func (b *PriorityLevelConfigurationApplyConfiguration) WithNamespace(value string) *PriorityLevelConfigurationApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Namespace = &value + return b +} + +// WithSelfLink sets the SelfLink field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the SelfLink field is set to the value of the last call. +func (b *PriorityLevelConfigurationApplyConfiguration) WithSelfLink(value string) *PriorityLevelConfigurationApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.SelfLink = &value + return b +} + +// WithUID sets the UID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UID field is set to the value of the last call. +func (b *PriorityLevelConfigurationApplyConfiguration) WithUID(value types.UID) *PriorityLevelConfigurationApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.UID = &value + return b +} + +// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourceVersion field is set to the value of the last call. +func (b *PriorityLevelConfigurationApplyConfiguration) WithResourceVersion(value string) *PriorityLevelConfigurationApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ResourceVersion = &value + return b +} + +// WithGeneration sets the Generation field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Generation field is set to the value of the last call. +func (b *PriorityLevelConfigurationApplyConfiguration) WithGeneration(value int64) *PriorityLevelConfigurationApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Generation = &value + return b +} + +// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CreationTimestamp field is set to the value of the last call. +func (b *PriorityLevelConfigurationApplyConfiguration) WithCreationTimestamp(value metav1.Time) *PriorityLevelConfigurationApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.CreationTimestamp = &value + return b +} + +// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. 
+// If called multiple times, the DeletionTimestamp field is set to the value of the last call. +func (b *PriorityLevelConfigurationApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *PriorityLevelConfigurationApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.DeletionTimestamp = &value + return b +} + +// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. +func (b *PriorityLevelConfigurationApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *PriorityLevelConfigurationApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.DeletionGracePeriodSeconds = &value + return b +} + +// WithLabels puts the entries into the Labels field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Labels field, +// overwriting an existing map entries in Labels field with the same key. +func (b *PriorityLevelConfigurationApplyConfiguration) WithLabels(entries map[string]string) *PriorityLevelConfigurationApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.Labels == nil && len(entries) > 0 { + b.Labels = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.Labels[k] = v + } + return b +} + +// WithAnnotations puts the entries into the Annotations field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Annotations field, +// overwriting an existing map entries in Annotations field with the same key. +func (b *PriorityLevelConfigurationApplyConfiguration) WithAnnotations(entries map[string]string) *PriorityLevelConfigurationApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.Annotations == nil && len(entries) > 0 { + b.Annotations = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.Annotations[k] = v + } + return b +} + +// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the OwnerReferences field. +func (b *PriorityLevelConfigurationApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *PriorityLevelConfigurationApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + if values[i] == nil { + panic("nil value passed to WithOwnerReferences") + } + b.OwnerReferences = append(b.OwnerReferences, *values[i]) + } + return b +} + +// WithFinalizers adds the given value to the Finalizers field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Finalizers field. 
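+//
+// As a minimal illustrative sketch of the chained builder style (the finalizer
+// name here is a hypothetical value, not part of the generated API):
+//
+//	plc := PriorityLevelConfiguration("example-level").
+//		WithFinalizers("example.com/protect")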
+func (b *PriorityLevelConfigurationApplyConfiguration) WithFinalizers(values ...string) *PriorityLevelConfigurationApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + b.Finalizers = append(b.Finalizers, values[i]) + } + return b +} + +// WithClusterName sets the ClusterName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ClusterName field is set to the value of the last call. +func (b *PriorityLevelConfigurationApplyConfiguration) WithClusterName(value string) *PriorityLevelConfigurationApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ClusterName = &value + return b +} + +func (b *PriorityLevelConfigurationApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { + if b.ObjectMetaApplyConfiguration == nil { + b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + } +} + +// WithSpec sets the Spec field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Spec field is set to the value of the last call. +func (b *PriorityLevelConfigurationApplyConfiguration) WithSpec(value *PriorityLevelConfigurationSpecApplyConfiguration) *PriorityLevelConfigurationApplyConfiguration { + b.Spec = value + return b +} + +// WithStatus sets the Status field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Status field is set to the value of the last call. +func (b *PriorityLevelConfigurationApplyConfiguration) WithStatus(value *PriorityLevelConfigurationStatusApplyConfiguration) *PriorityLevelConfigurationApplyConfiguration { + b.Status = value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/prioritylevelconfigurationcondition.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/prioritylevelconfigurationcondition.go new file mode 100644 index 0000000000000..f742adeff0833 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/prioritylevelconfigurationcondition.go @@ -0,0 +1,80 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta2 + +import ( + v1beta2 "k8s.io/api/flowcontrol/v1beta2" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// PriorityLevelConfigurationConditionApplyConfiguration represents an declarative configuration of the PriorityLevelConfigurationCondition type for use +// with apply. 
+type PriorityLevelConfigurationConditionApplyConfiguration struct { + Type *v1beta2.PriorityLevelConfigurationConditionType `json:"type,omitempty"` + Status *v1beta2.ConditionStatus `json:"status,omitempty"` + LastTransitionTime *v1.Time `json:"lastTransitionTime,omitempty"` + Reason *string `json:"reason,omitempty"` + Message *string `json:"message,omitempty"` +} + +// PriorityLevelConfigurationConditionApplyConfiguration constructs an declarative configuration of the PriorityLevelConfigurationCondition type for use with +// apply. +func PriorityLevelConfigurationCondition() *PriorityLevelConfigurationConditionApplyConfiguration { + return &PriorityLevelConfigurationConditionApplyConfiguration{} +} + +// WithType sets the Type field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Type field is set to the value of the last call. +func (b *PriorityLevelConfigurationConditionApplyConfiguration) WithType(value v1beta2.PriorityLevelConfigurationConditionType) *PriorityLevelConfigurationConditionApplyConfiguration { + b.Type = &value + return b +} + +// WithStatus sets the Status field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Status field is set to the value of the last call. +func (b *PriorityLevelConfigurationConditionApplyConfiguration) WithStatus(value v1beta2.ConditionStatus) *PriorityLevelConfigurationConditionApplyConfiguration { + b.Status = &value + return b +} + +// WithLastTransitionTime sets the LastTransitionTime field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the LastTransitionTime field is set to the value of the last call. +func (b *PriorityLevelConfigurationConditionApplyConfiguration) WithLastTransitionTime(value v1.Time) *PriorityLevelConfigurationConditionApplyConfiguration { + b.LastTransitionTime = &value + return b +} + +// WithReason sets the Reason field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Reason field is set to the value of the last call. +func (b *PriorityLevelConfigurationConditionApplyConfiguration) WithReason(value string) *PriorityLevelConfigurationConditionApplyConfiguration { + b.Reason = &value + return b +} + +// WithMessage sets the Message field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Message field is set to the value of the last call. 
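+//
+// For example, a complete condition can be built by chaining the setters above
+// (an illustrative sketch; the type, reason, and message strings are hypothetical):
+//
+//	cond := PriorityLevelConfigurationCondition().
+//		WithType("Ready").
+//		WithStatus("True").
+//		WithReason("Provisioned").
+//		WithMessage("priority level is active")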
+func (b *PriorityLevelConfigurationConditionApplyConfiguration) WithMessage(value string) *PriorityLevelConfigurationConditionApplyConfiguration { + b.Message = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/prioritylevelconfigurationreference.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/prioritylevelconfigurationreference.go new file mode 100644 index 0000000000000..581b451ffd23b --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/prioritylevelconfigurationreference.go @@ -0,0 +1,39 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta2 + +// PriorityLevelConfigurationReferenceApplyConfiguration represents an declarative configuration of the PriorityLevelConfigurationReference type for use +// with apply. +type PriorityLevelConfigurationReferenceApplyConfiguration struct { + Name *string `json:"name,omitempty"` +} + +// PriorityLevelConfigurationReferenceApplyConfiguration constructs an declarative configuration of the PriorityLevelConfigurationReference type for use with +// apply. +func PriorityLevelConfigurationReference() *PriorityLevelConfigurationReferenceApplyConfiguration { + return &PriorityLevelConfigurationReferenceApplyConfiguration{} +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *PriorityLevelConfigurationReferenceApplyConfiguration) WithName(value string) *PriorityLevelConfigurationReferenceApplyConfiguration { + b.Name = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/prioritylevelconfigurationspec.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/prioritylevelconfigurationspec.go new file mode 100644 index 0000000000000..5560ed9e567c8 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/prioritylevelconfigurationspec.go @@ -0,0 +1,52 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. 
+ +package v1beta2 + +import ( + v1beta2 "k8s.io/api/flowcontrol/v1beta2" +) + +// PriorityLevelConfigurationSpecApplyConfiguration represents an declarative configuration of the PriorityLevelConfigurationSpec type for use +// with apply. +type PriorityLevelConfigurationSpecApplyConfiguration struct { + Type *v1beta2.PriorityLevelEnablement `json:"type,omitempty"` + Limited *LimitedPriorityLevelConfigurationApplyConfiguration `json:"limited,omitempty"` +} + +// PriorityLevelConfigurationSpecApplyConfiguration constructs an declarative configuration of the PriorityLevelConfigurationSpec type for use with +// apply. +func PriorityLevelConfigurationSpec() *PriorityLevelConfigurationSpecApplyConfiguration { + return &PriorityLevelConfigurationSpecApplyConfiguration{} +} + +// WithType sets the Type field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Type field is set to the value of the last call. +func (b *PriorityLevelConfigurationSpecApplyConfiguration) WithType(value v1beta2.PriorityLevelEnablement) *PriorityLevelConfigurationSpecApplyConfiguration { + b.Type = &value + return b +} + +// WithLimited sets the Limited field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Limited field is set to the value of the last call. +func (b *PriorityLevelConfigurationSpecApplyConfiguration) WithLimited(value *LimitedPriorityLevelConfigurationApplyConfiguration) *PriorityLevelConfigurationSpecApplyConfiguration { + b.Limited = value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/prioritylevelconfigurationstatus.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/prioritylevelconfigurationstatus.go new file mode 100644 index 0000000000000..b55e32be00252 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/prioritylevelconfigurationstatus.go @@ -0,0 +1,44 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta2 + +// PriorityLevelConfigurationStatusApplyConfiguration represents an declarative configuration of the PriorityLevelConfigurationStatus type for use +// with apply. +type PriorityLevelConfigurationStatusApplyConfiguration struct { + Conditions []PriorityLevelConfigurationConditionApplyConfiguration `json:"conditions,omitempty"` +} + +// PriorityLevelConfigurationStatusApplyConfiguration constructs an declarative configuration of the PriorityLevelConfigurationStatus type for use with +// apply. 
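+//
+// For example (an illustrative sketch; the condition type is a hypothetical value),
+// a status carrying a single condition:
+//
+//	status := PriorityLevelConfigurationStatus().
+//		WithConditions(PriorityLevelConfigurationCondition().WithType("Ready"))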
+func PriorityLevelConfigurationStatus() *PriorityLevelConfigurationStatusApplyConfiguration { + return &PriorityLevelConfigurationStatusApplyConfiguration{} +} + +// WithConditions adds the given value to the Conditions field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Conditions field. +func (b *PriorityLevelConfigurationStatusApplyConfiguration) WithConditions(values ...*PriorityLevelConfigurationConditionApplyConfiguration) *PriorityLevelConfigurationStatusApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithConditions") + } + b.Conditions = append(b.Conditions, *values[i]) + } + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/queuingconfiguration.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/queuingconfiguration.go new file mode 100644 index 0000000000000..06246fb27ed0d --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/queuingconfiguration.go @@ -0,0 +1,57 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta2 + +// QueuingConfigurationApplyConfiguration represents an declarative configuration of the QueuingConfiguration type for use +// with apply. +type QueuingConfigurationApplyConfiguration struct { + Queues *int32 `json:"queues,omitempty"` + HandSize *int32 `json:"handSize,omitempty"` + QueueLengthLimit *int32 `json:"queueLengthLimit,omitempty"` +} + +// QueuingConfigurationApplyConfiguration constructs an declarative configuration of the QueuingConfiguration type for use with +// apply. +func QueuingConfiguration() *QueuingConfigurationApplyConfiguration { + return &QueuingConfigurationApplyConfiguration{} +} + +// WithQueues sets the Queues field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Queues field is set to the value of the last call. +func (b *QueuingConfigurationApplyConfiguration) WithQueues(value int32) *QueuingConfigurationApplyConfiguration { + b.Queues = &value + return b +} + +// WithHandSize sets the HandSize field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the HandSize field is set to the value of the last call. +func (b *QueuingConfigurationApplyConfiguration) WithHandSize(value int32) *QueuingConfigurationApplyConfiguration { + b.HandSize = &value + return b +} + +// WithQueueLengthLimit sets the QueueLengthLimit field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. 
+// If called multiple times, the QueueLengthLimit field is set to the value of the last call. +func (b *QueuingConfigurationApplyConfiguration) WithQueueLengthLimit(value int32) *QueuingConfigurationApplyConfiguration { + b.QueueLengthLimit = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/resourcepolicyrule.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/resourcepolicyrule.go new file mode 100644 index 0000000000000..b67ea1c7f9062 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/resourcepolicyrule.go @@ -0,0 +1,83 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta2 + +// ResourcePolicyRuleApplyConfiguration represents an declarative configuration of the ResourcePolicyRule type for use +// with apply. +type ResourcePolicyRuleApplyConfiguration struct { + Verbs []string `json:"verbs,omitempty"` + APIGroups []string `json:"apiGroups,omitempty"` + Resources []string `json:"resources,omitempty"` + ClusterScope *bool `json:"clusterScope,omitempty"` + Namespaces []string `json:"namespaces,omitempty"` +} + +// ResourcePolicyRuleApplyConfiguration constructs an declarative configuration of the ResourcePolicyRule type for use with +// apply. +func ResourcePolicyRule() *ResourcePolicyRuleApplyConfiguration { + return &ResourcePolicyRuleApplyConfiguration{} +} + +// WithVerbs adds the given value to the Verbs field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Verbs field. +func (b *ResourcePolicyRuleApplyConfiguration) WithVerbs(values ...string) *ResourcePolicyRuleApplyConfiguration { + for i := range values { + b.Verbs = append(b.Verbs, values[i]) + } + return b +} + +// WithAPIGroups adds the given value to the APIGroups field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the APIGroups field. +func (b *ResourcePolicyRuleApplyConfiguration) WithAPIGroups(values ...string) *ResourcePolicyRuleApplyConfiguration { + for i := range values { + b.APIGroups = append(b.APIGroups, values[i]) + } + return b +} + +// WithResources adds the given value to the Resources field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Resources field. 
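+//
+// For example (an illustrative sketch; the verbs, API group, and resource are
+// hypothetical values), the appenders compose into a full rule:
+//
+//	rule := ResourcePolicyRule().
+//		WithVerbs("get", "list").
+//		WithAPIGroups("").
+//		WithResources("pods")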
+func (b *ResourcePolicyRuleApplyConfiguration) WithResources(values ...string) *ResourcePolicyRuleApplyConfiguration { + for i := range values { + b.Resources = append(b.Resources, values[i]) + } + return b +} + +// WithClusterScope sets the ClusterScope field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ClusterScope field is set to the value of the last call. +func (b *ResourcePolicyRuleApplyConfiguration) WithClusterScope(value bool) *ResourcePolicyRuleApplyConfiguration { + b.ClusterScope = &value + return b +} + +// WithNamespaces adds the given value to the Namespaces field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Namespaces field. +func (b *ResourcePolicyRuleApplyConfiguration) WithNamespaces(values ...string) *ResourcePolicyRuleApplyConfiguration { + for i := range values { + b.Namespaces = append(b.Namespaces, values[i]) + } + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/serviceaccountsubject.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/serviceaccountsubject.go new file mode 100644 index 0000000000000..b6cfdcad3b558 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/serviceaccountsubject.go @@ -0,0 +1,48 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta2 + +// ServiceAccountSubjectApplyConfiguration represents an declarative configuration of the ServiceAccountSubject type for use +// with apply. +type ServiceAccountSubjectApplyConfiguration struct { + Namespace *string `json:"namespace,omitempty"` + Name *string `json:"name,omitempty"` +} + +// ServiceAccountSubjectApplyConfiguration constructs an declarative configuration of the ServiceAccountSubject type for use with +// apply. +func ServiceAccountSubject() *ServiceAccountSubjectApplyConfiguration { + return &ServiceAccountSubjectApplyConfiguration{} +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. +func (b *ServiceAccountSubjectApplyConfiguration) WithNamespace(value string) *ServiceAccountSubjectApplyConfiguration { + b.Namespace = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. 
+func (b *ServiceAccountSubjectApplyConfiguration) WithName(value string) *ServiceAccountSubjectApplyConfiguration { + b.Name = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/subject.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/subject.go new file mode 100644 index 0000000000000..7030785b8c459 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/subject.go @@ -0,0 +1,70 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta2 + +import ( + v1beta2 "k8s.io/api/flowcontrol/v1beta2" +) + +// SubjectApplyConfiguration represents an declarative configuration of the Subject type for use +// with apply. +type SubjectApplyConfiguration struct { + Kind *v1beta2.SubjectKind `json:"kind,omitempty"` + User *UserSubjectApplyConfiguration `json:"user,omitempty"` + Group *GroupSubjectApplyConfiguration `json:"group,omitempty"` + ServiceAccount *ServiceAccountSubjectApplyConfiguration `json:"serviceAccount,omitempty"` +} + +// SubjectApplyConfiguration constructs an declarative configuration of the Subject type for use with +// apply. +func Subject() *SubjectApplyConfiguration { + return &SubjectApplyConfiguration{} +} + +// WithKind sets the Kind field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Kind field is set to the value of the last call. +func (b *SubjectApplyConfiguration) WithKind(value v1beta2.SubjectKind) *SubjectApplyConfiguration { + b.Kind = &value + return b +} + +// WithUser sets the User field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the User field is set to the value of the last call. +func (b *SubjectApplyConfiguration) WithUser(value *UserSubjectApplyConfiguration) *SubjectApplyConfiguration { + b.User = value + return b +} + +// WithGroup sets the Group field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Group field is set to the value of the last call. +func (b *SubjectApplyConfiguration) WithGroup(value *GroupSubjectApplyConfiguration) *SubjectApplyConfiguration { + b.Group = value + return b +} + +// WithServiceAccount sets the ServiceAccount field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ServiceAccount field is set to the value of the last call. 
+func (b *SubjectApplyConfiguration) WithServiceAccount(value *ServiceAccountSubjectApplyConfiguration) *SubjectApplyConfiguration { + b.ServiceAccount = value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/usersubject.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/usersubject.go new file mode 100644 index 0000000000000..8c77b3e8a2102 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/usersubject.go @@ -0,0 +1,39 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta2 + +// UserSubjectApplyConfiguration represents an declarative configuration of the UserSubject type for use +// with apply. +type UserSubjectApplyConfiguration struct { + Name *string `json:"name,omitempty"` +} + +// UserSubjectApplyConfiguration constructs an declarative configuration of the UserSubject type for use with +// apply. +func UserSubject() *UserSubjectApplyConfiguration { + return &UserSubjectApplyConfiguration{} +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. 
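+//
+// For example (illustrative; "alice" is a hypothetical user name), a user
+// subject is typically wrapped in a Subject:
+//
+//	s := Subject().
+//		WithKind("User").
+//		WithUser(UserSubject().WithName("alice"))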
+func (b *UserSubjectApplyConfiguration) WithName(value string) *UserSubjectApplyConfiguration { + b.Name = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/internal/internal.go b/vendor/k8s.io/client-go/applyconfigurations/internal/internal.go index abe43c0bf4711..824c5e9582e06 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/internal/internal.go +++ b/vendor/k8s.io/client-go/applyconfigurations/internal/internal.go @@ -482,8 +482,8 @@ var schemaYAML = typed.YAMLObject(`types: elementRelationship: atomic map: elementType: - namedType: __untyped_atomic_ - elementRelationship: atomic + namedType: __untyped_deduced_ + elementRelationship: separable - name: io.k8s.api.apiserverinternal.v1alpha1.StorageVersionStatus map: fields: @@ -907,9 +907,24 @@ var schemaYAML = typed.YAMLObject(`types: type: scalar: string default: "" +- name: io.k8s.api.apps.v1.StatefulSetPersistentVolumeClaimRetentionPolicy + map: + fields: + - name: whenDeleted + type: + scalar: string + - name: whenScaled + type: + scalar: string - name: io.k8s.api.apps.v1.StatefulSetSpec map: fields: + - name: minReadySeconds + type: + scalar: numeric + - name: persistentVolumeClaimRetentionPolicy + type: + namedType: io.k8s.api.apps.v1.StatefulSetPersistentVolumeClaimRetentionPolicy - name: podManagementPolicy type: scalar: string @@ -943,6 +958,10 @@ var schemaYAML = typed.YAMLObject(`types: - name: io.k8s.api.apps.v1.StatefulSetStatus map: fields: + - name: availableReplicas + type: + scalar: numeric + default: 0 - name: collisionCount type: scalar: numeric @@ -1188,9 +1207,24 @@ var schemaYAML = typed.YAMLObject(`types: type: scalar: string default: "" +- name: io.k8s.api.apps.v1beta1.StatefulSetPersistentVolumeClaimRetentionPolicy + map: + fields: + - name: whenDeleted + type: + scalar: string + - name: whenScaled + type: + scalar: string - name: io.k8s.api.apps.v1beta1.StatefulSetSpec map: fields: + - name: minReadySeconds + type: + scalar: numeric + - name: persistentVolumeClaimRetentionPolicy + type: + namedType: io.k8s.api.apps.v1beta1.StatefulSetPersistentVolumeClaimRetentionPolicy - name: podManagementPolicy type: scalar: string @@ -1224,6 +1258,10 @@ var schemaYAML = typed.YAMLObject(`types: - name: io.k8s.api.apps.v1beta1.StatefulSetStatus map: fields: + - name: availableReplicas + type: + scalar: numeric + default: 0 - name: collisionCount type: scalar: numeric @@ -1667,9 +1705,24 @@ var schemaYAML = typed.YAMLObject(`types: type: scalar: string default: "" +- name: io.k8s.api.apps.v1beta2.StatefulSetPersistentVolumeClaimRetentionPolicy + map: + fields: + - name: whenDeleted + type: + scalar: string + - name: whenScaled + type: + scalar: string - name: io.k8s.api.apps.v1beta2.StatefulSetSpec map: fields: + - name: minReadySeconds + type: + scalar: numeric + - name: persistentVolumeClaimRetentionPolicy + type: + namedType: io.k8s.api.apps.v1beta2.StatefulSetPersistentVolumeClaimRetentionPolicy - name: podManagementPolicy type: scalar: string @@ -1703,6 +1756,10 @@ var schemaYAML = typed.YAMLObject(`types: - name: io.k8s.api.apps.v1beta2.StatefulSetStatus map: fields: + - name: availableReplicas + type: + scalar: numeric + default: 0 - name: collisionCount type: scalar: numeric @@ -1759,6 +1816,7 @@ var schemaYAML = typed.YAMLObject(`types: type: scalar: string default: "" + elementRelationship: atomic - name: io.k8s.api.autoscaling.v1.HorizontalPodAutoscaler map: fields: @@ -1817,7 +1875,7 @@ var schemaYAML = typed.YAMLObject(`types: - name: observedGeneration type: scalar: numeric -- 
name: io.k8s.api.autoscaling.v2beta1.ContainerResourceMetricSource +- name: io.k8s.api.autoscaling.v2.ContainerResourceMetricSource map: fields: - name: container @@ -1828,31 +1886,26 @@ var schemaYAML = typed.YAMLObject(`types: type: scalar: string default: "" - - name: targetAverageUtilization - type: - scalar: numeric - - name: targetAverageValue + - name: target type: - namedType: io.k8s.apimachinery.pkg.api.resource.Quantity -- name: io.k8s.api.autoscaling.v2beta1.ContainerResourceMetricStatus + namedType: io.k8s.api.autoscaling.v2.MetricTarget + default: {} +- name: io.k8s.api.autoscaling.v2.ContainerResourceMetricStatus map: fields: - name: container type: scalar: string default: "" - - name: currentAverageUtilization - type: - scalar: numeric - - name: currentAverageValue + - name: current type: - namedType: io.k8s.apimachinery.pkg.api.resource.Quantity + namedType: io.k8s.api.autoscaling.v2.MetricValueStatus default: {} - name: name type: scalar: string default: "" -- name: io.k8s.api.autoscaling.v2beta1.CrossVersionObjectReference +- name: io.k8s.api.autoscaling.v2.CrossVersionObjectReference map: fields: - name: apiVersion @@ -1866,40 +1919,59 @@ var schemaYAML = typed.YAMLObject(`types: type: scalar: string default: "" -- name: io.k8s.api.autoscaling.v2beta1.ExternalMetricSource +- name: io.k8s.api.autoscaling.v2.ExternalMetricSource map: fields: - - name: metricName - type: - scalar: string - default: "" - - name: metricSelector - type: - namedType: io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector - - name: targetAverageValue + - name: metric type: - namedType: io.k8s.apimachinery.pkg.api.resource.Quantity - - name: targetValue + namedType: io.k8s.api.autoscaling.v2.MetricIdentifier + default: {} + - name: target type: - namedType: io.k8s.apimachinery.pkg.api.resource.Quantity -- name: io.k8s.api.autoscaling.v2beta1.ExternalMetricStatus + namedType: io.k8s.api.autoscaling.v2.MetricTarget + default: {} +- name: io.k8s.api.autoscaling.v2.ExternalMetricStatus map: fields: - - name: currentAverageValue + - name: current type: - namedType: io.k8s.apimachinery.pkg.api.resource.Quantity - - name: currentValue + namedType: io.k8s.api.autoscaling.v2.MetricValueStatus + default: {} + - name: metric type: - namedType: io.k8s.apimachinery.pkg.api.resource.Quantity + namedType: io.k8s.api.autoscaling.v2.MetricIdentifier default: {} - - name: metricName +- name: io.k8s.api.autoscaling.v2.HPAScalingPolicy + map: + fields: + - name: periodSeconds + type: + scalar: numeric + default: 0 + - name: type type: scalar: string default: "" - - name: metricSelector + - name: value type: - namedType: io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector -- name: io.k8s.api.autoscaling.v2beta1.HorizontalPodAutoscaler + scalar: numeric + default: 0 +- name: io.k8s.api.autoscaling.v2.HPAScalingRules + map: + fields: + - name: policies + type: + list: + elementType: + namedType: io.k8s.api.autoscaling.v2.HPAScalingPolicy + elementRelationship: atomic + - name: selectPolicy + type: + scalar: string + - name: stabilizationWindowSeconds + type: + scalar: numeric +- name: io.k8s.api.autoscaling.v2.HorizontalPodAutoscaler map: fields: - name: apiVersion @@ -1914,13 +1986,22 @@ var schemaYAML = typed.YAMLObject(`types: default: {} - name: spec type: - namedType: io.k8s.api.autoscaling.v2beta1.HorizontalPodAutoscalerSpec + namedType: io.k8s.api.autoscaling.v2.HorizontalPodAutoscalerSpec default: {} - name: status type: - namedType: io.k8s.api.autoscaling.v2beta1.HorizontalPodAutoscalerStatus + namedType: 
io.k8s.api.autoscaling.v2.HorizontalPodAutoscalerStatus default: {} -- name: io.k8s.api.autoscaling.v2beta1.HorizontalPodAutoscalerCondition +- name: io.k8s.api.autoscaling.v2.HorizontalPodAutoscalerBehavior + map: + fields: + - name: scaleDown + type: + namedType: io.k8s.api.autoscaling.v2.HPAScalingRules + - name: scaleUp + type: + namedType: io.k8s.api.autoscaling.v2.HPAScalingRules +- name: io.k8s.api.autoscaling.v2.HorizontalPodAutoscalerCondition map: fields: - name: lastTransitionTime @@ -1941,9 +2022,12 @@ var schemaYAML = typed.YAMLObject(`types: type: scalar: string default: "" -- name: io.k8s.api.autoscaling.v2beta1.HorizontalPodAutoscalerSpec +- name: io.k8s.api.autoscaling.v2.HorizontalPodAutoscalerSpec map: fields: + - name: behavior + type: + namedType: io.k8s.api.autoscaling.v2.HorizontalPodAutoscalerBehavior - name: maxReplicas type: scalar: numeric @@ -1952,34 +2036,35 @@ var schemaYAML = typed.YAMLObject(`types: type: list: elementType: - namedType: io.k8s.api.autoscaling.v2beta1.MetricSpec + namedType: io.k8s.api.autoscaling.v2.MetricSpec elementRelationship: atomic - name: minReplicas type: scalar: numeric - name: scaleTargetRef type: - namedType: io.k8s.api.autoscaling.v2beta1.CrossVersionObjectReference + namedType: io.k8s.api.autoscaling.v2.CrossVersionObjectReference default: {} -- name: io.k8s.api.autoscaling.v2beta1.HorizontalPodAutoscalerStatus +- name: io.k8s.api.autoscaling.v2.HorizontalPodAutoscalerStatus map: fields: - name: conditions type: list: elementType: - namedType: io.k8s.api.autoscaling.v2beta1.HorizontalPodAutoscalerCondition - elementRelationship: atomic + namedType: io.k8s.api.autoscaling.v2.HorizontalPodAutoscalerCondition + elementRelationship: associative + keys: + - type - name: currentMetrics type: list: elementType: - namedType: io.k8s.api.autoscaling.v2beta1.MetricStatus + namedType: io.k8s.api.autoscaling.v2.MetricStatus elementRelationship: atomic - name: currentReplicas type: scalar: numeric - default: 0 - name: desiredReplicas type: scalar: numeric @@ -1990,148 +2075,163 @@ var schemaYAML = typed.YAMLObject(`types: - name: observedGeneration type: scalar: numeric -- name: io.k8s.api.autoscaling.v2beta1.MetricSpec +- name: io.k8s.api.autoscaling.v2.MetricIdentifier + map: + fields: + - name: name + type: + scalar: string + default: "" + - name: selector + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector +- name: io.k8s.api.autoscaling.v2.MetricSpec map: fields: - name: containerResource type: - namedType: io.k8s.api.autoscaling.v2beta1.ContainerResourceMetricSource + namedType: io.k8s.api.autoscaling.v2.ContainerResourceMetricSource - name: external type: - namedType: io.k8s.api.autoscaling.v2beta1.ExternalMetricSource + namedType: io.k8s.api.autoscaling.v2.ExternalMetricSource - name: object type: - namedType: io.k8s.api.autoscaling.v2beta1.ObjectMetricSource + namedType: io.k8s.api.autoscaling.v2.ObjectMetricSource - name: pods type: - namedType: io.k8s.api.autoscaling.v2beta1.PodsMetricSource + namedType: io.k8s.api.autoscaling.v2.PodsMetricSource - name: resource type: - namedType: io.k8s.api.autoscaling.v2beta1.ResourceMetricSource + namedType: io.k8s.api.autoscaling.v2.ResourceMetricSource - name: type type: scalar: string default: "" -- name: io.k8s.api.autoscaling.v2beta1.MetricStatus +- name: io.k8s.api.autoscaling.v2.MetricStatus map: fields: - name: containerResource type: - namedType: io.k8s.api.autoscaling.v2beta1.ContainerResourceMetricStatus + namedType: 
io.k8s.api.autoscaling.v2.ContainerResourceMetricStatus - name: external type: - namedType: io.k8s.api.autoscaling.v2beta1.ExternalMetricStatus + namedType: io.k8s.api.autoscaling.v2.ExternalMetricStatus - name: object type: - namedType: io.k8s.api.autoscaling.v2beta1.ObjectMetricStatus + namedType: io.k8s.api.autoscaling.v2.ObjectMetricStatus - name: pods type: - namedType: io.k8s.api.autoscaling.v2beta1.PodsMetricStatus + namedType: io.k8s.api.autoscaling.v2.PodsMetricStatus - name: resource type: - namedType: io.k8s.api.autoscaling.v2beta1.ResourceMetricStatus + namedType: io.k8s.api.autoscaling.v2.ResourceMetricStatus - name: type type: scalar: string default: "" -- name: io.k8s.api.autoscaling.v2beta1.ObjectMetricSource +- name: io.k8s.api.autoscaling.v2.MetricTarget map: fields: + - name: averageUtilization + type: + scalar: numeric - name: averageValue type: namedType: io.k8s.apimachinery.pkg.api.resource.Quantity - - name: metricName + - name: type type: scalar: string default: "" - - name: selector - type: - namedType: io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector - - name: target - type: - namedType: io.k8s.api.autoscaling.v2beta1.CrossVersionObjectReference - default: {} - - name: targetValue + - name: value type: namedType: io.k8s.apimachinery.pkg.api.resource.Quantity - default: {} -- name: io.k8s.api.autoscaling.v2beta1.ObjectMetricStatus +- name: io.k8s.api.autoscaling.v2.MetricValueStatus map: fields: + - name: averageUtilization + type: + scalar: numeric - name: averageValue type: namedType: io.k8s.apimachinery.pkg.api.resource.Quantity - - name: currentValue + - name: value type: namedType: io.k8s.apimachinery.pkg.api.resource.Quantity - default: {} - - name: metricName +- name: io.k8s.api.autoscaling.v2.ObjectMetricSource + map: + fields: + - name: describedObject type: - scalar: string - default: "" - - name: selector + namedType: io.k8s.api.autoscaling.v2.CrossVersionObjectReference + default: {} + - name: metric type: - namedType: io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector + namedType: io.k8s.api.autoscaling.v2.MetricIdentifier + default: {} - name: target type: - namedType: io.k8s.api.autoscaling.v2beta1.CrossVersionObjectReference + namedType: io.k8s.api.autoscaling.v2.MetricTarget default: {} -- name: io.k8s.api.autoscaling.v2beta1.PodsMetricSource +- name: io.k8s.api.autoscaling.v2.ObjectMetricStatus map: fields: - - name: metricName + - name: current type: - scalar: string - default: "" - - name: selector + namedType: io.k8s.api.autoscaling.v2.MetricValueStatus + default: {} + - name: describedObject type: - namedType: io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector - - name: targetAverageValue + namedType: io.k8s.api.autoscaling.v2.CrossVersionObjectReference + default: {} + - name: metric type: - namedType: io.k8s.apimachinery.pkg.api.resource.Quantity + namedType: io.k8s.api.autoscaling.v2.MetricIdentifier default: {} -- name: io.k8s.api.autoscaling.v2beta1.PodsMetricStatus +- name: io.k8s.api.autoscaling.v2.PodsMetricSource map: fields: - - name: currentAverageValue + - name: metric type: - namedType: io.k8s.apimachinery.pkg.api.resource.Quantity + namedType: io.k8s.api.autoscaling.v2.MetricIdentifier default: {} - - name: metricName + - name: target type: - scalar: string - default: "" - - name: selector + namedType: io.k8s.api.autoscaling.v2.MetricTarget + default: {} +- name: io.k8s.api.autoscaling.v2.PodsMetricStatus + map: + fields: + - name: current type: - namedType: io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector -- name: 
io.k8s.api.autoscaling.v2beta1.ResourceMetricSource + namedType: io.k8s.api.autoscaling.v2.MetricValueStatus + default: {} + - name: metric + type: + namedType: io.k8s.api.autoscaling.v2.MetricIdentifier + default: {} +- name: io.k8s.api.autoscaling.v2.ResourceMetricSource map: fields: - name: name type: scalar: string default: "" - - name: targetAverageUtilization - type: - scalar: numeric - - name: targetAverageValue + - name: target type: - namedType: io.k8s.apimachinery.pkg.api.resource.Quantity -- name: io.k8s.api.autoscaling.v2beta1.ResourceMetricStatus + namedType: io.k8s.api.autoscaling.v2.MetricTarget + default: {} +- name: io.k8s.api.autoscaling.v2.ResourceMetricStatus map: fields: - - name: currentAverageUtilization - type: - scalar: numeric - - name: currentAverageValue + - name: current type: - namedType: io.k8s.apimachinery.pkg.api.resource.Quantity + namedType: io.k8s.api.autoscaling.v2.MetricValueStatus default: {} - name: name type: scalar: string default: "" -- name: io.k8s.api.autoscaling.v2beta2.ContainerResourceMetricSource +- name: io.k8s.api.autoscaling.v2beta1.ContainerResourceMetricSource map: fields: - name: container @@ -2142,26 +2242,31 @@ var schemaYAML = typed.YAMLObject(`types: type: scalar: string default: "" - - name: target + - name: targetAverageUtilization type: - namedType: io.k8s.api.autoscaling.v2beta2.MetricTarget - default: {} -- name: io.k8s.api.autoscaling.v2beta2.ContainerResourceMetricStatus + scalar: numeric + - name: targetAverageValue + type: + namedType: io.k8s.apimachinery.pkg.api.resource.Quantity +- name: io.k8s.api.autoscaling.v2beta1.ContainerResourceMetricStatus map: fields: - name: container type: scalar: string default: "" - - name: current + - name: currentAverageUtilization type: - namedType: io.k8s.api.autoscaling.v2beta2.MetricValueStatus + scalar: numeric + - name: currentAverageValue + type: + namedType: io.k8s.apimachinery.pkg.api.resource.Quantity default: {} - name: name type: scalar: string default: "" -- name: io.k8s.api.autoscaling.v2beta2.CrossVersionObjectReference +- name: io.k8s.api.autoscaling.v2beta1.CrossVersionObjectReference map: fields: - name: apiVersion @@ -2175,59 +2280,40 @@ var schemaYAML = typed.YAMLObject(`types: type: scalar: string default: "" -- name: io.k8s.api.autoscaling.v2beta2.ExternalMetricSource +- name: io.k8s.api.autoscaling.v2beta1.ExternalMetricSource map: fields: - - name: metric + - name: metricName type: - namedType: io.k8s.api.autoscaling.v2beta2.MetricIdentifier - default: {} - - name: target + scalar: string + default: "" + - name: metricSelector type: - namedType: io.k8s.api.autoscaling.v2beta2.MetricTarget - default: {} -- name: io.k8s.api.autoscaling.v2beta2.ExternalMetricStatus - map: - fields: - - name: current + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector + - name: targetAverageValue type: - namedType: io.k8s.api.autoscaling.v2beta2.MetricValueStatus - default: {} - - name: metric + namedType: io.k8s.apimachinery.pkg.api.resource.Quantity + - name: targetValue type: - namedType: io.k8s.api.autoscaling.v2beta2.MetricIdentifier - default: {} -- name: io.k8s.api.autoscaling.v2beta2.HPAScalingPolicy + namedType: io.k8s.apimachinery.pkg.api.resource.Quantity +- name: io.k8s.api.autoscaling.v2beta1.ExternalMetricStatus map: fields: - - name: periodSeconds - type: - scalar: numeric - default: 0 - - name: type - type: - scalar: string - default: "" - - name: value + - name: currentAverageValue type: - scalar: numeric - default: 0 -- name: 
io.k8s.api.autoscaling.v2beta2.HPAScalingRules - map: - fields: - - name: policies + namedType: io.k8s.apimachinery.pkg.api.resource.Quantity + - name: currentValue type: - list: - elementType: - namedType: io.k8s.api.autoscaling.v2beta2.HPAScalingPolicy - elementRelationship: atomic - - name: selectPolicy + namedType: io.k8s.apimachinery.pkg.api.resource.Quantity + default: {} + - name: metricName type: scalar: string - - name: stabilizationWindowSeconds + default: "" + - name: metricSelector type: - scalar: numeric -- name: io.k8s.api.autoscaling.v2beta2.HorizontalPodAutoscaler + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector +- name: io.k8s.api.autoscaling.v2beta1.HorizontalPodAutoscaler map: fields: - name: apiVersion @@ -2242,22 +2328,13 @@ var schemaYAML = typed.YAMLObject(`types: default: {} - name: spec type: - namedType: io.k8s.api.autoscaling.v2beta2.HorizontalPodAutoscalerSpec + namedType: io.k8s.api.autoscaling.v2beta1.HorizontalPodAutoscalerSpec default: {} - name: status type: - namedType: io.k8s.api.autoscaling.v2beta2.HorizontalPodAutoscalerStatus + namedType: io.k8s.api.autoscaling.v2beta1.HorizontalPodAutoscalerStatus default: {} -- name: io.k8s.api.autoscaling.v2beta2.HorizontalPodAutoscalerBehavior - map: - fields: - - name: scaleDown - type: - namedType: io.k8s.api.autoscaling.v2beta2.HPAScalingRules - - name: scaleUp - type: - namedType: io.k8s.api.autoscaling.v2beta2.HPAScalingRules -- name: io.k8s.api.autoscaling.v2beta2.HorizontalPodAutoscalerCondition +- name: io.k8s.api.autoscaling.v2beta1.HorizontalPodAutoscalerCondition map: fields: - name: lastTransitionTime @@ -2278,12 +2355,9 @@ var schemaYAML = typed.YAMLObject(`types: type: scalar: string default: "" -- name: io.k8s.api.autoscaling.v2beta2.HorizontalPodAutoscalerSpec +- name: io.k8s.api.autoscaling.v2beta1.HorizontalPodAutoscalerSpec map: fields: - - name: behavior - type: - namedType: io.k8s.api.autoscaling.v2beta2.HorizontalPodAutoscalerBehavior - name: maxReplicas type: scalar: numeric @@ -2292,29 +2366,29 @@ var schemaYAML = typed.YAMLObject(`types: type: list: elementType: - namedType: io.k8s.api.autoscaling.v2beta2.MetricSpec + namedType: io.k8s.api.autoscaling.v2beta1.MetricSpec elementRelationship: atomic - name: minReplicas type: scalar: numeric - name: scaleTargetRef type: - namedType: io.k8s.api.autoscaling.v2beta2.CrossVersionObjectReference + namedType: io.k8s.api.autoscaling.v2beta1.CrossVersionObjectReference default: {} -- name: io.k8s.api.autoscaling.v2beta2.HorizontalPodAutoscalerStatus +- name: io.k8s.api.autoscaling.v2beta1.HorizontalPodAutoscalerStatus map: fields: - name: conditions type: list: elementType: - namedType: io.k8s.api.autoscaling.v2beta2.HorizontalPodAutoscalerCondition + namedType: io.k8s.api.autoscaling.v2beta1.HorizontalPodAutoscalerCondition elementRelationship: atomic - name: currentMetrics type: list: elementType: - namedType: io.k8s.api.autoscaling.v2beta2.MetricStatus + namedType: io.k8s.api.autoscaling.v2beta1.MetricStatus elementRelationship: atomic - name: currentReplicas type: @@ -2330,143 +2404,154 @@ var schemaYAML = typed.YAMLObject(`types: - name: observedGeneration type: scalar: numeric -- name: io.k8s.api.autoscaling.v2beta2.MetricIdentifier - map: - fields: - - name: name - type: - scalar: string - default: "" - - name: selector - type: - namedType: io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector -- name: io.k8s.api.autoscaling.v2beta2.MetricSpec +- name: io.k8s.api.autoscaling.v2beta1.MetricSpec map: fields: - name: containerResource 
type: - namedType: io.k8s.api.autoscaling.v2beta2.ContainerResourceMetricSource + namedType: io.k8s.api.autoscaling.v2beta1.ContainerResourceMetricSource - name: external type: - namedType: io.k8s.api.autoscaling.v2beta2.ExternalMetricSource + namedType: io.k8s.api.autoscaling.v2beta1.ExternalMetricSource - name: object type: - namedType: io.k8s.api.autoscaling.v2beta2.ObjectMetricSource + namedType: io.k8s.api.autoscaling.v2beta1.ObjectMetricSource - name: pods type: - namedType: io.k8s.api.autoscaling.v2beta2.PodsMetricSource + namedType: io.k8s.api.autoscaling.v2beta1.PodsMetricSource - name: resource type: - namedType: io.k8s.api.autoscaling.v2beta2.ResourceMetricSource + namedType: io.k8s.api.autoscaling.v2beta1.ResourceMetricSource - name: type type: scalar: string default: "" -- name: io.k8s.api.autoscaling.v2beta2.MetricStatus +- name: io.k8s.api.autoscaling.v2beta1.MetricStatus map: fields: - name: containerResource type: - namedType: io.k8s.api.autoscaling.v2beta2.ContainerResourceMetricStatus + namedType: io.k8s.api.autoscaling.v2beta1.ContainerResourceMetricStatus - name: external type: - namedType: io.k8s.api.autoscaling.v2beta2.ExternalMetricStatus + namedType: io.k8s.api.autoscaling.v2beta1.ExternalMetricStatus - name: object type: - namedType: io.k8s.api.autoscaling.v2beta2.ObjectMetricStatus + namedType: io.k8s.api.autoscaling.v2beta1.ObjectMetricStatus - name: pods type: - namedType: io.k8s.api.autoscaling.v2beta2.PodsMetricStatus + namedType: io.k8s.api.autoscaling.v2beta1.PodsMetricStatus - name: resource type: - namedType: io.k8s.api.autoscaling.v2beta2.ResourceMetricStatus + namedType: io.k8s.api.autoscaling.v2beta1.ResourceMetricStatus - name: type type: scalar: string default: "" -- name: io.k8s.api.autoscaling.v2beta2.MetricTarget +- name: io.k8s.api.autoscaling.v2beta1.ObjectMetricSource map: fields: - - name: averageUtilization - type: - scalar: numeric - name: averageValue type: namedType: io.k8s.apimachinery.pkg.api.resource.Quantity - - name: type + - name: metricName type: scalar: string default: "" - - name: value + - name: selector + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector + - name: target + type: + namedType: io.k8s.api.autoscaling.v2beta1.CrossVersionObjectReference + default: {} + - name: targetValue type: namedType: io.k8s.apimachinery.pkg.api.resource.Quantity -- name: io.k8s.api.autoscaling.v2beta2.MetricValueStatus + default: {} +- name: io.k8s.api.autoscaling.v2beta1.ObjectMetricStatus map: fields: - - name: averageUtilization - type: - scalar: numeric - name: averageValue type: namedType: io.k8s.apimachinery.pkg.api.resource.Quantity - - name: value + - name: currentValue type: namedType: io.k8s.apimachinery.pkg.api.resource.Quantity -- name: io.k8s.api.autoscaling.v2beta2.ObjectMetricSource - map: - fields: - - name: describedObject - type: - namedType: io.k8s.api.autoscaling.v2beta2.CrossVersionObjectReference default: {} - - name: metric + - name: metricName type: - namedType: io.k8s.api.autoscaling.v2beta2.MetricIdentifier - default: {} + scalar: string + default: "" + - name: selector + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector - name: target type: - namedType: io.k8s.api.autoscaling.v2beta2.MetricTarget + namedType: io.k8s.api.autoscaling.v2beta1.CrossVersionObjectReference default: {} -- name: io.k8s.api.autoscaling.v2beta2.ObjectMetricStatus +- name: io.k8s.api.autoscaling.v2beta1.PodsMetricSource map: fields: - - name: current + - name: metricName type: - namedType: 
io.k8s.api.autoscaling.v2beta2.MetricValueStatus - default: {} - - name: describedObject + scalar: string + default: "" + - name: selector type: - namedType: io.k8s.api.autoscaling.v2beta2.CrossVersionObjectReference - default: {} - - name: metric + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector + - name: targetAverageValue type: - namedType: io.k8s.api.autoscaling.v2beta2.MetricIdentifier + namedType: io.k8s.apimachinery.pkg.api.resource.Quantity default: {} -- name: io.k8s.api.autoscaling.v2beta2.PodsMetricSource +- name: io.k8s.api.autoscaling.v2beta1.PodsMetricStatus map: fields: - - name: metric + - name: currentAverageValue type: - namedType: io.k8s.api.autoscaling.v2beta2.MetricIdentifier + namedType: io.k8s.apimachinery.pkg.api.resource.Quantity default: {} - - name: target + - name: metricName type: - namedType: io.k8s.api.autoscaling.v2beta2.MetricTarget - default: {} -- name: io.k8s.api.autoscaling.v2beta2.PodsMetricStatus + scalar: string + default: "" + - name: selector + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector +- name: io.k8s.api.autoscaling.v2beta1.ResourceMetricSource map: fields: - - name: current + - name: name type: - namedType: io.k8s.api.autoscaling.v2beta2.MetricValueStatus - default: {} - - name: metric + scalar: string + default: "" + - name: targetAverageUtilization type: - namedType: io.k8s.api.autoscaling.v2beta2.MetricIdentifier + scalar: numeric + - name: targetAverageValue + type: + namedType: io.k8s.apimachinery.pkg.api.resource.Quantity +- name: io.k8s.api.autoscaling.v2beta1.ResourceMetricStatus + map: + fields: + - name: currentAverageUtilization + type: + scalar: numeric + - name: currentAverageValue + type: + namedType: io.k8s.apimachinery.pkg.api.resource.Quantity default: {} -- name: io.k8s.api.autoscaling.v2beta2.ResourceMetricSource + - name: name + type: + scalar: string + default: "" +- name: io.k8s.api.autoscaling.v2beta2.ContainerResourceMetricSource map: fields: + - name: container + type: + scalar: string + default: "" - name: name type: scalar: string @@ -2475,9 +2560,13 @@ var schemaYAML = typed.YAMLObject(`types: type: namedType: io.k8s.api.autoscaling.v2beta2.MetricTarget default: {} -- name: io.k8s.api.autoscaling.v2beta2.ResourceMetricStatus +- name: io.k8s.api.autoscaling.v2beta2.ContainerResourceMetricStatus map: fields: + - name: container + type: + scalar: string + default: "" - name: current type: namedType: io.k8s.api.autoscaling.v2beta2.MetricValueStatus @@ -2486,7 +2575,7 @@ var schemaYAML = typed.YAMLObject(`types: type: scalar: string default: "" -- name: io.k8s.api.batch.v1.CronJob +- name: io.k8s.api.autoscaling.v2beta2.CrossVersionObjectReference map: fields: - name: apiVersion @@ -2495,60 +2584,64 @@ var schemaYAML = typed.YAMLObject(`types: - name: kind type: scalar: string - - name: metadata + default: "" + - name: name type: - namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta - default: {} - - name: spec + scalar: string + default: "" +- name: io.k8s.api.autoscaling.v2beta2.ExternalMetricSource + map: + fields: + - name: metric type: - namedType: io.k8s.api.batch.v1.CronJobSpec + namedType: io.k8s.api.autoscaling.v2beta2.MetricIdentifier default: {} - - name: status + - name: target type: - namedType: io.k8s.api.batch.v1.CronJobStatus + namedType: io.k8s.api.autoscaling.v2beta2.MetricTarget default: {} -- name: io.k8s.api.batch.v1.CronJobSpec +- name: io.k8s.api.autoscaling.v2beta2.ExternalMetricStatus map: fields: - - name: concurrencyPolicy - type: - scalar: string - 
- name: failedJobsHistoryLimit + - name: current type: - scalar: numeric - - name: jobTemplate + namedType: io.k8s.api.autoscaling.v2beta2.MetricValueStatus + default: {} + - name: metric type: - namedType: io.k8s.api.batch.v1.JobTemplateSpec + namedType: io.k8s.api.autoscaling.v2beta2.MetricIdentifier default: {} - - name: schedule +- name: io.k8s.api.autoscaling.v2beta2.HPAScalingPolicy + map: + fields: + - name: periodSeconds + type: + scalar: numeric + default: 0 + - name: type type: scalar: string default: "" - - name: startingDeadlineSeconds - type: - scalar: numeric - - name: successfulJobsHistoryLimit + - name: value type: scalar: numeric - - name: suspend - type: - scalar: boolean -- name: io.k8s.api.batch.v1.CronJobStatus + default: 0 +- name: io.k8s.api.autoscaling.v2beta2.HPAScalingRules map: fields: - - name: active + - name: policies type: list: elementType: - namedType: io.k8s.api.core.v1.ObjectReference + namedType: io.k8s.api.autoscaling.v2beta2.HPAScalingPolicy elementRelationship: atomic - - name: lastScheduleTime + - name: selectPolicy type: - namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - - name: lastSuccessfulTime + scalar: string + - name: stabilizationWindowSeconds type: - namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time -- name: io.k8s.api.batch.v1.Job + scalar: numeric +- name: io.k8s.api.autoscaling.v2beta2.HorizontalPodAutoscaler map: fields: - name: apiVersion @@ -2563,19 +2656,24 @@ var schemaYAML = typed.YAMLObject(`types: default: {} - name: spec type: - namedType: io.k8s.api.batch.v1.JobSpec + namedType: io.k8s.api.autoscaling.v2beta2.HorizontalPodAutoscalerSpec default: {} - name: status type: - namedType: io.k8s.api.batch.v1.JobStatus + namedType: io.k8s.api.autoscaling.v2beta2.HorizontalPodAutoscalerStatus default: {} -- name: io.k8s.api.batch.v1.JobCondition +- name: io.k8s.api.autoscaling.v2beta2.HorizontalPodAutoscalerBehavior map: fields: - - name: lastProbeTime + - name: scaleDown type: - namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} + namedType: io.k8s.api.autoscaling.v2beta2.HPAScalingRules + - name: scaleUp + type: + namedType: io.k8s.api.autoscaling.v2beta2.HPAScalingRules +- name: io.k8s.api.autoscaling.v2beta2.HorizontalPodAutoscalerCondition + map: + fields: - name: lastTransitionTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time @@ -2594,248 +2692,277 @@ var schemaYAML = typed.YAMLObject(`types: type: scalar: string default: "" -- name: io.k8s.api.batch.v1.JobSpec +- name: io.k8s.api.autoscaling.v2beta2.HorizontalPodAutoscalerSpec map: fields: - - name: activeDeadlineSeconds - type: - scalar: numeric - - name: backoffLimit - type: - scalar: numeric - - name: completionMode + - name: behavior type: - scalar: string - - name: completions + namedType: io.k8s.api.autoscaling.v2beta2.HorizontalPodAutoscalerBehavior + - name: maxReplicas type: scalar: numeric - - name: manualSelector + default: 0 + - name: metrics type: - scalar: boolean - - name: parallelism + list: + elementType: + namedType: io.k8s.api.autoscaling.v2beta2.MetricSpec + elementRelationship: atomic + - name: minReplicas type: scalar: numeric - - name: selector - type: - namedType: io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector - - name: suspend - type: - scalar: boolean - - name: template + - name: scaleTargetRef type: - namedType: io.k8s.api.core.v1.PodTemplateSpec + namedType: io.k8s.api.autoscaling.v2beta2.CrossVersionObjectReference default: {} - - name: ttlSecondsAfterFinished - type: - scalar: numeric -- name: 
io.k8s.api.batch.v1.JobStatus +- name: io.k8s.api.autoscaling.v2beta2.HorizontalPodAutoscalerStatus map: fields: - - name: active - type: - scalar: numeric - - name: completedIndexes - type: - scalar: string - - name: completionTime - type: - namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - name: conditions type: list: elementType: - namedType: io.k8s.api.batch.v1.JobCondition + namedType: io.k8s.api.autoscaling.v2beta2.HorizontalPodAutoscalerCondition elementRelationship: atomic - - name: failed + - name: currentMetrics + type: + list: + elementType: + namedType: io.k8s.api.autoscaling.v2beta2.MetricStatus + elementRelationship: atomic + - name: currentReplicas type: scalar: numeric - - name: startTime + default: 0 + - name: desiredReplicas + type: + scalar: numeric + default: 0 + - name: lastScaleTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - - name: succeeded + - name: observedGeneration type: scalar: numeric -- name: io.k8s.api.batch.v1.JobTemplateSpec +- name: io.k8s.api.autoscaling.v2beta2.MetricIdentifier map: fields: - - name: metadata + - name: name type: - namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta - default: {} - - name: spec + scalar: string + default: "" + - name: selector type: - namedType: io.k8s.api.batch.v1.JobSpec - default: {} -- name: io.k8s.api.batch.v1beta1.CronJob + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector +- name: io.k8s.api.autoscaling.v2beta2.MetricSpec map: fields: - - name: apiVersion + - name: containerResource type: - scalar: string - - name: kind + namedType: io.k8s.api.autoscaling.v2beta2.ContainerResourceMetricSource + - name: external type: - scalar: string - - name: metadata + namedType: io.k8s.api.autoscaling.v2beta2.ExternalMetricSource + - name: object type: - namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta - default: {} - - name: spec + namedType: io.k8s.api.autoscaling.v2beta2.ObjectMetricSource + - name: pods type: - namedType: io.k8s.api.batch.v1beta1.CronJobSpec - default: {} - - name: status + namedType: io.k8s.api.autoscaling.v2beta2.PodsMetricSource + - name: resource type: - namedType: io.k8s.api.batch.v1beta1.CronJobStatus - default: {} -- name: io.k8s.api.batch.v1beta1.CronJobSpec + namedType: io.k8s.api.autoscaling.v2beta2.ResourceMetricSource + - name: type + type: + scalar: string + default: "" +- name: io.k8s.api.autoscaling.v2beta2.MetricStatus map: fields: - - name: concurrencyPolicy + - name: containerResource type: - scalar: string - - name: failedJobsHistoryLimit + namedType: io.k8s.api.autoscaling.v2beta2.ContainerResourceMetricStatus + - name: external type: - scalar: numeric - - name: jobTemplate + namedType: io.k8s.api.autoscaling.v2beta2.ExternalMetricStatus + - name: object type: - namedType: io.k8s.api.batch.v1beta1.JobTemplateSpec - default: {} - - name: schedule + namedType: io.k8s.api.autoscaling.v2beta2.ObjectMetricStatus + - name: pods + type: + namedType: io.k8s.api.autoscaling.v2beta2.PodsMetricStatus + - name: resource + type: + namedType: io.k8s.api.autoscaling.v2beta2.ResourceMetricStatus + - name: type type: scalar: string default: "" - - name: startingDeadlineSeconds +- name: io.k8s.api.autoscaling.v2beta2.MetricTarget + map: + fields: + - name: averageUtilization type: scalar: numeric - - name: successfulJobsHistoryLimit + - name: averageValue type: - scalar: numeric - - name: suspend + namedType: io.k8s.apimachinery.pkg.api.resource.Quantity + - name: type type: - scalar: boolean -- name: io.k8s.api.batch.v1beta1.CronJobStatus + scalar: 
string + default: "" + - name: value + type: + namedType: io.k8s.apimachinery.pkg.api.resource.Quantity +- name: io.k8s.api.autoscaling.v2beta2.MetricValueStatus map: fields: - - name: active + - name: averageUtilization type: - list: - elementType: - namedType: io.k8s.api.core.v1.ObjectReference - elementRelationship: atomic - - name: lastScheduleTime + scalar: numeric + - name: averageValue type: - namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - - name: lastSuccessfulTime + namedType: io.k8s.apimachinery.pkg.api.resource.Quantity + - name: value type: - namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time -- name: io.k8s.api.batch.v1beta1.JobTemplateSpec + namedType: io.k8s.apimachinery.pkg.api.resource.Quantity +- name: io.k8s.api.autoscaling.v2beta2.ObjectMetricSource map: fields: - - name: metadata + - name: describedObject type: - namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + namedType: io.k8s.api.autoscaling.v2beta2.CrossVersionObjectReference default: {} - - name: spec + - name: metric type: - namedType: io.k8s.api.batch.v1.JobSpec + namedType: io.k8s.api.autoscaling.v2beta2.MetricIdentifier default: {} -- name: io.k8s.api.certificates.v1.CertificateSigningRequest + - name: target + type: + namedType: io.k8s.api.autoscaling.v2beta2.MetricTarget + default: {} +- name: io.k8s.api.autoscaling.v2beta2.ObjectMetricStatus map: fields: - - name: apiVersion - type: - scalar: string - - name: kind + - name: current type: - scalar: string - - name: metadata - type: - namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + namedType: io.k8s.api.autoscaling.v2beta2.MetricValueStatus default: {} - - name: spec + - name: describedObject type: - namedType: io.k8s.api.certificates.v1.CertificateSigningRequestSpec + namedType: io.k8s.api.autoscaling.v2beta2.CrossVersionObjectReference default: {} - - name: status + - name: metric type: - namedType: io.k8s.api.certificates.v1.CertificateSigningRequestStatus + namedType: io.k8s.api.autoscaling.v2beta2.MetricIdentifier default: {} -- name: io.k8s.api.certificates.v1.CertificateSigningRequestCondition +- name: io.k8s.api.autoscaling.v2beta2.PodsMetricSource map: fields: - - name: lastTransitionTime + - name: metric type: - namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time + namedType: io.k8s.api.autoscaling.v2beta2.MetricIdentifier default: {} - - name: lastUpdateTime + - name: target type: - namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time + namedType: io.k8s.api.autoscaling.v2beta2.MetricTarget default: {} - - name: message +- name: io.k8s.api.autoscaling.v2beta2.PodsMetricStatus + map: + fields: + - name: current type: - scalar: string - - name: reason + namedType: io.k8s.api.autoscaling.v2beta2.MetricValueStatus + default: {} + - name: metric type: - scalar: string - - name: status + namedType: io.k8s.api.autoscaling.v2beta2.MetricIdentifier + default: {} +- name: io.k8s.api.autoscaling.v2beta2.ResourceMetricSource + map: + fields: + - name: name type: scalar: string default: "" - - name: type + - name: target + type: + namedType: io.k8s.api.autoscaling.v2beta2.MetricTarget + default: {} +- name: io.k8s.api.autoscaling.v2beta2.ResourceMetricStatus + map: + fields: + - name: current + type: + namedType: io.k8s.api.autoscaling.v2beta2.MetricValueStatus + default: {} + - name: name type: scalar: string default: "" -- name: io.k8s.api.certificates.v1.CertificateSigningRequestSpec +- name: io.k8s.api.batch.v1.CronJob map: fields: - - name: extra + - name: apiVersion type: - map: - elementType: - list: - elementType: - 
scalar: string - elementRelationship: atomic - - name: groups + scalar: string + - name: kind type: - list: - elementType: - scalar: string - elementRelationship: atomic - - name: request + scalar: string + - name: metadata + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + default: {} + - name: spec + type: + namedType: io.k8s.api.batch.v1.CronJobSpec + default: {} + - name: status + type: + namedType: io.k8s.api.batch.v1.CronJobStatus + default: {} +- name: io.k8s.api.batch.v1.CronJobSpec + map: + fields: + - name: concurrencyPolicy type: scalar: string - - name: signerName + - name: failedJobsHistoryLimit + type: + scalar: numeric + - name: jobTemplate + type: + namedType: io.k8s.api.batch.v1.JobTemplateSpec + default: {} + - name: schedule type: scalar: string default: "" - - name: uid + - name: startingDeadlineSeconds type: - scalar: string - - name: usages + scalar: numeric + - name: successfulJobsHistoryLimit type: - list: - elementType: - scalar: string - elementRelationship: atomic - - name: username + scalar: numeric + - name: suspend type: - scalar: string -- name: io.k8s.api.certificates.v1.CertificateSigningRequestStatus + scalar: boolean +- name: io.k8s.api.batch.v1.CronJobStatus map: fields: - - name: certificate - type: - scalar: string - - name: conditions + - name: active type: list: elementType: - namedType: io.k8s.api.certificates.v1.CertificateSigningRequestCondition - elementRelationship: associative - keys: - - type -- name: io.k8s.api.certificates.v1beta1.CertificateSigningRequest + namedType: io.k8s.api.core.v1.ObjectReference + elementRelationship: atomic + - name: lastScheduleTime + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time + - name: lastSuccessfulTime + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time +- name: io.k8s.api.batch.v1.Job map: fields: - name: apiVersion @@ -2850,20 +2977,20 @@ var schemaYAML = typed.YAMLObject(`types: default: {} - name: spec type: - namedType: io.k8s.api.certificates.v1beta1.CertificateSigningRequestSpec + namedType: io.k8s.api.batch.v1.JobSpec default: {} - name: status type: - namedType: io.k8s.api.certificates.v1beta1.CertificateSigningRequestStatus + namedType: io.k8s.api.batch.v1.JobStatus default: {} -- name: io.k8s.api.certificates.v1beta1.CertificateSigningRequestCondition +- name: io.k8s.api.batch.v1.JobCondition map: fields: - - name: lastTransitionTime + - name: lastProbeTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time default: {} - - name: lastUpdateTime + - name: lastTransitionTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time default: {} @@ -2881,56 +3008,100 @@ var schemaYAML = typed.YAMLObject(`types: type: scalar: string default: "" -- name: io.k8s.api.certificates.v1beta1.CertificateSigningRequestSpec +- name: io.k8s.api.batch.v1.JobSpec map: fields: - - name: extra + - name: activeDeadlineSeconds type: - map: - elementType: - list: - elementType: - scalar: string - elementRelationship: atomic - - name: groups + scalar: numeric + - name: backoffLimit type: - list: - elementType: - scalar: string - elementRelationship: atomic - - name: request + scalar: numeric + - name: completionMode type: scalar: string - - name: signerName + - name: completions type: - scalar: string - - name: uid + scalar: numeric + - name: manualSelector + type: + scalar: boolean + - name: parallelism + type: + scalar: numeric + - name: selector + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector + - name: suspend + type: + scalar: boolean + - 
name: template + type: + namedType: io.k8s.api.core.v1.PodTemplateSpec + default: {} + - name: ttlSecondsAfterFinished + type: + scalar: numeric +- name: io.k8s.api.batch.v1.JobStatus + map: + fields: + - name: active + type: + scalar: numeric + - name: completedIndexes type: scalar: string - - name: usages + - name: completionTime + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time + - name: conditions type: list: elementType: - scalar: string + namedType: io.k8s.api.batch.v1.JobCondition elementRelationship: atomic - - name: username + - name: failed type: - scalar: string -- name: io.k8s.api.certificates.v1beta1.CertificateSigningRequestStatus + scalar: numeric + - name: ready + type: + scalar: numeric + - name: startTime + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time + - name: succeeded + type: + scalar: numeric + - name: uncountedTerminatedPods + type: + namedType: io.k8s.api.batch.v1.UncountedTerminatedPods +- name: io.k8s.api.batch.v1.JobTemplateSpec map: fields: - - name: certificate + - name: metadata type: - scalar: string - - name: conditions + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + default: {} + - name: spec + type: + namedType: io.k8s.api.batch.v1.JobSpec + default: {} +- name: io.k8s.api.batch.v1.UncountedTerminatedPods + map: + fields: + - name: failed type: list: elementType: - namedType: io.k8s.api.certificates.v1beta1.CertificateSigningRequestCondition + scalar: string elementRelationship: associative - keys: - - type -- name: io.k8s.api.coordination.v1.Lease + - name: succeeded + type: + list: + elementType: + scalar: string + elementRelationship: associative +- name: io.k8s.api.batch.v1beta1.CronJob map: fields: - name: apiVersion @@ -2945,202 +3116,472 @@ var schemaYAML = typed.YAMLObject(`types: default: {} - name: spec type: - namedType: io.k8s.api.coordination.v1.LeaseSpec + namedType: io.k8s.api.batch.v1beta1.CronJobSpec default: {} -- name: io.k8s.api.coordination.v1.LeaseSpec + - name: status + type: + namedType: io.k8s.api.batch.v1beta1.CronJobStatus + default: {} +- name: io.k8s.api.batch.v1beta1.CronJobSpec map: fields: - - name: acquireTime - type: - namedType: io.k8s.apimachinery.pkg.apis.meta.v1.MicroTime - - name: holderIdentity + - name: concurrencyPolicy type: scalar: string - - name: leaseDurationSeconds + - name: failedJobsHistoryLimit type: scalar: numeric - - name: leaseTransitions + - name: jobTemplate + type: + namedType: io.k8s.api.batch.v1beta1.JobTemplateSpec + default: {} + - name: schedule + type: + scalar: string + default: "" + - name: startingDeadlineSeconds type: scalar: numeric - - name: renewTime + - name: successfulJobsHistoryLimit type: - namedType: io.k8s.apimachinery.pkg.apis.meta.v1.MicroTime -- name: io.k8s.api.coordination.v1beta1.Lease + scalar: numeric + - name: suspend + type: + scalar: boolean +- name: io.k8s.api.batch.v1beta1.CronJobStatus map: fields: - - name: apiVersion + - name: active type: - scalar: string - - name: kind + list: + elementType: + namedType: io.k8s.api.core.v1.ObjectReference + elementRelationship: atomic + - name: lastScheduleTime type: - scalar: string + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time + - name: lastSuccessfulTime + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time +- name: io.k8s.api.batch.v1beta1.JobTemplateSpec + map: + fields: - name: metadata type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta default: {} - name: spec type: - namedType: io.k8s.api.coordination.v1beta1.LeaseSpec + namedType: 
io.k8s.api.batch.v1.JobSpec default: {} -- name: io.k8s.api.coordination.v1beta1.LeaseSpec +- name: io.k8s.api.certificates.v1.CertificateSigningRequest map: fields: - - name: acquireTime + - name: apiVersion type: - namedType: io.k8s.apimachinery.pkg.apis.meta.v1.MicroTime - - name: holderIdentity + scalar: string + - name: kind type: scalar: string - - name: leaseDurationSeconds + - name: metadata type: - scalar: numeric - - name: leaseTransitions + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + default: {} + - name: spec type: - scalar: numeric - - name: renewTime + namedType: io.k8s.api.certificates.v1.CertificateSigningRequestSpec + default: {} + - name: status type: - namedType: io.k8s.apimachinery.pkg.apis.meta.v1.MicroTime -- name: io.k8s.api.core.v1.AWSElasticBlockStoreVolumeSource + namedType: io.k8s.api.certificates.v1.CertificateSigningRequestStatus + default: {} +- name: io.k8s.api.certificates.v1.CertificateSigningRequestCondition map: fields: - - name: fsType + - name: lastTransitionTime + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time + default: {} + - name: lastUpdateTime + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time + default: {} + - name: message type: scalar: string - - name: partition + - name: reason type: - scalar: numeric - - name: readOnly + scalar: string + - name: status type: - scalar: boolean - - name: volumeID + scalar: string + default: "" + - name: type type: scalar: string default: "" -- name: io.k8s.api.core.v1.Affinity +- name: io.k8s.api.certificates.v1.CertificateSigningRequestSpec map: fields: - - name: nodeAffinity + - name: expirationSeconds type: - namedType: io.k8s.api.core.v1.NodeAffinity - - name: podAffinity + scalar: numeric + - name: extra type: - namedType: io.k8s.api.core.v1.PodAffinity - - name: podAntiAffinity + map: + elementType: + list: + elementType: + scalar: string + elementRelationship: atomic + - name: groups type: - namedType: io.k8s.api.core.v1.PodAntiAffinity -- name: io.k8s.api.core.v1.AttachedVolume - map: - fields: - - name: devicePath + list: + elementType: + scalar: string + elementRelationship: atomic + - name: request type: scalar: string - default: "" - - name: name + - name: signerName type: scalar: string default: "" -- name: io.k8s.api.core.v1.AzureDiskVolumeSource - map: - fields: - - name: cachingMode + - name: uid type: scalar: string - - name: diskName + - name: usages + type: + list: + elementType: + scalar: string + elementRelationship: atomic + - name: username type: scalar: string - default: "" - - name: diskURI +- name: io.k8s.api.certificates.v1.CertificateSigningRequestStatus + map: + fields: + - name: certificate type: scalar: string - default: "" - - name: fsType + - name: conditions + type: + list: + elementType: + namedType: io.k8s.api.certificates.v1.CertificateSigningRequestCondition + elementRelationship: associative + keys: + - type +- name: io.k8s.api.certificates.v1beta1.CertificateSigningRequest + map: + fields: + - name: apiVersion type: scalar: string - name: kind type: scalar: string - - name: readOnly + - name: metadata type: - scalar: boolean -- name: io.k8s.api.core.v1.AzureFilePersistentVolumeSource + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + default: {} + - name: spec + type: + namedType: io.k8s.api.certificates.v1beta1.CertificateSigningRequestSpec + default: {} + - name: status + type: + namedType: io.k8s.api.certificates.v1beta1.CertificateSigningRequestStatus + default: {} +- name: 
io.k8s.api.certificates.v1beta1.CertificateSigningRequestCondition map: fields: - - name: readOnly + - name: lastTransitionTime type: - scalar: boolean - - name: secretName + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time + default: {} + - name: lastUpdateTime type: - scalar: string - default: "" - - name: secretNamespace + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time + default: {} + - name: message type: scalar: string - - name: shareName + - name: reason type: scalar: string - default: "" -- name: io.k8s.api.core.v1.AzureFileVolumeSource - map: - fields: - - name: readOnly - type: - scalar: boolean - - name: secretName + - name: status type: scalar: string default: "" - - name: shareName + - name: type type: scalar: string default: "" -- name: io.k8s.api.core.v1.CSIPersistentVolumeSource +- name: io.k8s.api.certificates.v1beta1.CertificateSigningRequestSpec map: fields: - - name: controllerExpandSecretRef + - name: expirationSeconds type: - namedType: io.k8s.api.core.v1.SecretReference - - name: controllerPublishSecretRef + scalar: numeric + - name: extra type: - namedType: io.k8s.api.core.v1.SecretReference - - name: driver + map: + elementType: + list: + elementType: + scalar: string + elementRelationship: atomic + - name: groups type: - scalar: string - default: "" - - name: fsType + list: + elementType: + scalar: string + elementRelationship: atomic + - name: request type: scalar: string - - name: nodePublishSecretRef - type: - namedType: io.k8s.api.core.v1.SecretReference - - name: nodeStageSecretRef + - name: signerName type: - namedType: io.k8s.api.core.v1.SecretReference - - name: readOnly + scalar: string + - name: uid type: - scalar: boolean - - name: volumeAttributes + scalar: string + - name: usages type: - map: + list: elementType: scalar: string - - name: volumeHandle + elementRelationship: atomic + - name: username type: scalar: string - default: "" -- name: io.k8s.api.core.v1.CSIVolumeSource +- name: io.k8s.api.certificates.v1beta1.CertificateSigningRequestStatus map: fields: - - name: driver + - name: certificate type: scalar: string - default: "" - - name: fsType + - name: conditions type: - scalar: string - - name: nodePublishSecretRef + list: + elementType: + namedType: io.k8s.api.certificates.v1beta1.CertificateSigningRequestCondition + elementRelationship: associative + keys: + - type +- name: io.k8s.api.coordination.v1.Lease + map: + fields: + - name: apiVersion type: - namedType: io.k8s.api.core.v1.LocalObjectReference - - name: readOnly + scalar: string + - name: kind + type: + scalar: string + - name: metadata + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + default: {} + - name: spec + type: + namedType: io.k8s.api.coordination.v1.LeaseSpec + default: {} +- name: io.k8s.api.coordination.v1.LeaseSpec + map: + fields: + - name: acquireTime + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.MicroTime + - name: holderIdentity + type: + scalar: string + - name: leaseDurationSeconds + type: + scalar: numeric + - name: leaseTransitions + type: + scalar: numeric + - name: renewTime + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.MicroTime +- name: io.k8s.api.coordination.v1beta1.Lease + map: + fields: + - name: apiVersion + type: + scalar: string + - name: kind + type: + scalar: string + - name: metadata + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + default: {} + - name: spec + type: + namedType: io.k8s.api.coordination.v1beta1.LeaseSpec + default: {} +- name: 
io.k8s.api.coordination.v1beta1.LeaseSpec + map: + fields: + - name: acquireTime + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.MicroTime + - name: holderIdentity + type: + scalar: string + - name: leaseDurationSeconds + type: + scalar: numeric + - name: leaseTransitions + type: + scalar: numeric + - name: renewTime + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.MicroTime +- name: io.k8s.api.core.v1.AWSElasticBlockStoreVolumeSource + map: + fields: + - name: fsType + type: + scalar: string + - name: partition + type: + scalar: numeric + - name: readOnly + type: + scalar: boolean + - name: volumeID + type: + scalar: string + default: "" +- name: io.k8s.api.core.v1.Affinity + map: + fields: + - name: nodeAffinity + type: + namedType: io.k8s.api.core.v1.NodeAffinity + - name: podAffinity + type: + namedType: io.k8s.api.core.v1.PodAffinity + - name: podAntiAffinity + type: + namedType: io.k8s.api.core.v1.PodAntiAffinity +- name: io.k8s.api.core.v1.AttachedVolume + map: + fields: + - name: devicePath + type: + scalar: string + default: "" + - name: name + type: + scalar: string + default: "" +- name: io.k8s.api.core.v1.AzureDiskVolumeSource + map: + fields: + - name: cachingMode + type: + scalar: string + - name: diskName + type: + scalar: string + default: "" + - name: diskURI + type: + scalar: string + default: "" + - name: fsType + type: + scalar: string + - name: kind + type: + scalar: string + - name: readOnly + type: + scalar: boolean +- name: io.k8s.api.core.v1.AzureFilePersistentVolumeSource + map: + fields: + - name: readOnly + type: + scalar: boolean + - name: secretName + type: + scalar: string + default: "" + - name: secretNamespace + type: + scalar: string + - name: shareName + type: + scalar: string + default: "" +- name: io.k8s.api.core.v1.AzureFileVolumeSource + map: + fields: + - name: readOnly + type: + scalar: boolean + - name: secretName + type: + scalar: string + default: "" + - name: shareName + type: + scalar: string + default: "" +- name: io.k8s.api.core.v1.CSIPersistentVolumeSource + map: + fields: + - name: controllerExpandSecretRef + type: + namedType: io.k8s.api.core.v1.SecretReference + - name: controllerPublishSecretRef + type: + namedType: io.k8s.api.core.v1.SecretReference + - name: driver + type: + scalar: string + default: "" + - name: fsType + type: + scalar: string + - name: nodePublishSecretRef + type: + namedType: io.k8s.api.core.v1.SecretReference + - name: nodeStageSecretRef + type: + namedType: io.k8s.api.core.v1.SecretReference + - name: readOnly + type: + scalar: boolean + - name: volumeAttributes + type: + map: + elementType: + scalar: string + - name: volumeHandle + type: + scalar: string + default: "" +- name: io.k8s.api.core.v1.CSIVolumeSource + map: + fields: + - name: driver + type: + scalar: string + default: "" + - name: fsType + type: + scalar: string + - name: nodePublishSecretRef + type: + namedType: io.k8s.api.core.v1.LocalObjectReference + - name: readOnly type: scalar: boolean - name: volumeAttributes @@ -3335,6 +3776,7 @@ var schemaYAML = typed.YAMLObject(`types: - name: optional type: scalar: boolean + elementRelationship: atomic - name: io.k8s.api.core.v1.ConfigMapNodeConfigSource map: fields: @@ -3683,6 +4125,7 @@ var schemaYAML = typed.YAMLObject(`types: - name: targetRef type: namedType: io.k8s.api.core.v1.ObjectReference + elementRelationship: atomic - name: io.k8s.api.core.v1.EndpointPort map: fields: @@ -3699,6 +4142,7 @@ var schemaYAML = typed.YAMLObject(`types: - name: protocol type: scalar: string + 
elementRelationship: atomic - name: io.k8s.api.core.v1.EndpointSubset map: fields: @@ -3829,7 +4273,10 @@ var schemaYAML = typed.YAMLObject(`types: list: elementType: namedType: io.k8s.api.core.v1.ContainerPort - elementRelationship: atomic + elementRelationship: associative + keys: + - containerPort + - protocol - name: readinessProbe type: namedType: io.k8s.api.core.v1.Probe @@ -4067,6 +4514,17 @@ var schemaYAML = typed.YAMLObject(`types: - name: readOnly type: scalar: boolean +- name: io.k8s.api.core.v1.GRPCAction + map: + fields: + - name: port + type: + scalar: numeric + default: 0 + - name: service + type: + scalar: string + default: "" - name: io.k8s.api.core.v1.GitRepoVolumeSource map: fields: @@ -4144,18 +4602,6 @@ var schemaYAML = typed.YAMLObject(`types: type: scalar: string default: "" -- name: io.k8s.api.core.v1.Handler - map: - fields: - - name: exec - type: - namedType: io.k8s.api.core.v1.ExecAction - - name: httpGet - type: - namedType: io.k8s.api.core.v1.HTTPGetAction - - name: tcpSocket - type: - namedType: io.k8s.api.core.v1.TCPSocketAction - name: io.k8s.api.core.v1.HostAlias map: fields: @@ -4281,10 +4727,22 @@ var schemaYAML = typed.YAMLObject(`types: fields: - name: postStart type: - namedType: io.k8s.api.core.v1.Handler + namedType: io.k8s.api.core.v1.LifecycleHandler - name: preStop type: - namedType: io.k8s.api.core.v1.Handler + namedType: io.k8s.api.core.v1.LifecycleHandler +- name: io.k8s.api.core.v1.LifecycleHandler + map: + fields: + - name: exec + type: + namedType: io.k8s.api.core.v1.ExecAction + - name: httpGet + type: + namedType: io.k8s.api.core.v1.HTTPGetAction + - name: tcpSocket + type: + namedType: io.k8s.api.core.v1.TCPSocketAction - name: io.k8s.api.core.v1.LimitRange map: fields: @@ -4373,6 +4831,7 @@ var schemaYAML = typed.YAMLObject(`types: - name: name type: scalar: string + elementRelationship: atomic - name: io.k8s.api.core.v1.LocalVolumeSource map: fields: @@ -4568,6 +5027,7 @@ var schemaYAML = typed.YAMLObject(`types: elementType: namedType: io.k8s.api.core.v1.NodeSelectorTerm elementRelationship: atomic + elementRelationship: atomic - name: io.k8s.api.core.v1.NodeSelectorRequirement map: fields: @@ -4600,6 +5060,7 @@ var schemaYAML = typed.YAMLObject(`types: elementType: namedType: io.k8s.api.core.v1.NodeSelectorRequirement elementRelationship: atomic + elementRelationship: atomic - name: io.k8s.api.core.v1.NodeSpec map: fields: @@ -4744,6 +5205,7 @@ var schemaYAML = typed.YAMLObject(`types: type: scalar: string default: "" + elementRelationship: atomic - name: io.k8s.api.core.v1.ObjectReference map: fields: @@ -4768,6 +5230,7 @@ var schemaYAML = typed.YAMLObject(`types: - name: uid type: scalar: string + elementRelationship: atomic - name: io.k8s.api.core.v1.PersistentVolume map: fields: @@ -4847,6 +5310,9 @@ var schemaYAML = typed.YAMLObject(`types: - name: dataSource type: namedType: io.k8s.api.core.v1.TypedLocalObjectReference + - name: dataSourceRef + type: + namedType: io.k8s.api.core.v1.TypedLocalObjectReference - name: resources type: namedType: io.k8s.api.core.v1.ResourceRequirements @@ -4872,6 +5338,11 @@ var schemaYAML = typed.YAMLObject(`types: elementType: scalar: string elementRelationship: atomic + - name: allocatedResources + type: + map: + elementType: + namedType: io.k8s.apimachinery.pkg.api.resource.Quantity - name: capacity type: map: @@ -4888,7 +5359,10 @@ var schemaYAML = typed.YAMLObject(`types: - name: phase type: scalar: string -- name: io.k8s.api.core.v1.PersistentVolumeClaimTemplate + - name: resizeStatus + type: 
+ scalar: string +- name: io.k8s.api.core.v1.PersistentVolumeClaimTemplate map: fields: - name: metadata @@ -5163,6 +5637,13 @@ var schemaYAML = typed.YAMLObject(`types: - name: ip type: scalar: string +- name: io.k8s.api.core.v1.PodOS + map: + fields: + - name: name + type: + scalar: string + default: "" - name: io.k8s.api.core.v1.PodReadinessGate map: fields: @@ -5290,6 +5771,10 @@ var schemaYAML = typed.YAMLObject(`types: map: elementType: scalar: string + elementRelationship: atomic + - name: os + type: + namedType: io.k8s.api.core.v1.PodOS - name: overhead type: map: @@ -5499,6 +5984,9 @@ var schemaYAML = typed.YAMLObject(`types: - name: failureThreshold type: scalar: numeric + - name: grpc + type: + namedType: io.k8s.api.core.v1.GRPCAction - name: httpGet type: namedType: io.k8s.api.core.v1.HTTPGetAction @@ -5673,6 +6161,7 @@ var schemaYAML = typed.YAMLObject(`types: map: elementType: scalar: string + elementRelationship: atomic - name: template type: namedType: io.k8s.api.core.v1.PodTemplateSpec @@ -5717,6 +6206,7 @@ var schemaYAML = typed.YAMLObject(`types: type: scalar: string default: "" + elementRelationship: atomic - name: io.k8s.api.core.v1.ResourceQuota map: fields: @@ -5875,6 +6365,7 @@ var schemaYAML = typed.YAMLObject(`types: elementType: namedType: io.k8s.api.core.v1.ScopedResourceSelectorRequirement elementRelationship: atomic + elementRelationship: atomic - name: io.k8s.api.core.v1.ScopedResourceSelectorRequirement map: fields: @@ -5958,6 +6449,7 @@ var schemaYAML = typed.YAMLObject(`types: - name: optional type: scalar: boolean + elementRelationship: atomic - name: io.k8s.api.core.v1.SecretProjection map: fields: @@ -5982,6 +6474,7 @@ var schemaYAML = typed.YAMLObject(`types: - name: namespace type: scalar: string + elementRelationship: atomic - name: io.k8s.api.core.v1.SecretVolumeSource map: fields: @@ -6195,18 +6688,13 @@ var schemaYAML = typed.YAMLObject(`types: map: elementType: scalar: string + elementRelationship: atomic - name: sessionAffinity type: scalar: string - name: sessionAffinityConfig type: namedType: io.k8s.api.core.v1.SessionAffinityConfig - - name: topologyKeys - type: - list: - elementType: - scalar: string - elementRelationship: atomic - name: type type: scalar: string @@ -6345,6 +6833,7 @@ var schemaYAML = typed.YAMLObject(`types: elementType: namedType: io.k8s.api.core.v1.TopologySelectorLabelRequirement elementRelationship: atomic + elementRelationship: atomic - name: io.k8s.api.core.v1.TopologySpreadConstraint map: fields: @@ -6377,6 +6866,7 @@ var schemaYAML = typed.YAMLObject(`types: type: scalar: string default: "" + elementRelationship: atomic - name: io.k8s.api.core.v1.Volume map: fields: @@ -6562,6 +7052,9 @@ var schemaYAML = typed.YAMLObject(`types: - name: gmsaCredentialSpecName type: scalar: string + - name: hostProcess + type: + scalar: boolean - name: runAsUserName type: scalar: string @@ -6634,6 +7127,7 @@ var schemaYAML = typed.YAMLObject(`types: - name: protocol type: scalar: string + elementRelationship: atomic - name: io.k8s.api.discovery.v1.EndpointSlice map: fields: @@ -7408,19 +7902,324 @@ var schemaYAML = typed.YAMLObject(`types: type: list: elementType: - namedType: io.k8s.api.extensions.v1beta1.NetworkPolicyIngressRule + namedType: io.k8s.api.extensions.v1beta1.NetworkPolicyIngressRule + elementRelationship: atomic + - name: podSelector + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector + default: {} + - name: policyTypes + type: + list: + elementType: + scalar: string + elementRelationship: atomic +- 
name: io.k8s.api.extensions.v1beta1.PodSecurityPolicy + map: + fields: + - name: apiVersion + type: + scalar: string + - name: kind + type: + scalar: string + - name: metadata + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + default: {} + - name: spec + type: + namedType: io.k8s.api.extensions.v1beta1.PodSecurityPolicySpec + default: {} +- name: io.k8s.api.extensions.v1beta1.PodSecurityPolicySpec + map: + fields: + - name: allowPrivilegeEscalation + type: + scalar: boolean + - name: allowedCSIDrivers + type: + list: + elementType: + namedType: io.k8s.api.extensions.v1beta1.AllowedCSIDriver + elementRelationship: atomic + - name: allowedCapabilities + type: + list: + elementType: + scalar: string + elementRelationship: atomic + - name: allowedFlexVolumes + type: + list: + elementType: + namedType: io.k8s.api.extensions.v1beta1.AllowedFlexVolume + elementRelationship: atomic + - name: allowedHostPaths + type: + list: + elementType: + namedType: io.k8s.api.extensions.v1beta1.AllowedHostPath + elementRelationship: atomic + - name: allowedProcMountTypes + type: + list: + elementType: + scalar: string + elementRelationship: atomic + - name: allowedUnsafeSysctls + type: + list: + elementType: + scalar: string + elementRelationship: atomic + - name: defaultAddCapabilities + type: + list: + elementType: + scalar: string + elementRelationship: atomic + - name: defaultAllowPrivilegeEscalation + type: + scalar: boolean + - name: forbiddenSysctls + type: + list: + elementType: + scalar: string + elementRelationship: atomic + - name: fsGroup + type: + namedType: io.k8s.api.extensions.v1beta1.FSGroupStrategyOptions + default: {} + - name: hostIPC + type: + scalar: boolean + - name: hostNetwork + type: + scalar: boolean + - name: hostPID + type: + scalar: boolean + - name: hostPorts + type: + list: + elementType: + namedType: io.k8s.api.extensions.v1beta1.HostPortRange + elementRelationship: atomic + - name: privileged + type: + scalar: boolean + - name: readOnlyRootFilesystem + type: + scalar: boolean + - name: requiredDropCapabilities + type: + list: + elementType: + scalar: string + elementRelationship: atomic + - name: runAsGroup + type: + namedType: io.k8s.api.extensions.v1beta1.RunAsGroupStrategyOptions + - name: runAsUser + type: + namedType: io.k8s.api.extensions.v1beta1.RunAsUserStrategyOptions + default: {} + - name: runtimeClass + type: + namedType: io.k8s.api.extensions.v1beta1.RuntimeClassStrategyOptions + - name: seLinux + type: + namedType: io.k8s.api.extensions.v1beta1.SELinuxStrategyOptions + default: {} + - name: supplementalGroups + type: + namedType: io.k8s.api.extensions.v1beta1.SupplementalGroupsStrategyOptions + default: {} + - name: volumes + type: + list: + elementType: + scalar: string + elementRelationship: atomic +- name: io.k8s.api.extensions.v1beta1.ReplicaSet + map: + fields: + - name: apiVersion + type: + scalar: string + - name: kind + type: + scalar: string + - name: metadata + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + default: {} + - name: spec + type: + namedType: io.k8s.api.extensions.v1beta1.ReplicaSetSpec + default: {} + - name: status + type: + namedType: io.k8s.api.extensions.v1beta1.ReplicaSetStatus + default: {} +- name: io.k8s.api.extensions.v1beta1.ReplicaSetCondition + map: + fields: + - name: lastTransitionTime + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time + default: {} + - name: message + type: + scalar: string + - name: reason + type: + scalar: string + - name: status + type: + scalar: string 
+ default: "" + - name: type + type: + scalar: string + default: "" +- name: io.k8s.api.extensions.v1beta1.ReplicaSetSpec + map: + fields: + - name: minReadySeconds + type: + scalar: numeric + - name: replicas + type: + scalar: numeric + - name: selector + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector + - name: template + type: + namedType: io.k8s.api.core.v1.PodTemplateSpec + default: {} +- name: io.k8s.api.extensions.v1beta1.ReplicaSetStatus + map: + fields: + - name: availableReplicas + type: + scalar: numeric + - name: conditions + type: + list: + elementType: + namedType: io.k8s.api.extensions.v1beta1.ReplicaSetCondition + elementRelationship: associative + keys: + - type + - name: fullyLabeledReplicas + type: + scalar: numeric + - name: observedGeneration + type: + scalar: numeric + - name: readyReplicas + type: + scalar: numeric + - name: replicas + type: + scalar: numeric + default: 0 +- name: io.k8s.api.extensions.v1beta1.RollbackConfig + map: + fields: + - name: revision + type: + scalar: numeric +- name: io.k8s.api.extensions.v1beta1.RollingUpdateDaemonSet + map: + fields: + - name: maxSurge + type: + namedType: io.k8s.apimachinery.pkg.util.intstr.IntOrString + - name: maxUnavailable + type: + namedType: io.k8s.apimachinery.pkg.util.intstr.IntOrString +- name: io.k8s.api.extensions.v1beta1.RollingUpdateDeployment + map: + fields: + - name: maxSurge + type: + namedType: io.k8s.apimachinery.pkg.util.intstr.IntOrString + - name: maxUnavailable + type: + namedType: io.k8s.apimachinery.pkg.util.intstr.IntOrString +- name: io.k8s.api.extensions.v1beta1.RunAsGroupStrategyOptions + map: + fields: + - name: ranges + type: + list: + elementType: + namedType: io.k8s.api.extensions.v1beta1.IDRange + elementRelationship: atomic + - name: rule + type: + scalar: string + default: "" +- name: io.k8s.api.extensions.v1beta1.RunAsUserStrategyOptions + map: + fields: + - name: ranges + type: + list: + elementType: + namedType: io.k8s.api.extensions.v1beta1.IDRange + elementRelationship: atomic + - name: rule + type: + scalar: string + default: "" +- name: io.k8s.api.extensions.v1beta1.RuntimeClassStrategyOptions + map: + fields: + - name: allowedRuntimeClassNames + type: + list: + elementType: + scalar: string + elementRelationship: atomic + - name: defaultRuntimeClassName + type: + scalar: string +- name: io.k8s.api.extensions.v1beta1.SELinuxStrategyOptions + map: + fields: + - name: rule + type: + scalar: string + default: "" + - name: seLinuxOptions + type: + namedType: io.k8s.api.core.v1.SELinuxOptions +- name: io.k8s.api.extensions.v1beta1.SupplementalGroupsStrategyOptions + map: + fields: + - name: ranges + type: + list: + elementType: + namedType: io.k8s.api.extensions.v1beta1.IDRange elementRelationship: atomic - - name: podSelector + - name: rule type: - namedType: io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector - default: {} - - name: policyTypes + scalar: string +- name: io.k8s.api.flowcontrol.v1alpha1.FlowDistinguisherMethod + map: + fields: + - name: type type: - list: - elementType: - scalar: string - elementRelationship: atomic -- name: io.k8s.api.extensions.v1beta1.PodSecurityPolicy + scalar: string + default: "" +- name: io.k8s.api.flowcontrol.v1alpha1.FlowSchema map: fields: - name: apiVersion @@ -7435,121 +8234,132 @@ var schemaYAML = typed.YAMLObject(`types: default: {} - name: spec type: - namedType: io.k8s.api.extensions.v1beta1.PodSecurityPolicySpec + namedType: io.k8s.api.flowcontrol.v1alpha1.FlowSchemaSpec default: {} -- name: 
io.k8s.api.extensions.v1beta1.PodSecurityPolicySpec + - name: status + type: + namedType: io.k8s.api.flowcontrol.v1alpha1.FlowSchemaStatus + default: {} +- name: io.k8s.api.flowcontrol.v1alpha1.FlowSchemaCondition map: fields: - - name: allowPrivilegeEscalation + - name: lastTransitionTime type: - scalar: boolean - - name: allowedCSIDrivers + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time + default: {} + - name: message type: - list: - elementType: - namedType: io.k8s.api.extensions.v1beta1.AllowedCSIDriver - elementRelationship: atomic - - name: allowedCapabilities + scalar: string + - name: reason type: - list: - elementType: - scalar: string - elementRelationship: atomic - - name: allowedFlexVolumes + scalar: string + - name: status type: - list: - elementType: - namedType: io.k8s.api.extensions.v1beta1.AllowedFlexVolume - elementRelationship: atomic - - name: allowedHostPaths + scalar: string + - name: type type: - list: - elementType: - namedType: io.k8s.api.extensions.v1beta1.AllowedHostPath - elementRelationship: atomic - - name: allowedProcMountTypes + scalar: string +- name: io.k8s.api.flowcontrol.v1alpha1.FlowSchemaSpec + map: + fields: + - name: distinguisherMethod + type: + namedType: io.k8s.api.flowcontrol.v1alpha1.FlowDistinguisherMethod + - name: matchingPrecedence + type: + scalar: numeric + default: 0 + - name: priorityLevelConfiguration + type: + namedType: io.k8s.api.flowcontrol.v1alpha1.PriorityLevelConfigurationReference + default: {} + - name: rules type: list: elementType: - scalar: string + namedType: io.k8s.api.flowcontrol.v1alpha1.PolicyRulesWithSubjects elementRelationship: atomic - - name: allowedUnsafeSysctls +- name: io.k8s.api.flowcontrol.v1alpha1.FlowSchemaStatus + map: + fields: + - name: conditions type: list: elementType: - scalar: string - elementRelationship: atomic - - name: defaultAddCapabilities + namedType: io.k8s.api.flowcontrol.v1alpha1.FlowSchemaCondition + elementRelationship: associative + keys: + - type +- name: io.k8s.api.flowcontrol.v1alpha1.GroupSubject + map: + fields: + - name: name + type: + scalar: string + default: "" +- name: io.k8s.api.flowcontrol.v1alpha1.LimitResponse + map: + fields: + - name: queuing + type: + namedType: io.k8s.api.flowcontrol.v1alpha1.QueuingConfiguration + - name: type + type: + scalar: string + default: "" + unions: + - discriminator: type + fields: + - fieldName: queuing + discriminatorValue: Queuing +- name: io.k8s.api.flowcontrol.v1alpha1.LimitedPriorityLevelConfiguration + map: + fields: + - name: assuredConcurrencyShares + type: + scalar: numeric + default: 0 + - name: limitResponse + type: + namedType: io.k8s.api.flowcontrol.v1alpha1.LimitResponse + default: {} +- name: io.k8s.api.flowcontrol.v1alpha1.NonResourcePolicyRule + map: + fields: + - name: nonResourceURLs type: list: elementType: scalar: string - elementRelationship: atomic - - name: defaultAllowPrivilegeEscalation - type: - scalar: boolean - - name: forbiddenSysctls + elementRelationship: associative + - name: verbs type: list: elementType: scalar: string - elementRelationship: atomic - - name: fsGroup - type: - namedType: io.k8s.api.extensions.v1beta1.FSGroupStrategyOptions - default: {} - - name: hostIPC - type: - scalar: boolean - - name: hostNetwork - type: - scalar: boolean - - name: hostPID - type: - scalar: boolean - - name: hostPorts + elementRelationship: associative +- name: io.k8s.api.flowcontrol.v1alpha1.PolicyRulesWithSubjects + map: + fields: + - name: nonResourceRules type: list: elementType: - namedType: 
io.k8s.api.extensions.v1beta1.HostPortRange + namedType: io.k8s.api.flowcontrol.v1alpha1.NonResourcePolicyRule elementRelationship: atomic - - name: privileged - type: - scalar: boolean - - name: readOnlyRootFilesystem - type: - scalar: boolean - - name: requiredDropCapabilities + - name: resourceRules type: list: elementType: - scalar: string + namedType: io.k8s.api.flowcontrol.v1alpha1.ResourcePolicyRule elementRelationship: atomic - - name: runAsGroup - type: - namedType: io.k8s.api.extensions.v1beta1.RunAsGroupStrategyOptions - - name: runAsUser - type: - namedType: io.k8s.api.extensions.v1beta1.RunAsUserStrategyOptions - default: {} - - name: runtimeClass - type: - namedType: io.k8s.api.extensions.v1beta1.RuntimeClassStrategyOptions - - name: seLinux - type: - namedType: io.k8s.api.extensions.v1beta1.SELinuxStrategyOptions - default: {} - - name: supplementalGroups - type: - namedType: io.k8s.api.extensions.v1beta1.SupplementalGroupsStrategyOptions - default: {} - - name: volumes + - name: subjects type: list: elementType: - scalar: string + namedType: io.k8s.api.flowcontrol.v1alpha1.Subject elementRelationship: atomic -- name: io.k8s.api.extensions.v1beta1.ReplicaSet +- name: io.k8s.api.flowcontrol.v1alpha1.PriorityLevelConfiguration map: fields: - name: apiVersion @@ -7564,13 +8374,13 @@ var schemaYAML = typed.YAMLObject(`types: default: {} - name: spec type: - namedType: io.k8s.api.extensions.v1beta1.ReplicaSetSpec + namedType: io.k8s.api.flowcontrol.v1alpha1.PriorityLevelConfigurationSpec default: {} - name: status type: - namedType: io.k8s.api.extensions.v1beta1.ReplicaSetStatus + namedType: io.k8s.api.flowcontrol.v1alpha1.PriorityLevelConfigurationStatus default: {} -- name: io.k8s.api.extensions.v1beta1.ReplicaSetCondition +- name: io.k8s.api.flowcontrol.v1alpha1.PriorityLevelConfigurationCondition map: fields: - name: lastTransitionTime @@ -7586,146 +8396,138 @@ var schemaYAML = typed.YAMLObject(`types: - name: status type: scalar: string - default: "" - name: type type: scalar: string - default: "" -- name: io.k8s.api.extensions.v1beta1.ReplicaSetSpec +- name: io.k8s.api.flowcontrol.v1alpha1.PriorityLevelConfigurationReference map: fields: - - name: minReadySeconds - type: - scalar: numeric - - name: replicas - type: - scalar: numeric - - name: selector - type: - namedType: io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector - - name: template + - name: name type: - namedType: io.k8s.api.core.v1.PodTemplateSpec - default: {} -- name: io.k8s.api.extensions.v1beta1.ReplicaSetStatus + scalar: string + default: "" +- name: io.k8s.api.flowcontrol.v1alpha1.PriorityLevelConfigurationSpec map: fields: - - name: availableReplicas - type: - scalar: numeric - - name: conditions - type: - list: - elementType: - namedType: io.k8s.api.extensions.v1beta1.ReplicaSetCondition - elementRelationship: associative - keys: - - type - - name: fullyLabeledReplicas - type: - scalar: numeric - - name: observedGeneration - type: - scalar: numeric - - name: readyReplicas + - name: limited type: - scalar: numeric - - name: replicas + namedType: io.k8s.api.flowcontrol.v1alpha1.LimitedPriorityLevelConfiguration + - name: type type: - scalar: numeric - default: 0 -- name: io.k8s.api.extensions.v1beta1.RollbackConfig + scalar: string + default: "" + unions: + - discriminator: type + fields: + - fieldName: limited + discriminatorValue: Limited +- name: io.k8s.api.flowcontrol.v1alpha1.PriorityLevelConfigurationStatus map: fields: - - name: revision + - name: conditions type: - scalar: numeric -- name: 
io.k8s.api.extensions.v1beta1.RollingUpdateDaemonSet + list: + elementType: + namedType: io.k8s.api.flowcontrol.v1alpha1.PriorityLevelConfigurationCondition + elementRelationship: associative + keys: + - type +- name: io.k8s.api.flowcontrol.v1alpha1.QueuingConfiguration map: fields: - - name: maxSurge - type: - namedType: io.k8s.apimachinery.pkg.util.intstr.IntOrString - - name: maxUnavailable + - name: handSize type: - namedType: io.k8s.apimachinery.pkg.util.intstr.IntOrString -- name: io.k8s.api.extensions.v1beta1.RollingUpdateDeployment - map: - fields: - - name: maxSurge + scalar: numeric + default: 0 + - name: queueLengthLimit type: - namedType: io.k8s.apimachinery.pkg.util.intstr.IntOrString - - name: maxUnavailable + scalar: numeric + default: 0 + - name: queues type: - namedType: io.k8s.apimachinery.pkg.util.intstr.IntOrString -- name: io.k8s.api.extensions.v1beta1.RunAsGroupStrategyOptions + scalar: numeric + default: 0 +- name: io.k8s.api.flowcontrol.v1alpha1.ResourcePolicyRule map: fields: - - name: ranges + - name: apiGroups type: list: elementType: - namedType: io.k8s.api.extensions.v1beta1.IDRange - elementRelationship: atomic - - name: rule + scalar: string + elementRelationship: associative + - name: clusterScope type: - scalar: string - default: "" -- name: io.k8s.api.extensions.v1beta1.RunAsUserStrategyOptions - map: - fields: - - name: ranges + scalar: boolean + - name: namespaces type: list: elementType: - namedType: io.k8s.api.extensions.v1beta1.IDRange - elementRelationship: atomic - - name: rule + scalar: string + elementRelationship: associative + - name: resources type: - scalar: string - default: "" -- name: io.k8s.api.extensions.v1beta1.RuntimeClassStrategyOptions - map: - fields: - - name: allowedRuntimeClassNames + list: + elementType: + scalar: string + elementRelationship: associative + - name: verbs type: list: elementType: scalar: string - elementRelationship: atomic - - name: defaultRuntimeClassName + elementRelationship: associative +- name: io.k8s.api.flowcontrol.v1alpha1.ServiceAccountSubject + map: + fields: + - name: name type: scalar: string -- name: io.k8s.api.extensions.v1beta1.SELinuxStrategyOptions + default: "" + - name: namespace + type: + scalar: string + default: "" +- name: io.k8s.api.flowcontrol.v1alpha1.Subject map: fields: - - name: rule + - name: group + type: + namedType: io.k8s.api.flowcontrol.v1alpha1.GroupSubject + - name: kind type: scalar: string default: "" - - name: seLinuxOptions + - name: serviceAccount type: - namedType: io.k8s.api.core.v1.SELinuxOptions -- name: io.k8s.api.extensions.v1beta1.SupplementalGroupsStrategyOptions + namedType: io.k8s.api.flowcontrol.v1alpha1.ServiceAccountSubject + - name: user + type: + namedType: io.k8s.api.flowcontrol.v1alpha1.UserSubject + unions: + - discriminator: kind + fields: + - fieldName: group + discriminatorValue: Group + - fieldName: serviceAccount + discriminatorValue: ServiceAccount + - fieldName: user + discriminatorValue: User +- name: io.k8s.api.flowcontrol.v1alpha1.UserSubject map: fields: - - name: ranges - type: - list: - elementType: - namedType: io.k8s.api.extensions.v1beta1.IDRange - elementRelationship: atomic - - name: rule + - name: name type: scalar: string -- name: io.k8s.api.flowcontrol.v1alpha1.FlowDistinguisherMethod + default: "" +- name: io.k8s.api.flowcontrol.v1beta1.FlowDistinguisherMethod map: fields: - name: type type: scalar: string default: "" -- name: io.k8s.api.flowcontrol.v1alpha1.FlowSchema +- name: io.k8s.api.flowcontrol.v1beta1.FlowSchema map: fields: 
- name: apiVersion @@ -7740,13 +8542,13 @@ var schemaYAML = typed.YAMLObject(`types: default: {} - name: spec type: - namedType: io.k8s.api.flowcontrol.v1alpha1.FlowSchemaSpec + namedType: io.k8s.api.flowcontrol.v1beta1.FlowSchemaSpec default: {} - name: status type: - namedType: io.k8s.api.flowcontrol.v1alpha1.FlowSchemaStatus + namedType: io.k8s.api.flowcontrol.v1beta1.FlowSchemaStatus default: {} -- name: io.k8s.api.flowcontrol.v1alpha1.FlowSchemaCondition +- name: io.k8s.api.flowcontrol.v1beta1.FlowSchemaCondition map: fields: - name: lastTransitionTime @@ -7765,50 +8567,50 @@ var schemaYAML = typed.YAMLObject(`types: - name: type type: scalar: string -- name: io.k8s.api.flowcontrol.v1alpha1.FlowSchemaSpec +- name: io.k8s.api.flowcontrol.v1beta1.FlowSchemaSpec map: fields: - name: distinguisherMethod type: - namedType: io.k8s.api.flowcontrol.v1alpha1.FlowDistinguisherMethod + namedType: io.k8s.api.flowcontrol.v1beta1.FlowDistinguisherMethod - name: matchingPrecedence type: scalar: numeric default: 0 - name: priorityLevelConfiguration type: - namedType: io.k8s.api.flowcontrol.v1alpha1.PriorityLevelConfigurationReference + namedType: io.k8s.api.flowcontrol.v1beta1.PriorityLevelConfigurationReference default: {} - name: rules type: list: elementType: - namedType: io.k8s.api.flowcontrol.v1alpha1.PolicyRulesWithSubjects + namedType: io.k8s.api.flowcontrol.v1beta1.PolicyRulesWithSubjects elementRelationship: atomic -- name: io.k8s.api.flowcontrol.v1alpha1.FlowSchemaStatus +- name: io.k8s.api.flowcontrol.v1beta1.FlowSchemaStatus map: fields: - name: conditions type: list: elementType: - namedType: io.k8s.api.flowcontrol.v1alpha1.FlowSchemaCondition + namedType: io.k8s.api.flowcontrol.v1beta1.FlowSchemaCondition elementRelationship: associative keys: - type -- name: io.k8s.api.flowcontrol.v1alpha1.GroupSubject +- name: io.k8s.api.flowcontrol.v1beta1.GroupSubject map: fields: - name: name type: scalar: string default: "" -- name: io.k8s.api.flowcontrol.v1alpha1.LimitResponse +- name: io.k8s.api.flowcontrol.v1beta1.LimitResponse map: fields: - name: queuing type: - namedType: io.k8s.api.flowcontrol.v1alpha1.QueuingConfiguration + namedType: io.k8s.api.flowcontrol.v1beta1.QueuingConfiguration - name: type type: scalar: string @@ -7818,7 +8620,7 @@ var schemaYAML = typed.YAMLObject(`types: fields: - fieldName: queuing discriminatorValue: Queuing -- name: io.k8s.api.flowcontrol.v1alpha1.LimitedPriorityLevelConfiguration +- name: io.k8s.api.flowcontrol.v1beta1.LimitedPriorityLevelConfiguration map: fields: - name: assuredConcurrencyShares @@ -7827,9 +8629,9 @@ var schemaYAML = typed.YAMLObject(`types: default: 0 - name: limitResponse type: - namedType: io.k8s.api.flowcontrol.v1alpha1.LimitResponse + namedType: io.k8s.api.flowcontrol.v1beta1.LimitResponse default: {} -- name: io.k8s.api.flowcontrol.v1alpha1.NonResourcePolicyRule +- name: io.k8s.api.flowcontrol.v1beta1.NonResourcePolicyRule map: fields: - name: nonResourceURLs @@ -7844,28 +8646,28 @@ var schemaYAML = typed.YAMLObject(`types: elementType: scalar: string elementRelationship: associative -- name: io.k8s.api.flowcontrol.v1alpha1.PolicyRulesWithSubjects +- name: io.k8s.api.flowcontrol.v1beta1.PolicyRulesWithSubjects map: fields: - name: nonResourceRules type: list: elementType: - namedType: io.k8s.api.flowcontrol.v1alpha1.NonResourcePolicyRule + namedType: io.k8s.api.flowcontrol.v1beta1.NonResourcePolicyRule elementRelationship: atomic - name: resourceRules type: list: elementType: - namedType: 
io.k8s.api.flowcontrol.v1alpha1.ResourcePolicyRule + namedType: io.k8s.api.flowcontrol.v1beta1.ResourcePolicyRule elementRelationship: atomic - name: subjects type: list: elementType: - namedType: io.k8s.api.flowcontrol.v1alpha1.Subject + namedType: io.k8s.api.flowcontrol.v1beta1.Subject elementRelationship: atomic -- name: io.k8s.api.flowcontrol.v1alpha1.PriorityLevelConfiguration +- name: io.k8s.api.flowcontrol.v1beta1.PriorityLevelConfiguration map: fields: - name: apiVersion @@ -7880,13 +8682,13 @@ var schemaYAML = typed.YAMLObject(`types: default: {} - name: spec type: - namedType: io.k8s.api.flowcontrol.v1alpha1.PriorityLevelConfigurationSpec + namedType: io.k8s.api.flowcontrol.v1beta1.PriorityLevelConfigurationSpec default: {} - name: status type: - namedType: io.k8s.api.flowcontrol.v1alpha1.PriorityLevelConfigurationStatus + namedType: io.k8s.api.flowcontrol.v1beta1.PriorityLevelConfigurationStatus default: {} -- name: io.k8s.api.flowcontrol.v1alpha1.PriorityLevelConfigurationCondition +- name: io.k8s.api.flowcontrol.v1beta1.PriorityLevelConfigurationCondition map: fields: - name: lastTransitionTime @@ -7905,19 +8707,19 @@ var schemaYAML = typed.YAMLObject(`types: - name: type type: scalar: string -- name: io.k8s.api.flowcontrol.v1alpha1.PriorityLevelConfigurationReference +- name: io.k8s.api.flowcontrol.v1beta1.PriorityLevelConfigurationReference map: fields: - name: name type: scalar: string default: "" -- name: io.k8s.api.flowcontrol.v1alpha1.PriorityLevelConfigurationSpec +- name: io.k8s.api.flowcontrol.v1beta1.PriorityLevelConfigurationSpec map: fields: - name: limited type: - namedType: io.k8s.api.flowcontrol.v1alpha1.LimitedPriorityLevelConfiguration + namedType: io.k8s.api.flowcontrol.v1beta1.LimitedPriorityLevelConfiguration - name: type type: scalar: string @@ -7927,18 +8729,18 @@ var schemaYAML = typed.YAMLObject(`types: fields: - fieldName: limited discriminatorValue: Limited -- name: io.k8s.api.flowcontrol.v1alpha1.PriorityLevelConfigurationStatus +- name: io.k8s.api.flowcontrol.v1beta1.PriorityLevelConfigurationStatus map: fields: - name: conditions type: list: elementType: - namedType: io.k8s.api.flowcontrol.v1alpha1.PriorityLevelConfigurationCondition + namedType: io.k8s.api.flowcontrol.v1beta1.PriorityLevelConfigurationCondition elementRelationship: associative keys: - type -- name: io.k8s.api.flowcontrol.v1alpha1.QueuingConfiguration +- name: io.k8s.api.flowcontrol.v1beta1.QueuingConfiguration map: fields: - name: handSize @@ -7953,7 +8755,7 @@ var schemaYAML = typed.YAMLObject(`types: type: scalar: numeric default: 0 -- name: io.k8s.api.flowcontrol.v1alpha1.ResourcePolicyRule +- name: io.k8s.api.flowcontrol.v1beta1.ResourcePolicyRule map: fields: - name: apiGroups @@ -7983,7 +8785,7 @@ var schemaYAML = typed.YAMLObject(`types: elementType: scalar: string elementRelationship: associative -- name: io.k8s.api.flowcontrol.v1alpha1.ServiceAccountSubject +- name: io.k8s.api.flowcontrol.v1beta1.ServiceAccountSubject map: fields: - name: name @@ -7994,22 +8796,22 @@ var schemaYAML = typed.YAMLObject(`types: type: scalar: string default: "" -- name: io.k8s.api.flowcontrol.v1alpha1.Subject +- name: io.k8s.api.flowcontrol.v1beta1.Subject map: fields: - name: group type: - namedType: io.k8s.api.flowcontrol.v1alpha1.GroupSubject + namedType: io.k8s.api.flowcontrol.v1beta1.GroupSubject - name: kind type: scalar: string default: "" - name: serviceAccount type: - namedType: io.k8s.api.flowcontrol.v1alpha1.ServiceAccountSubject + namedType: 
io.k8s.api.flowcontrol.v1beta1.ServiceAccountSubject - name: user type: - namedType: io.k8s.api.flowcontrol.v1alpha1.UserSubject + namedType: io.k8s.api.flowcontrol.v1beta1.UserSubject unions: - discriminator: kind fields: @@ -8019,21 +8821,21 @@ var schemaYAML = typed.YAMLObject(`types: discriminatorValue: ServiceAccount - fieldName: user discriminatorValue: User -- name: io.k8s.api.flowcontrol.v1alpha1.UserSubject +- name: io.k8s.api.flowcontrol.v1beta1.UserSubject map: fields: - name: name type: scalar: string default: "" -- name: io.k8s.api.flowcontrol.v1beta1.FlowDistinguisherMethod +- name: io.k8s.api.flowcontrol.v1beta2.FlowDistinguisherMethod map: fields: - name: type type: scalar: string default: "" -- name: io.k8s.api.flowcontrol.v1beta1.FlowSchema +- name: io.k8s.api.flowcontrol.v1beta2.FlowSchema map: fields: - name: apiVersion @@ -8048,13 +8850,13 @@ var schemaYAML = typed.YAMLObject(`types: default: {} - name: spec type: - namedType: io.k8s.api.flowcontrol.v1beta1.FlowSchemaSpec + namedType: io.k8s.api.flowcontrol.v1beta2.FlowSchemaSpec default: {} - name: status type: - namedType: io.k8s.api.flowcontrol.v1beta1.FlowSchemaStatus + namedType: io.k8s.api.flowcontrol.v1beta2.FlowSchemaStatus default: {} -- name: io.k8s.api.flowcontrol.v1beta1.FlowSchemaCondition +- name: io.k8s.api.flowcontrol.v1beta2.FlowSchemaCondition map: fields: - name: lastTransitionTime @@ -8073,50 +8875,50 @@ var schemaYAML = typed.YAMLObject(`types: - name: type type: scalar: string -- name: io.k8s.api.flowcontrol.v1beta1.FlowSchemaSpec +- name: io.k8s.api.flowcontrol.v1beta2.FlowSchemaSpec map: fields: - name: distinguisherMethod type: - namedType: io.k8s.api.flowcontrol.v1beta1.FlowDistinguisherMethod + namedType: io.k8s.api.flowcontrol.v1beta2.FlowDistinguisherMethod - name: matchingPrecedence type: scalar: numeric default: 0 - name: priorityLevelConfiguration type: - namedType: io.k8s.api.flowcontrol.v1beta1.PriorityLevelConfigurationReference + namedType: io.k8s.api.flowcontrol.v1beta2.PriorityLevelConfigurationReference default: {} - name: rules type: list: elementType: - namedType: io.k8s.api.flowcontrol.v1beta1.PolicyRulesWithSubjects + namedType: io.k8s.api.flowcontrol.v1beta2.PolicyRulesWithSubjects elementRelationship: atomic -- name: io.k8s.api.flowcontrol.v1beta1.FlowSchemaStatus +- name: io.k8s.api.flowcontrol.v1beta2.FlowSchemaStatus map: fields: - name: conditions type: list: elementType: - namedType: io.k8s.api.flowcontrol.v1beta1.FlowSchemaCondition + namedType: io.k8s.api.flowcontrol.v1beta2.FlowSchemaCondition elementRelationship: associative keys: - type -- name: io.k8s.api.flowcontrol.v1beta1.GroupSubject +- name: io.k8s.api.flowcontrol.v1beta2.GroupSubject map: fields: - name: name type: scalar: string default: "" -- name: io.k8s.api.flowcontrol.v1beta1.LimitResponse +- name: io.k8s.api.flowcontrol.v1beta2.LimitResponse map: fields: - name: queuing type: - namedType: io.k8s.api.flowcontrol.v1beta1.QueuingConfiguration + namedType: io.k8s.api.flowcontrol.v1beta2.QueuingConfiguration - name: type type: scalar: string @@ -8126,7 +8928,7 @@ var schemaYAML = typed.YAMLObject(`types: fields: - fieldName: queuing discriminatorValue: Queuing -- name: io.k8s.api.flowcontrol.v1beta1.LimitedPriorityLevelConfiguration +- name: io.k8s.api.flowcontrol.v1beta2.LimitedPriorityLevelConfiguration map: fields: - name: assuredConcurrencyShares @@ -8135,9 +8937,9 @@ var schemaYAML = typed.YAMLObject(`types: default: 0 - name: limitResponse type: - namedType: 
io.k8s.api.flowcontrol.v1beta1.LimitResponse + namedType: io.k8s.api.flowcontrol.v1beta2.LimitResponse default: {} -- name: io.k8s.api.flowcontrol.v1beta1.NonResourcePolicyRule +- name: io.k8s.api.flowcontrol.v1beta2.NonResourcePolicyRule map: fields: - name: nonResourceURLs @@ -8152,28 +8954,28 @@ var schemaYAML = typed.YAMLObject(`types: elementType: scalar: string elementRelationship: associative -- name: io.k8s.api.flowcontrol.v1beta1.PolicyRulesWithSubjects +- name: io.k8s.api.flowcontrol.v1beta2.PolicyRulesWithSubjects map: fields: - name: nonResourceRules type: list: elementType: - namedType: io.k8s.api.flowcontrol.v1beta1.NonResourcePolicyRule + namedType: io.k8s.api.flowcontrol.v1beta2.NonResourcePolicyRule elementRelationship: atomic - name: resourceRules type: list: elementType: - namedType: io.k8s.api.flowcontrol.v1beta1.ResourcePolicyRule + namedType: io.k8s.api.flowcontrol.v1beta2.ResourcePolicyRule elementRelationship: atomic - name: subjects type: list: elementType: - namedType: io.k8s.api.flowcontrol.v1beta1.Subject + namedType: io.k8s.api.flowcontrol.v1beta2.Subject elementRelationship: atomic -- name: io.k8s.api.flowcontrol.v1beta1.PriorityLevelConfiguration +- name: io.k8s.api.flowcontrol.v1beta2.PriorityLevelConfiguration map: fields: - name: apiVersion @@ -8188,13 +8990,13 @@ var schemaYAML = typed.YAMLObject(`types: default: {} - name: spec type: - namedType: io.k8s.api.flowcontrol.v1beta1.PriorityLevelConfigurationSpec + namedType: io.k8s.api.flowcontrol.v1beta2.PriorityLevelConfigurationSpec default: {} - name: status type: - namedType: io.k8s.api.flowcontrol.v1beta1.PriorityLevelConfigurationStatus + namedType: io.k8s.api.flowcontrol.v1beta2.PriorityLevelConfigurationStatus default: {} -- name: io.k8s.api.flowcontrol.v1beta1.PriorityLevelConfigurationCondition +- name: io.k8s.api.flowcontrol.v1beta2.PriorityLevelConfigurationCondition map: fields: - name: lastTransitionTime @@ -8213,19 +9015,19 @@ var schemaYAML = typed.YAMLObject(`types: - name: type type: scalar: string -- name: io.k8s.api.flowcontrol.v1beta1.PriorityLevelConfigurationReference +- name: io.k8s.api.flowcontrol.v1beta2.PriorityLevelConfigurationReference map: fields: - name: name type: scalar: string default: "" -- name: io.k8s.api.flowcontrol.v1beta1.PriorityLevelConfigurationSpec +- name: io.k8s.api.flowcontrol.v1beta2.PriorityLevelConfigurationSpec map: fields: - name: limited type: - namedType: io.k8s.api.flowcontrol.v1beta1.LimitedPriorityLevelConfiguration + namedType: io.k8s.api.flowcontrol.v1beta2.LimitedPriorityLevelConfiguration - name: type type: scalar: string @@ -8235,18 +9037,18 @@ var schemaYAML = typed.YAMLObject(`types: fields: - fieldName: limited discriminatorValue: Limited -- name: io.k8s.api.flowcontrol.v1beta1.PriorityLevelConfigurationStatus +- name: io.k8s.api.flowcontrol.v1beta2.PriorityLevelConfigurationStatus map: fields: - name: conditions type: list: elementType: - namedType: io.k8s.api.flowcontrol.v1beta1.PriorityLevelConfigurationCondition + namedType: io.k8s.api.flowcontrol.v1beta2.PriorityLevelConfigurationCondition elementRelationship: associative keys: - type -- name: io.k8s.api.flowcontrol.v1beta1.QueuingConfiguration +- name: io.k8s.api.flowcontrol.v1beta2.QueuingConfiguration map: fields: - name: handSize @@ -8261,7 +9063,7 @@ var schemaYAML = typed.YAMLObject(`types: type: scalar: numeric default: 0 -- name: io.k8s.api.flowcontrol.v1beta1.ResourcePolicyRule +- name: io.k8s.api.flowcontrol.v1beta2.ResourcePolicyRule map: fields: - name: apiGroups @@ -8291,7 
+9093,7 @@ var schemaYAML = typed.YAMLObject(`types: elementType: scalar: string elementRelationship: associative -- name: io.k8s.api.flowcontrol.v1beta1.ServiceAccountSubject +- name: io.k8s.api.flowcontrol.v1beta2.ServiceAccountSubject map: fields: - name: name @@ -8302,22 +9104,22 @@ var schemaYAML = typed.YAMLObject(`types: type: scalar: string default: "" -- name: io.k8s.api.flowcontrol.v1beta1.Subject +- name: io.k8s.api.flowcontrol.v1beta2.Subject map: fields: - name: group type: - namedType: io.k8s.api.flowcontrol.v1beta1.GroupSubject + namedType: io.k8s.api.flowcontrol.v1beta2.GroupSubject - name: kind type: scalar: string default: "" - name: serviceAccount type: - namedType: io.k8s.api.flowcontrol.v1beta1.ServiceAccountSubject + namedType: io.k8s.api.flowcontrol.v1beta2.ServiceAccountSubject - name: user type: - namedType: io.k8s.api.flowcontrol.v1beta1.UserSubject + namedType: io.k8s.api.flowcontrol.v1beta2.UserSubject unions: - discriminator: kind fields: @@ -8327,7 +9129,7 @@ var schemaYAML = typed.YAMLObject(`types: discriminatorValue: ServiceAccount - fieldName: user discriminatorValue: User -- name: io.k8s.api.flowcontrol.v1beta1.UserSubject +- name: io.k8s.api.flowcontrol.v1beta2.UserSubject map: fields: - name: name @@ -8859,6 +9661,7 @@ var schemaYAML = typed.YAMLObject(`types: map: elementType: scalar: string + elementRelationship: atomic - name: tolerations type: list: @@ -8911,6 +9714,7 @@ var schemaYAML = typed.YAMLObject(`types: map: elementType: scalar: string + elementRelationship: atomic - name: tolerations type: list: @@ -8956,12 +9760,29 @@ var schemaYAML = typed.YAMLObject(`types: map: elementType: scalar: string + elementRelationship: atomic - name: tolerations type: list: elementType: namedType: io.k8s.api.core.v1.Toleration elementRelationship: atomic +- name: io.k8s.api.policy.v1.Eviction + map: + fields: + - name: apiVersion + type: + scalar: string + - name: deleteOptions + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.DeleteOptions + - name: kind + type: + scalar: string + - name: metadata + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + default: {} - name: io.k8s.api.policy.v1.PodDisruptionBudget map: fields: @@ -9504,6 +10325,7 @@ var schemaYAML = typed.YAMLObject(`types: type: scalar: string default: "" + elementRelationship: atomic - name: io.k8s.api.rbac.v1.Subject map: fields: @@ -9521,6 +10343,7 @@ var schemaYAML = typed.YAMLObject(`types: - name: namespace type: scalar: string + elementRelationship: atomic - name: io.k8s.api.rbac.v1alpha1.AggregationRule map: fields: @@ -10565,8 +11388,8 @@ var schemaYAML = typed.YAMLObject(`types: elementRelationship: atomic map: elementType: - namedType: __untyped_atomic_ - elementRelationship: atomic + namedType: __untyped_deduced_ + elementRelationship: separable - name: io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector map: fields: @@ -10617,6 +11440,9 @@ var schemaYAML = typed.YAMLObject(`types: - name: operation type: scalar: string + - name: subresource + type: + scalar: string - name: time type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time @@ -10714,6 +11540,7 @@ var schemaYAML = typed.YAMLObject(`types: type: scalar: string default: "" + elementRelationship: atomic - name: io.k8s.apimachinery.pkg.apis.meta.v1.Preconditions map: fields: @@ -10735,8 +11562,8 @@ var schemaYAML = typed.YAMLObject(`types: elementRelationship: atomic map: elementType: - namedType: __untyped_atomic_ - elementRelationship: atomic + namedType: __untyped_deduced_ + elementRelationship: 
separable - name: io.k8s.apimachinery.pkg.util.intstr.IntOrString scalar: untyped - name: __untyped_atomic_ diff --git a/vendor/k8s.io/client-go/applyconfigurations/meta/v1/managedfieldsentry.go b/vendor/k8s.io/client-go/applyconfigurations/meta/v1/managedfieldsentry.go index 919ad9f12cc99..f4d7e2681236d 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/meta/v1/managedfieldsentry.go +++ b/vendor/k8s.io/client-go/applyconfigurations/meta/v1/managedfieldsentry.go @@ -25,12 +25,13 @@ import ( // ManagedFieldsEntryApplyConfiguration represents an declarative configuration of the ManagedFieldsEntry type for use // with apply. type ManagedFieldsEntryApplyConfiguration struct { - Manager *string `json:"manager,omitempty"` - Operation *v1.ManagedFieldsOperationType `json:"operation,omitempty"` - APIVersion *string `json:"apiVersion,omitempty"` - Time *v1.Time `json:"time,omitempty"` - FieldsType *string `json:"fieldsType,omitempty"` - FieldsV1 *v1.FieldsV1 `json:"fieldsV1,omitempty"` + Manager *string `json:"manager,omitempty"` + Operation *v1.ManagedFieldsOperationType `json:"operation,omitempty"` + APIVersion *string `json:"apiVersion,omitempty"` + Time *v1.Time `json:"time,omitempty"` + FieldsType *string `json:"fieldsType,omitempty"` + FieldsV1 *v1.FieldsV1 `json:"fieldsV1,omitempty"` + Subresource *string `json:"subresource,omitempty"` } // ManagedFieldsEntryApplyConfiguration constructs an declarative configuration of the ManagedFieldsEntry type for use with @@ -86,3 +87,11 @@ func (b *ManagedFieldsEntryApplyConfiguration) WithFieldsV1(value v1.FieldsV1) * b.FieldsV1 = &value return b } + +// WithSubresource sets the Subresource field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Subresource field is set to the value of the last call. +func (b *ManagedFieldsEntryApplyConfiguration) WithSubresource(value string) *ManagedFieldsEntryApplyConfiguration { + b.Subresource = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/meta/v1/unstructured.go b/vendor/k8s.io/client-go/applyconfigurations/meta/v1/unstructured.go new file mode 100644 index 0000000000000..8a58d9e870559 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/meta/v1/unstructured.go @@ -0,0 +1,137 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "fmt" + "sync" + "time" + + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/managedfields" + "k8s.io/client-go/discovery" + "k8s.io/kube-openapi/pkg/util/proto" + "sigs.k8s.io/structured-merge-diff/v4/typed" +) + +// openAPISchemaTTL is how frequently we need to check +// whether the open API schema has changed or not. 
+const openAPISchemaTTL = time.Minute
+
+// UnstructuredExtractor enables extracting the applied configuration state from object for fieldManager into an
+// unstructured object type.
+type UnstructuredExtractor interface {
+	Extract(object *unstructured.Unstructured, fieldManager string) (*unstructured.Unstructured, error)
+	ExtractStatus(object *unstructured.Unstructured, fieldManager string) (*unstructured.Unstructured, error)
+}
+
+// gvkParserCache caches the GVKParser in order to prevent having to repeatedly
+// parse the models from the open API schema when the schema itself changes infrequently.
+type gvkParserCache struct {
+	// discoveryClient is the client for retrieving the openAPI document and checking
+	// whether the document has changed recently
+	discoveryClient discovery.DiscoveryInterface
+	// mu protects the gvkParser
+	mu sync.Mutex
+	// gvkParser retrieves the objectType for a given gvk
+	gvkParser *managedfields.GvkParser
+	// lastChecked is the last time we checked if the openAPI doc has changed.
+	lastChecked time.Time
+}
+
+// regenerateGVKParser builds the parser from the raw OpenAPI schema.
+func regenerateGVKParser(dc discovery.DiscoveryInterface) (*managedfields.GvkParser, error) {
+	doc, err := dc.OpenAPISchema()
+	if err != nil {
+		return nil, err
+	}
+
+	models, err := proto.NewOpenAPIData(doc)
+	if err != nil {
+		return nil, err
+	}
+
+	return managedfields.NewGVKParser(models, false)
+}
+
+// objectTypeForGVK retrieves the typed.ParseableType for a given gvk from the cache
+func (c *gvkParserCache) objectTypeForGVK(gvk schema.GroupVersionKind) (*typed.ParseableType, error) {
+	c.mu.Lock()
+	defer c.mu.Unlock()
+	// if the ttl on the openAPISchema has expired,
+	// regenerate the gvk parser
+	if time.Since(c.lastChecked) > openAPISchemaTTL {
+		c.lastChecked = time.Now()
+		parser, err := regenerateGVKParser(c.discoveryClient)
+		if err != nil {
+			return nil, err
+		}
+		c.gvkParser = parser
+	}
+	return c.gvkParser.Type(gvk), nil
+}
+
+type extractor struct {
+	cache *gvkParserCache
+}
+
+// NewUnstructuredExtractor creates the extractor with which you can extract the applied configuration
+// for a given manager from an unstructured object.
+func NewUnstructuredExtractor(dc discovery.DiscoveryInterface) (UnstructuredExtractor, error) {
+	parser, err := regenerateGVKParser(dc)
+	if err != nil {
+		return nil, fmt.Errorf("failed generating initial GVK Parser: %v", err)
+	}
+	return &extractor{
+		cache: &gvkParserCache{
+			gvkParser:       parser,
+			discoveryClient: dc,
+		},
+	}, nil
+}
+
+// Extract extracts the applied configuration owned by fieldManager from an unstructured object.
+// Note that the apply configuration itself is also an unstructured object.
+func (e *extractor) Extract(object *unstructured.Unstructured, fieldManager string) (*unstructured.Unstructured, error) {
+	return e.extractUnstructured(object, fieldManager, "")
+}
+
+// ExtractStatus is the same as Extract except
+// that it extracts the status subresource applied configuration.
+// Experimental!
+func (e *extractor) ExtractStatus(object *unstructured.Unstructured, fieldManager string) (*unstructured.Unstructured, error) { + return e.extractUnstructured(object, fieldManager, "status") +} + +func (e *extractor) extractUnstructured(object *unstructured.Unstructured, fieldManager string, subresource string) (*unstructured.Unstructured, error) { + gvk := object.GetObjectKind().GroupVersionKind() + objectType, err := e.cache.objectTypeForGVK(gvk) + if err != nil { + return nil, fmt.Errorf("failed to fetch the objectType: %v", err) + } + result := &unstructured.Unstructured{} + err = managedfields.ExtractInto(object, *objectType, fieldManager, result, subresource) + if err != nil { + return nil, fmt.Errorf("failed calling ExtractInto for unstructured: %v", err) + } + result.SetName(object.GetName()) + result.SetNamespace(object.GetNamespace()) + result.SetKind(object.GetKind()) + result.SetAPIVersion(object.GetAPIVersion()) + return result, nil +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingress.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingress.go index 08cbec9042aee..74c0bb273d09f 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingress.go +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingress.go @@ -50,7 +50,7 @@ func Ingress(name, namespace string) *IngressApplyConfiguration { // ExtractIngress extracts the applied configuration owned by fieldManager from // ingress. If no managedFields are found in ingress for fieldManager, a // IngressApplyConfiguration is returned with only the Name, Namespace (if applicable), -// APIVersion and Kind populated. Is is possible that no managed fields were found for because other +// APIVersion and Kind populated. It is possible that no managed fields were found for because other // field managers have taken ownership of all the fields previously owned by fieldManager, or because // the fieldManager never owned fields any fields. // ingress must be a unmodified Ingress API object that was retrieved from the Kubernetes API. @@ -59,8 +59,19 @@ func Ingress(name, namespace string) *IngressApplyConfiguration { // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! func ExtractIngress(ingress *apinetworkingv1.Ingress, fieldManager string) (*IngressApplyConfiguration, error) { + return extractIngress(ingress, fieldManager, "") +} + +// ExtractIngressStatus is the same as ExtractIngress except +// that it extracts the status subresource applied configuration. +// Experimental! 
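// [Editor's aside — illustrative usage sketch, not part of this patch.]
// The UnstructuredExtractor defined above pairs a discovery client with a
// TTL-cached GVKParser so extraction works for arbitrary GVKs. A minimal
// sketch, assuming an existing *rest.Config and a hypothetical field manager
// name "my-controller" (metav1ac aliases k8s.io/client-go/applyconfigurations/meta/v1):
//
//	dc, err := discovery.NewDiscoveryClientForConfig(restConfig)
//	if err != nil {
//		return err
//	}
//	ex, err := metav1ac.NewUnstructuredExtractor(dc)
//	if err != nil {
//		return err
//	}
//	// obj is an *unstructured.Unstructured previously fetched from the API server.
//	applied, err := ex.Extract(obj, "my-controller")
//	// applied now holds only the fields owned by "my-controller",
//	// suitable for an extract/modify-in-place/apply round trip.
// [End editor's aside.]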
+func ExtractIngressStatus(ingress *apinetworkingv1.Ingress, fieldManager string) (*IngressApplyConfiguration, error) { + return extractIngress(ingress, fieldManager, "status") +} + +func extractIngress(ingress *apinetworkingv1.Ingress, fieldManager string, subresource string) (*IngressApplyConfiguration, error) { b := &IngressApplyConfiguration{} - err := managedfields.ExtractInto(ingress, internal.Parser().Type("io.k8s.api.networking.v1.Ingress"), fieldManager, b) + err := managedfields.ExtractInto(ingress, internal.Parser().Type("io.k8s.api.networking.v1.Ingress"), fieldManager, b, subresource) if err != nil { return nil, err } diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressclass.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressclass.go index 105dc7d71634b..5b36992e45b07 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressclass.go +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressclass.go @@ -48,7 +48,7 @@ func IngressClass(name string) *IngressClassApplyConfiguration { // ExtractIngressClass extracts the applied configuration owned by fieldManager from // ingressClass. If no managedFields are found in ingressClass for fieldManager, a // IngressClassApplyConfiguration is returned with only the Name, Namespace (if applicable), -// APIVersion and Kind populated. Is is possible that no managed fields were found for because other +// APIVersion and Kind populated. It is possible that no managed fields were found for because other // field managers have taken ownership of all the fields previously owned by fieldManager, or because // the fieldManager never owned fields any fields. // ingressClass must be a unmodified IngressClass API object that was retrieved from the Kubernetes API. @@ -57,8 +57,19 @@ func IngressClass(name string) *IngressClassApplyConfiguration { // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! func ExtractIngressClass(ingressClass *apinetworkingv1.IngressClass, fieldManager string) (*IngressClassApplyConfiguration, error) { + return extractIngressClass(ingressClass, fieldManager, "") +} + +// ExtractIngressClassStatus is the same as ExtractIngressClass except +// that it extracts the status subresource applied configuration. +// Experimental! 
+func ExtractIngressClassStatus(ingressClass *apinetworkingv1.IngressClass, fieldManager string) (*IngressClassApplyConfiguration, error) { + return extractIngressClass(ingressClass, fieldManager, "status") +} + +func extractIngressClass(ingressClass *apinetworkingv1.IngressClass, fieldManager string, subresource string) (*IngressClassApplyConfiguration, error) { b := &IngressClassApplyConfiguration{} - err := managedfields.ExtractInto(ingressClass, internal.Parser().Type("io.k8s.api.networking.v1.IngressClass"), fieldManager, b) + err := managedfields.ExtractInto(ingressClass, internal.Parser().Type("io.k8s.api.networking.v1.IngressClass"), fieldManager, b, subresource) if err != nil { return nil, err } diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/networkpolicy.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/networkpolicy.go index 061111183d788..7091d7cfd503d 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/networkpolicy.go +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/networkpolicy.go @@ -49,7 +49,7 @@ func NetworkPolicy(name, namespace string) *NetworkPolicyApplyConfiguration { // ExtractNetworkPolicy extracts the applied configuration owned by fieldManager from // networkPolicy. If no managedFields are found in networkPolicy for fieldManager, a // NetworkPolicyApplyConfiguration is returned with only the Name, Namespace (if applicable), -// APIVersion and Kind populated. Is is possible that no managed fields were found for because other +// APIVersion and Kind populated. It is possible that no managed fields were found for because other // field managers have taken ownership of all the fields previously owned by fieldManager, or because // the fieldManager never owned fields any fields. // networkPolicy must be a unmodified NetworkPolicy API object that was retrieved from the Kubernetes API. @@ -58,8 +58,19 @@ func NetworkPolicy(name, namespace string) *NetworkPolicyApplyConfiguration { // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! func ExtractNetworkPolicy(networkPolicy *apinetworkingv1.NetworkPolicy, fieldManager string) (*NetworkPolicyApplyConfiguration, error) { + return extractNetworkPolicy(networkPolicy, fieldManager, "") +} + +// ExtractNetworkPolicyStatus is the same as ExtractNetworkPolicy except +// that it extracts the status subresource applied configuration. +// Experimental! 
+func ExtractNetworkPolicyStatus(networkPolicy *apinetworkingv1.NetworkPolicy, fieldManager string) (*NetworkPolicyApplyConfiguration, error) { + return extractNetworkPolicy(networkPolicy, fieldManager, "status") +} + +func extractNetworkPolicy(networkPolicy *apinetworkingv1.NetworkPolicy, fieldManager string, subresource string) (*NetworkPolicyApplyConfiguration, error) { b := &NetworkPolicyApplyConfiguration{} - err := managedfields.ExtractInto(networkPolicy, internal.Parser().Type("io.k8s.api.networking.v1.NetworkPolicy"), fieldManager, b) + err := managedfields.ExtractInto(networkPolicy, internal.Parser().Type("io.k8s.api.networking.v1.NetworkPolicy"), fieldManager, b, subresource) if err != nil { return nil, err } diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingress.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingress.go index 3d15cf1270e4b..6b87d1ff3c67d 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingress.go +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingress.go @@ -50,7 +50,7 @@ func Ingress(name, namespace string) *IngressApplyConfiguration { // ExtractIngress extracts the applied configuration owned by fieldManager from // ingress. If no managedFields are found in ingress for fieldManager, a // IngressApplyConfiguration is returned with only the Name, Namespace (if applicable), -// APIVersion and Kind populated. Is is possible that no managed fields were found for because other +// APIVersion and Kind populated. It is possible that no managed fields were found for because other // field managers have taken ownership of all the fields previously owned by fieldManager, or because // the fieldManager never owned fields any fields. // ingress must be a unmodified Ingress API object that was retrieved from the Kubernetes API. @@ -59,8 +59,19 @@ func Ingress(name, namespace string) *IngressApplyConfiguration { // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! func ExtractIngress(ingress *networkingv1beta1.Ingress, fieldManager string) (*IngressApplyConfiguration, error) { + return extractIngress(ingress, fieldManager, "") +} + +// ExtractIngressStatus is the same as ExtractIngress except +// that it extracts the status subresource applied configuration. +// Experimental! 
+func ExtractIngressStatus(ingress *networkingv1beta1.Ingress, fieldManager string) (*IngressApplyConfiguration, error) { + return extractIngress(ingress, fieldManager, "status") +} + +func extractIngress(ingress *networkingv1beta1.Ingress, fieldManager string, subresource string) (*IngressApplyConfiguration, error) { b := &IngressApplyConfiguration{} - err := managedfields.ExtractInto(ingress, internal.Parser().Type("io.k8s.api.networking.v1beta1.Ingress"), fieldManager, b) + err := managedfields.ExtractInto(ingress, internal.Parser().Type("io.k8s.api.networking.v1beta1.Ingress"), fieldManager, b, subresource) if err != nil { return nil, err } diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressclass.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressclass.go index f80fae107dd17..3a13cd0834222 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressclass.go +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressclass.go @@ -48,7 +48,7 @@ func IngressClass(name string) *IngressClassApplyConfiguration { // ExtractIngressClass extracts the applied configuration owned by fieldManager from // ingressClass. If no managedFields are found in ingressClass for fieldManager, a // IngressClassApplyConfiguration is returned with only the Name, Namespace (if applicable), -// APIVersion and Kind populated. Is is possible that no managed fields were found for because other +// APIVersion and Kind populated. It is possible that no managed fields were found for because other // field managers have taken ownership of all the fields previously owned by fieldManager, or because // the fieldManager never owned fields any fields. // ingressClass must be a unmodified IngressClass API object that was retrieved from the Kubernetes API. @@ -57,8 +57,19 @@ func IngressClass(name string) *IngressClassApplyConfiguration { // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! func ExtractIngressClass(ingressClass *networkingv1beta1.IngressClass, fieldManager string) (*IngressClassApplyConfiguration, error) { + return extractIngressClass(ingressClass, fieldManager, "") +} + +// ExtractIngressClassStatus is the same as ExtractIngressClass except +// that it extracts the status subresource applied configuration. +// Experimental! 
+func ExtractIngressClassStatus(ingressClass *networkingv1beta1.IngressClass, fieldManager string) (*IngressClassApplyConfiguration, error) { + return extractIngressClass(ingressClass, fieldManager, "status") +} + +func extractIngressClass(ingressClass *networkingv1beta1.IngressClass, fieldManager string, subresource string) (*IngressClassApplyConfiguration, error) { b := &IngressClassApplyConfiguration{} - err := managedfields.ExtractInto(ingressClass, internal.Parser().Type("io.k8s.api.networking.v1beta1.IngressClass"), fieldManager, b) + err := managedfields.ExtractInto(ingressClass, internal.Parser().Type("io.k8s.api.networking.v1beta1.IngressClass"), fieldManager, b, subresource) if err != nil { return nil, err } diff --git a/vendor/k8s.io/client-go/applyconfigurations/node/v1/runtimeclass.go b/vendor/k8s.io/client-go/applyconfigurations/node/v1/runtimeclass.go index 9f6cbef595c6e..1521f2cef56f6 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/node/v1/runtimeclass.go +++ b/vendor/k8s.io/client-go/applyconfigurations/node/v1/runtimeclass.go @@ -50,7 +50,7 @@ func RuntimeClass(name string) *RuntimeClassApplyConfiguration { // ExtractRuntimeClass extracts the applied configuration owned by fieldManager from // runtimeClass. If no managedFields are found in runtimeClass for fieldManager, a // RuntimeClassApplyConfiguration is returned with only the Name, Namespace (if applicable), -// APIVersion and Kind populated. Is is possible that no managed fields were found for because other +// APIVersion and Kind populated. It is possible that no managed fields were found for because other // field managers have taken ownership of all the fields previously owned by fieldManager, or because // the fieldManager never owned fields any fields. // runtimeClass must be a unmodified RuntimeClass API object that was retrieved from the Kubernetes API. @@ -59,8 +59,19 @@ func RuntimeClass(name string) *RuntimeClassApplyConfiguration { // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! func ExtractRuntimeClass(runtimeClass *apinodev1.RuntimeClass, fieldManager string) (*RuntimeClassApplyConfiguration, error) { + return extractRuntimeClass(runtimeClass, fieldManager, "") +} + +// ExtractRuntimeClassStatus is the same as ExtractRuntimeClass except +// that it extracts the status subresource applied configuration. +// Experimental! 
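// [Editor's aside — illustrative usage sketch, not part of this patch.]
// Every typed apply configuration in this diff gains the same pair of entry
// points: Extract<Kind> for the main resource and Extract<Kind>Status for the
// status subresource, both delegating to an unexported helper that threads the
// subresource name through to managedfields.ExtractInto. A hedged sketch of
// the intended round trip, assuming a generated clientset with server-side
// apply support and the hypothetical field manager "my-controller"
// (networkingv1ac aliases k8s.io/client-go/applyconfigurations/networking/v1):
//
//	ing, err := clientset.NetworkingV1().Ingresses("default").Get(ctx, "web", metav1.GetOptions{})
//	if err != nil {
//		return err
//	}
//	ac, err := networkingv1ac.ExtractIngress(ing, "my-controller")
//	if err != nil {
//		return err
//	}
//	// ...modify ac in place, then re-apply only the owned fields:
//	_, err = clientset.NetworkingV1().Ingresses("default").
//		Apply(ctx, ac, metav1.ApplyOptions{FieldManager: "my-controller"})
// [End editor's aside.]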
+func ExtractRuntimeClassStatus(runtimeClass *apinodev1.RuntimeClass, fieldManager string) (*RuntimeClassApplyConfiguration, error) { + return extractRuntimeClass(runtimeClass, fieldManager, "status") +} + +func extractRuntimeClass(runtimeClass *apinodev1.RuntimeClass, fieldManager string, subresource string) (*RuntimeClassApplyConfiguration, error) { b := &RuntimeClassApplyConfiguration{} - err := managedfields.ExtractInto(runtimeClass, internal.Parser().Type("io.k8s.api.node.v1.RuntimeClass"), fieldManager, b) + err := managedfields.ExtractInto(runtimeClass, internal.Parser().Type("io.k8s.api.node.v1.RuntimeClass"), fieldManager, b, subresource) if err != nil { return nil, err } diff --git a/vendor/k8s.io/client-go/applyconfigurations/node/v1alpha1/runtimeclass.go b/vendor/k8s.io/client-go/applyconfigurations/node/v1alpha1/runtimeclass.go index d40ba68b930c5..295e763d76b13 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/node/v1alpha1/runtimeclass.go +++ b/vendor/k8s.io/client-go/applyconfigurations/node/v1alpha1/runtimeclass.go @@ -48,7 +48,7 @@ func RuntimeClass(name string) *RuntimeClassApplyConfiguration { // ExtractRuntimeClass extracts the applied configuration owned by fieldManager from // runtimeClass. If no managedFields are found in runtimeClass for fieldManager, a // RuntimeClassApplyConfiguration is returned with only the Name, Namespace (if applicable), -// APIVersion and Kind populated. Is is possible that no managed fields were found for because other +// APIVersion and Kind populated. It is possible that no managed fields were found for because other // field managers have taken ownership of all the fields previously owned by fieldManager, or because // the fieldManager never owned fields any fields. // runtimeClass must be a unmodified RuntimeClass API object that was retrieved from the Kubernetes API. @@ -57,8 +57,19 @@ func RuntimeClass(name string) *RuntimeClassApplyConfiguration { // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! func ExtractRuntimeClass(runtimeClass *nodev1alpha1.RuntimeClass, fieldManager string) (*RuntimeClassApplyConfiguration, error) { + return extractRuntimeClass(runtimeClass, fieldManager, "") +} + +// ExtractRuntimeClassStatus is the same as ExtractRuntimeClass except +// that it extracts the status subresource applied configuration. +// Experimental! 
+func ExtractRuntimeClassStatus(runtimeClass *nodev1alpha1.RuntimeClass, fieldManager string) (*RuntimeClassApplyConfiguration, error) { + return extractRuntimeClass(runtimeClass, fieldManager, "status") +} + +func extractRuntimeClass(runtimeClass *nodev1alpha1.RuntimeClass, fieldManager string, subresource string) (*RuntimeClassApplyConfiguration, error) { b := &RuntimeClassApplyConfiguration{} - err := managedfields.ExtractInto(runtimeClass, internal.Parser().Type("io.k8s.api.node.v1alpha1.RuntimeClass"), fieldManager, b) + err := managedfields.ExtractInto(runtimeClass, internal.Parser().Type("io.k8s.api.node.v1alpha1.RuntimeClass"), fieldManager, b, subresource) if err != nil { return nil, err } diff --git a/vendor/k8s.io/client-go/applyconfigurations/node/v1beta1/runtimeclass.go b/vendor/k8s.io/client-go/applyconfigurations/node/v1beta1/runtimeclass.go index b56bc0ce7f564..2424e205e3d33 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/node/v1beta1/runtimeclass.go +++ b/vendor/k8s.io/client-go/applyconfigurations/node/v1beta1/runtimeclass.go @@ -50,7 +50,7 @@ func RuntimeClass(name string) *RuntimeClassApplyConfiguration { // ExtractRuntimeClass extracts the applied configuration owned by fieldManager from // runtimeClass. If no managedFields are found in runtimeClass for fieldManager, a // RuntimeClassApplyConfiguration is returned with only the Name, Namespace (if applicable), -// APIVersion and Kind populated. Is is possible that no managed fields were found for because other +// APIVersion and Kind populated. It is possible that no managed fields were found for because other // field managers have taken ownership of all the fields previously owned by fieldManager, or because // the fieldManager never owned fields any fields. // runtimeClass must be a unmodified RuntimeClass API object that was retrieved from the Kubernetes API. @@ -59,8 +59,19 @@ func RuntimeClass(name string) *RuntimeClassApplyConfiguration { // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! func ExtractRuntimeClass(runtimeClass *nodev1beta1.RuntimeClass, fieldManager string) (*RuntimeClassApplyConfiguration, error) { + return extractRuntimeClass(runtimeClass, fieldManager, "") +} + +// ExtractRuntimeClassStatus is the same as ExtractRuntimeClass except +// that it extracts the status subresource applied configuration. +// Experimental! +func ExtractRuntimeClassStatus(runtimeClass *nodev1beta1.RuntimeClass, fieldManager string) (*RuntimeClassApplyConfiguration, error) { + return extractRuntimeClass(runtimeClass, fieldManager, "status") +} + +func extractRuntimeClass(runtimeClass *nodev1beta1.RuntimeClass, fieldManager string, subresource string) (*RuntimeClassApplyConfiguration, error) { b := &RuntimeClassApplyConfiguration{} - err := managedfields.ExtractInto(runtimeClass, internal.Parser().Type("io.k8s.api.node.v1beta1.RuntimeClass"), fieldManager, b) + err := managedfields.ExtractInto(runtimeClass, internal.Parser().Type("io.k8s.api.node.v1beta1.RuntimeClass"), fieldManager, b, subresource) if err != nil { return nil, err } diff --git a/vendor/k8s.io/client-go/applyconfigurations/policy/v1/eviction.go b/vendor/k8s.io/client-go/applyconfigurations/policy/v1/eviction.go new file mode 100644 index 0000000000000..07bda7467ef6c --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/policy/v1/eviction.go @@ -0,0 +1,267 @@ +/* +Copyright The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + policyv1 "k8s.io/api/policy/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + managedfields "k8s.io/apimachinery/pkg/util/managedfields" + internal "k8s.io/client-go/applyconfigurations/internal" + v1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// EvictionApplyConfiguration represents an declarative configuration of the Eviction type for use +// with apply. +type EvictionApplyConfiguration struct { + v1.TypeMetaApplyConfiguration `json:",inline"` + *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + DeleteOptions *v1.DeleteOptionsApplyConfiguration `json:"deleteOptions,omitempty"` +} + +// Eviction constructs an declarative configuration of the Eviction type for use with +// apply. +func Eviction(name, namespace string) *EvictionApplyConfiguration { + b := &EvictionApplyConfiguration{} + b.WithName(name) + b.WithNamespace(namespace) + b.WithKind("Eviction") + b.WithAPIVersion("policy/v1") + return b +} + +// ExtractEviction extracts the applied configuration owned by fieldManager from +// eviction. If no managedFields are found in eviction for fieldManager, a +// EvictionApplyConfiguration is returned with only the Name, Namespace (if applicable), +// APIVersion and Kind populated. It is possible that no managed fields were found for because other +// field managers have taken ownership of all the fields previously owned by fieldManager, or because +// the fieldManager never owned fields any fields. +// eviction must be a unmodified Eviction API object that was retrieved from the Kubernetes API. +// ExtractEviction provides a way to perform a extract/modify-in-place/apply workflow. +// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously +// applied if another fieldManager has updated or force applied any of the previously applied fields. +// Experimental! +func ExtractEviction(eviction *policyv1.Eviction, fieldManager string) (*EvictionApplyConfiguration, error) { + return extractEviction(eviction, fieldManager, "") +} + +// ExtractEvictionStatus is the same as ExtractEviction except +// that it extracts the status subresource applied configuration. +// Experimental! 
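// [Editor's aside — illustrative usage sketch, not part of this patch.]
// The new policy/v1 Eviction apply configuration follows the usual builder
// pattern: the Eviction(name, namespace) constructor pre-populates Kind and
// APIVersion, and the With* setters chain. A small sketch; the pod name is
// hypothetical, and it assumes the DeleteOptions builder that ships in
// k8s.io/client-go/applyconfigurations/meta/v1 (aliased metav1ac here):
//
//	ev := policyv1ac.Eviction("web-0", "default").
//		WithDeleteOptions(metav1ac.DeleteOptions().
//			WithGracePeriodSeconds(30))
//	// ev.DeleteOptions now carries the grace period; Kind ("Eviction") and
//	// APIVersion ("policy/v1") were set by the constructor.
// [End editor's aside.]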
+func ExtractEvictionStatus(eviction *policyv1.Eviction, fieldManager string) (*EvictionApplyConfiguration, error) { + return extractEviction(eviction, fieldManager, "status") +} + +func extractEviction(eviction *policyv1.Eviction, fieldManager string, subresource string) (*EvictionApplyConfiguration, error) { + b := &EvictionApplyConfiguration{} + err := managedfields.ExtractInto(eviction, internal.Parser().Type("io.k8s.api.policy.v1.Eviction"), fieldManager, b, subresource) + if err != nil { + return nil, err + } + b.WithName(eviction.Name) + b.WithNamespace(eviction.Namespace) + + b.WithKind("Eviction") + b.WithAPIVersion("policy/v1") + return b, nil +} + +// WithKind sets the Kind field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Kind field is set to the value of the last call. +func (b *EvictionApplyConfiguration) WithKind(value string) *EvictionApplyConfiguration { + b.Kind = &value + return b +} + +// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the APIVersion field is set to the value of the last call. +func (b *EvictionApplyConfiguration) WithAPIVersion(value string) *EvictionApplyConfiguration { + b.APIVersion = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *EvictionApplyConfiguration) WithName(value string) *EvictionApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Name = &value + return b +} + +// WithGenerateName sets the GenerateName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GenerateName field is set to the value of the last call. +func (b *EvictionApplyConfiguration) WithGenerateName(value string) *EvictionApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.GenerateName = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. +func (b *EvictionApplyConfiguration) WithNamespace(value string) *EvictionApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Namespace = &value + return b +} + +// WithSelfLink sets the SelfLink field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the SelfLink field is set to the value of the last call. +func (b *EvictionApplyConfiguration) WithSelfLink(value string) *EvictionApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.SelfLink = &value + return b +} + +// WithUID sets the UID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. 
+// If called multiple times, the UID field is set to the value of the last call.
+func (b *EvictionApplyConfiguration) WithUID(value types.UID) *EvictionApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	b.UID = &value
+	return b
+}
+
+// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the ResourceVersion field is set to the value of the last call.
+func (b *EvictionApplyConfiguration) WithResourceVersion(value string) *EvictionApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	b.ResourceVersion = &value
+	return b
+}
+
+// WithGeneration sets the Generation field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Generation field is set to the value of the last call.
+func (b *EvictionApplyConfiguration) WithGeneration(value int64) *EvictionApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	b.Generation = &value
+	return b
+}
+
+// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the CreationTimestamp field is set to the value of the last call.
+func (b *EvictionApplyConfiguration) WithCreationTimestamp(value metav1.Time) *EvictionApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	b.CreationTimestamp = &value
+	return b
+}
+
+// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the DeletionTimestamp field is set to the value of the last call.
+func (b *EvictionApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *EvictionApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	b.DeletionTimestamp = &value
+	return b
+}
+
+// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
+func (b *EvictionApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *EvictionApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	b.DeletionGracePeriodSeconds = &value
+	return b
+}
+
+// WithLabels puts the entries into the Labels field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the entries provided by each call will be put on the Labels field,
+// overwriting existing map entries in the Labels field with the same key.
+func (b *EvictionApplyConfiguration) WithLabels(entries map[string]string) *EvictionApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	if b.Labels == nil && len(entries) > 0 {
+		b.Labels = make(map[string]string, len(entries))
+	}
+	for k, v := range entries {
+		b.Labels[k] = v
+	}
+	return b
+}
+
+// WithAnnotations puts the entries into the Annotations field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the entries provided by each call will be put on the Annotations field,
+// overwriting existing map entries in the Annotations field with the same key.
+func (b *EvictionApplyConfiguration) WithAnnotations(entries map[string]string) *EvictionApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	if b.Annotations == nil && len(entries) > 0 {
+		b.Annotations = make(map[string]string, len(entries))
+	}
+	for k, v := range entries {
+		b.Annotations[k] = v
+	}
+	return b
+}
+
+// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the OwnerReferences field.
+func (b *EvictionApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *EvictionApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	for i := range values {
+		if values[i] == nil {
+			panic("nil value passed to WithOwnerReferences")
+		}
+		b.OwnerReferences = append(b.OwnerReferences, *values[i])
+	}
+	return b
+}
+
+// WithFinalizers adds the given value to the Finalizers field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Finalizers field.
+func (b *EvictionApplyConfiguration) WithFinalizers(values ...string) *EvictionApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	for i := range values {
+		b.Finalizers = append(b.Finalizers, values[i])
+	}
+	return b
+}
+
+// WithClusterName sets the ClusterName field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the ClusterName field is set to the value of the last call.
+func (b *EvictionApplyConfiguration) WithClusterName(value string) *EvictionApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	b.ClusterName = &value
+	return b
+}
+
+func (b *EvictionApplyConfiguration) ensureObjectMetaApplyConfigurationExists() {
+	if b.ObjectMetaApplyConfiguration == nil {
+		b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{}
+	}
+}
+
+// WithDeleteOptions sets the DeleteOptions field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the DeleteOptions field is set to the value of the last call.
+func (b *EvictionApplyConfiguration) WithDeleteOptions(value *v1.DeleteOptionsApplyConfiguration) *EvictionApplyConfiguration { + b.DeleteOptions = value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/policy/v1/poddisruptionbudget.go b/vendor/k8s.io/client-go/applyconfigurations/policy/v1/poddisruptionbudget.go index 3233f5386ed5e..888c20f60638d 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/policy/v1/poddisruptionbudget.go +++ b/vendor/k8s.io/client-go/applyconfigurations/policy/v1/poddisruptionbudget.go @@ -50,7 +50,7 @@ func PodDisruptionBudget(name, namespace string) *PodDisruptionBudgetApplyConfig // ExtractPodDisruptionBudget extracts the applied configuration owned by fieldManager from // podDisruptionBudget. If no managedFields are found in podDisruptionBudget for fieldManager, a // PodDisruptionBudgetApplyConfiguration is returned with only the Name, Namespace (if applicable), -// APIVersion and Kind populated. Is is possible that no managed fields were found for because other +// APIVersion and Kind populated. It is possible that no managed fields were found for because other // field managers have taken ownership of all the fields previously owned by fieldManager, or because // the fieldManager never owned fields any fields. // podDisruptionBudget must be a unmodified PodDisruptionBudget API object that was retrieved from the Kubernetes API. @@ -59,8 +59,19 @@ func PodDisruptionBudget(name, namespace string) *PodDisruptionBudgetApplyConfig // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! func ExtractPodDisruptionBudget(podDisruptionBudget *apipolicyv1.PodDisruptionBudget, fieldManager string) (*PodDisruptionBudgetApplyConfiguration, error) { + return extractPodDisruptionBudget(podDisruptionBudget, fieldManager, "") +} + +// ExtractPodDisruptionBudgetStatus is the same as ExtractPodDisruptionBudget except +// that it extracts the status subresource applied configuration. +// Experimental! +func ExtractPodDisruptionBudgetStatus(podDisruptionBudget *apipolicyv1.PodDisruptionBudget, fieldManager string) (*PodDisruptionBudgetApplyConfiguration, error) { + return extractPodDisruptionBudget(podDisruptionBudget, fieldManager, "status") +} + +func extractPodDisruptionBudget(podDisruptionBudget *apipolicyv1.PodDisruptionBudget, fieldManager string, subresource string) (*PodDisruptionBudgetApplyConfiguration, error) { b := &PodDisruptionBudgetApplyConfiguration{} - err := managedfields.ExtractInto(podDisruptionBudget, internal.Parser().Type("io.k8s.api.policy.v1.PodDisruptionBudget"), fieldManager, b) + err := managedfields.ExtractInto(podDisruptionBudget, internal.Parser().Type("io.k8s.api.policy.v1.PodDisruptionBudget"), fieldManager, b, subresource) if err != nil { return nil, err } diff --git a/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/eviction.go b/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/eviction.go index 1db2695f7ddfc..e1f0f137ee40b 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/eviction.go +++ b/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/eviction.go @@ -49,7 +49,7 @@ func Eviction(name, namespace string) *EvictionApplyConfiguration { // ExtractEviction extracts the applied configuration owned by fieldManager from // eviction. 
If no managedFields are found in eviction for fieldManager, a // EvictionApplyConfiguration is returned with only the Name, Namespace (if applicable), -// APIVersion and Kind populated. Is is possible that no managed fields were found for because other +// APIVersion and Kind populated. It is possible that no managed fields were found for because other // field managers have taken ownership of all the fields previously owned by fieldManager, or because // the fieldManager never owned fields any fields. // eviction must be a unmodified Eviction API object that was retrieved from the Kubernetes API. @@ -58,8 +58,19 @@ func Eviction(name, namespace string) *EvictionApplyConfiguration { // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! func ExtractEviction(eviction *v1beta1.Eviction, fieldManager string) (*EvictionApplyConfiguration, error) { + return extractEviction(eviction, fieldManager, "") +} + +// ExtractEvictionStatus is the same as ExtractEviction except +// that it extracts the status subresource applied configuration. +// Experimental! +func ExtractEvictionStatus(eviction *v1beta1.Eviction, fieldManager string) (*EvictionApplyConfiguration, error) { + return extractEviction(eviction, fieldManager, "status") +} + +func extractEviction(eviction *v1beta1.Eviction, fieldManager string, subresource string) (*EvictionApplyConfiguration, error) { b := &EvictionApplyConfiguration{} - err := managedfields.ExtractInto(eviction, internal.Parser().Type("io.k8s.api.policy.v1beta1.Eviction"), fieldManager, b) + err := managedfields.ExtractInto(eviction, internal.Parser().Type("io.k8s.api.policy.v1beta1.Eviction"), fieldManager, b, subresource) if err != nil { return nil, err } diff --git a/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/poddisruptionbudget.go b/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/poddisruptionbudget.go index 36d039445165d..fc28026f5d1e8 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/poddisruptionbudget.go +++ b/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/poddisruptionbudget.go @@ -50,7 +50,7 @@ func PodDisruptionBudget(name, namespace string) *PodDisruptionBudgetApplyConfig // ExtractPodDisruptionBudget extracts the applied configuration owned by fieldManager from // podDisruptionBudget. If no managedFields are found in podDisruptionBudget for fieldManager, a // PodDisruptionBudgetApplyConfiguration is returned with only the Name, Namespace (if applicable), -// APIVersion and Kind populated. Is is possible that no managed fields were found for because other +// APIVersion and Kind populated. It is possible that no managed fields were found for because other // field managers have taken ownership of all the fields previously owned by fieldManager, or because // the fieldManager never owned fields any fields. // podDisruptionBudget must be a unmodified PodDisruptionBudget API object that was retrieved from the Kubernetes API. @@ -59,8 +59,19 @@ func PodDisruptionBudget(name, namespace string) *PodDisruptionBudgetApplyConfig // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! 
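As context for the generated setters in the eviction files above: every "With" method returns the receiver, so configurations are meant to be built by chaining. A minimal sketch of that usage, not part of this patch; the pod name, namespace, labels, and grace period are illustrative assumptions:

package example

import (
	metav1 "k8s.io/client-go/applyconfigurations/meta/v1"
	policyv1 "k8s.io/client-go/applyconfigurations/policy/v1"
)

// evictionFor chains the generated setters; ensureObjectMetaApplyConfigurationExists
// lazily allocates the embedded ObjectMeta configuration on first use.
func evictionFor(pod, namespace string) *policyv1.EvictionApplyConfiguration {
	grace := int64(30) // assumed grace period for illustration
	return policyv1.Eviction(pod, namespace).
		WithLabels(map[string]string{"app": "demo"}).
		WithDeleteOptions(metav1.DeleteOptions().
			WithGracePeriodSeconds(grace))
}
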
func ExtractPodDisruptionBudget(podDisruptionBudget *policyv1beta1.PodDisruptionBudget, fieldManager string) (*PodDisruptionBudgetApplyConfiguration, error) { + return extractPodDisruptionBudget(podDisruptionBudget, fieldManager, "") +} + +// ExtractPodDisruptionBudgetStatus is the same as ExtractPodDisruptionBudget except +// that it extracts the status subresource applied configuration. +// Experimental! +func ExtractPodDisruptionBudgetStatus(podDisruptionBudget *policyv1beta1.PodDisruptionBudget, fieldManager string) (*PodDisruptionBudgetApplyConfiguration, error) { + return extractPodDisruptionBudget(podDisruptionBudget, fieldManager, "status") +} + +func extractPodDisruptionBudget(podDisruptionBudget *policyv1beta1.PodDisruptionBudget, fieldManager string, subresource string) (*PodDisruptionBudgetApplyConfiguration, error) { b := &PodDisruptionBudgetApplyConfiguration{} - err := managedfields.ExtractInto(podDisruptionBudget, internal.Parser().Type("io.k8s.api.policy.v1beta1.PodDisruptionBudget"), fieldManager, b) + err := managedfields.ExtractInto(podDisruptionBudget, internal.Parser().Type("io.k8s.api.policy.v1beta1.PodDisruptionBudget"), fieldManager, b, subresource) if err != nil { return nil, err } diff --git a/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/podsecuritypolicy.go b/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/podsecuritypolicy.go index 3ff73633c4968..0500824c9b3b8 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/podsecuritypolicy.go +++ b/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/podsecuritypolicy.go @@ -48,7 +48,7 @@ func PodSecurityPolicy(name string) *PodSecurityPolicyApplyConfiguration { // ExtractPodSecurityPolicy extracts the applied configuration owned by fieldManager from // podSecurityPolicy. If no managedFields are found in podSecurityPolicy for fieldManager, a // PodSecurityPolicyApplyConfiguration is returned with only the Name, Namespace (if applicable), -// APIVersion and Kind populated. Is is possible that no managed fields were found for because other +// APIVersion and Kind populated. It is possible that no managed fields were found for because other // field managers have taken ownership of all the fields previously owned by fieldManager, or because // the fieldManager never owned fields any fields. // podSecurityPolicy must be a unmodified PodSecurityPolicy API object that was retrieved from the Kubernetes API. @@ -57,8 +57,19 @@ func PodSecurityPolicy(name string) *PodSecurityPolicyApplyConfiguration { // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! func ExtractPodSecurityPolicy(podSecurityPolicy *policyv1beta1.PodSecurityPolicy, fieldManager string) (*PodSecurityPolicyApplyConfiguration, error) { + return extractPodSecurityPolicy(podSecurityPolicy, fieldManager, "") +} + +// ExtractPodSecurityPolicyStatus is the same as ExtractPodSecurityPolicy except +// that it extracts the status subresource applied configuration. +// Experimental! 
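The mechanical pattern repeated across all of these files: each exported Extract* keeps its signature, gains an Extract*Status sibling, and both delegate to an unexported helper that forwards a subresource discriminator ("" for the main resource, "status" for the status subresource) to managedfields.ExtractInto. The discriminator keys on the Subresource field that managedFields entries record for updates made through a subresource endpoint. A small sketch of inspecting those entries; the function and variable names are assumptions:

package example

import (
	"fmt"

	policyv1beta1 "k8s.io/api/policy/v1beta1"
)

// dumpManagedFields prints which manager wrote which part of the object.
// Entries written through /status carry Subresource "status"; the generated
// Extract*Status helpers select exactly those entries.
func dumpManagedFields(pdb *policyv1beta1.PodDisruptionBudget) {
	for _, mf := range pdb.GetManagedFields() {
		fmt.Println(mf.Manager, mf.Operation, mf.Subresource)
	}
}
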
+func ExtractPodSecurityPolicyStatus(podSecurityPolicy *policyv1beta1.PodSecurityPolicy, fieldManager string) (*PodSecurityPolicyApplyConfiguration, error) { + return extractPodSecurityPolicy(podSecurityPolicy, fieldManager, "status") +} + +func extractPodSecurityPolicy(podSecurityPolicy *policyv1beta1.PodSecurityPolicy, fieldManager string, subresource string) (*PodSecurityPolicyApplyConfiguration, error) { b := &PodSecurityPolicyApplyConfiguration{} - err := managedfields.ExtractInto(podSecurityPolicy, internal.Parser().Type("io.k8s.api.policy.v1beta1.PodSecurityPolicy"), fieldManager, b) + err := managedfields.ExtractInto(podSecurityPolicy, internal.Parser().Type("io.k8s.api.policy.v1beta1.PodSecurityPolicy"), fieldManager, b, subresource) if err != nil { return nil, err } diff --git a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/clusterrole.go b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/clusterrole.go index 92ade083e44f2..1b4b515963d15 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/clusterrole.go +++ b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/clusterrole.go @@ -49,7 +49,7 @@ func ClusterRole(name string) *ClusterRoleApplyConfiguration { // ExtractClusterRole extracts the applied configuration owned by fieldManager from // clusterRole. If no managedFields are found in clusterRole for fieldManager, a // ClusterRoleApplyConfiguration is returned with only the Name, Namespace (if applicable), -// APIVersion and Kind populated. Is is possible that no managed fields were found for because other +// APIVersion and Kind populated. It is possible that no managed fields were found for because other // field managers have taken ownership of all the fields previously owned by fieldManager, or because // the fieldManager never owned fields any fields. // clusterRole must be a unmodified ClusterRole API object that was retrieved from the Kubernetes API. @@ -58,8 +58,19 @@ func ClusterRole(name string) *ClusterRoleApplyConfiguration { // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! func ExtractClusterRole(clusterRole *apirbacv1.ClusterRole, fieldManager string) (*ClusterRoleApplyConfiguration, error) { + return extractClusterRole(clusterRole, fieldManager, "") +} + +// ExtractClusterRoleStatus is the same as ExtractClusterRole except +// that it extracts the status subresource applied configuration. +// Experimental! 
+func ExtractClusterRoleStatus(clusterRole *apirbacv1.ClusterRole, fieldManager string) (*ClusterRoleApplyConfiguration, error) { + return extractClusterRole(clusterRole, fieldManager, "status") +} + +func extractClusterRole(clusterRole *apirbacv1.ClusterRole, fieldManager string, subresource string) (*ClusterRoleApplyConfiguration, error) { b := &ClusterRoleApplyConfiguration{} - err := managedfields.ExtractInto(clusterRole, internal.Parser().Type("io.k8s.api.rbac.v1.ClusterRole"), fieldManager, b) + err := managedfields.ExtractInto(clusterRole, internal.Parser().Type("io.k8s.api.rbac.v1.ClusterRole"), fieldManager, b, subresource) if err != nil { return nil, err } diff --git a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/clusterrolebinding.go b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/clusterrolebinding.go index 7bbbdaec9884f..d17fc6e55a384 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/clusterrolebinding.go +++ b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/clusterrolebinding.go @@ -49,7 +49,7 @@ func ClusterRoleBinding(name string) *ClusterRoleBindingApplyConfiguration { // ExtractClusterRoleBinding extracts the applied configuration owned by fieldManager from // clusterRoleBinding. If no managedFields are found in clusterRoleBinding for fieldManager, a // ClusterRoleBindingApplyConfiguration is returned with only the Name, Namespace (if applicable), -// APIVersion and Kind populated. Is is possible that no managed fields were found for because other +// APIVersion and Kind populated. It is possible that no managed fields were found for because other // field managers have taken ownership of all the fields previously owned by fieldManager, or because // the fieldManager never owned fields any fields. // clusterRoleBinding must be a unmodified ClusterRoleBinding API object that was retrieved from the Kubernetes API. @@ -58,8 +58,19 @@ func ClusterRoleBinding(name string) *ClusterRoleBindingApplyConfiguration { // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! func ExtractClusterRoleBinding(clusterRoleBinding *apirbacv1.ClusterRoleBinding, fieldManager string) (*ClusterRoleBindingApplyConfiguration, error) { + return extractClusterRoleBinding(clusterRoleBinding, fieldManager, "") +} + +// ExtractClusterRoleBindingStatus is the same as ExtractClusterRoleBinding except +// that it extracts the status subresource applied configuration. +// Experimental! 
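The intended consumer flow for these Extract helpers is the server-side-apply round trip: read the live object, extract only the fields your manager owns, mutate them, and re-apply. A hedged sketch using the rbac/v1 helpers above; the clientset variable, manager string, and label are assumptions:

package example

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	rbacv1ac "k8s.io/client-go/applyconfigurations/rbac/v1"
	"k8s.io/client-go/kubernetes"
)

// relabelClusterRole re-applies only the fields "example-manager" owns, plus
// one extra label; fields owned by other managers are left untouched.
func relabelClusterRole(ctx context.Context, client kubernetes.Interface, name string) error {
	live, err := client.RbacV1().ClusterRoles().Get(ctx, name, metav1.GetOptions{})
	if err != nil {
		return err
	}
	ac, err := rbacv1ac.ExtractClusterRole(live, "example-manager")
	if err != nil {
		return err
	}
	ac.WithLabels(map[string]string{"team": "platform"})
	_, err = client.RbacV1().ClusterRoles().Apply(ctx, ac, metav1.ApplyOptions{
		FieldManager: "example-manager",
		Force:        true,
	})
	return err
}
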
+func ExtractClusterRoleBindingStatus(clusterRoleBinding *apirbacv1.ClusterRoleBinding, fieldManager string) (*ClusterRoleBindingApplyConfiguration, error) { + return extractClusterRoleBinding(clusterRoleBinding, fieldManager, "status") +} + +func extractClusterRoleBinding(clusterRoleBinding *apirbacv1.ClusterRoleBinding, fieldManager string, subresource string) (*ClusterRoleBindingApplyConfiguration, error) { b := &ClusterRoleBindingApplyConfiguration{} - err := managedfields.ExtractInto(clusterRoleBinding, internal.Parser().Type("io.k8s.api.rbac.v1.ClusterRoleBinding"), fieldManager, b) + err := managedfields.ExtractInto(clusterRoleBinding, internal.Parser().Type("io.k8s.api.rbac.v1.ClusterRoleBinding"), fieldManager, b, subresource) if err != nil { return nil, err } diff --git a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/role.go b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/role.go index 772122f456412..854441bbd99d3 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/role.go +++ b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/role.go @@ -49,7 +49,7 @@ func Role(name, namespace string) *RoleApplyConfiguration { // ExtractRole extracts the applied configuration owned by fieldManager from // role. If no managedFields are found in role for fieldManager, a // RoleApplyConfiguration is returned with only the Name, Namespace (if applicable), -// APIVersion and Kind populated. Is is possible that no managed fields were found for because other +// APIVersion and Kind populated. It is possible that no managed fields were found for because other // field managers have taken ownership of all the fields previously owned by fieldManager, or because // the fieldManager never owned fields any fields. // role must be a unmodified Role API object that was retrieved from the Kubernetes API. @@ -58,8 +58,19 @@ func Role(name, namespace string) *RoleApplyConfiguration { // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! func ExtractRole(role *apirbacv1.Role, fieldManager string) (*RoleApplyConfiguration, error) { + return extractRole(role, fieldManager, "") +} + +// ExtractRoleStatus is the same as ExtractRole except +// that it extracts the status subresource applied configuration. +// Experimental! +func ExtractRoleStatus(role *apirbacv1.Role, fieldManager string) (*RoleApplyConfiguration, error) { + return extractRole(role, fieldManager, "status") +} + +func extractRole(role *apirbacv1.Role, fieldManager string, subresource string) (*RoleApplyConfiguration, error) { b := &RoleApplyConfiguration{} - err := managedfields.ExtractInto(role, internal.Parser().Type("io.k8s.api.rbac.v1.Role"), fieldManager, b) + err := managedfields.ExtractInto(role, internal.Parser().Type("io.k8s.api.rbac.v1.Role"), fieldManager, b, subresource) if err != nil { return nil, err } diff --git a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/rolebinding.go b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/rolebinding.go index 85dc476cd4412..88075a70d0b8b 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/rolebinding.go +++ b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/rolebinding.go @@ -50,7 +50,7 @@ func RoleBinding(name, namespace string) *RoleBindingApplyConfiguration { // ExtractRoleBinding extracts the applied configuration owned by fieldManager from // roleBinding. 
If no managedFields are found in roleBinding for fieldManager, a // RoleBindingApplyConfiguration is returned with only the Name, Namespace (if applicable), -// APIVersion and Kind populated. Is is possible that no managed fields were found for because other +// APIVersion and Kind populated. It is possible that no managed fields were found for because other // field managers have taken ownership of all the fields previously owned by fieldManager, or because // the fieldManager never owned fields any fields. // roleBinding must be a unmodified RoleBinding API object that was retrieved from the Kubernetes API. @@ -59,8 +59,19 @@ func RoleBinding(name, namespace string) *RoleBindingApplyConfiguration { // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! func ExtractRoleBinding(roleBinding *apirbacv1.RoleBinding, fieldManager string) (*RoleBindingApplyConfiguration, error) { + return extractRoleBinding(roleBinding, fieldManager, "") +} + +// ExtractRoleBindingStatus is the same as ExtractRoleBinding except +// that it extracts the status subresource applied configuration. +// Experimental! +func ExtractRoleBindingStatus(roleBinding *apirbacv1.RoleBinding, fieldManager string) (*RoleBindingApplyConfiguration, error) { + return extractRoleBinding(roleBinding, fieldManager, "status") +} + +func extractRoleBinding(roleBinding *apirbacv1.RoleBinding, fieldManager string, subresource string) (*RoleBindingApplyConfiguration, error) { b := &RoleBindingApplyConfiguration{} - err := managedfields.ExtractInto(roleBinding, internal.Parser().Type("io.k8s.api.rbac.v1.RoleBinding"), fieldManager, b) + err := managedfields.ExtractInto(roleBinding, internal.Parser().Type("io.k8s.api.rbac.v1.RoleBinding"), fieldManager, b, subresource) if err != nil { return nil, err } diff --git a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/clusterrole.go b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/clusterrole.go index 4e2d4a63c3b10..ae9cfc474903f 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/clusterrole.go +++ b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/clusterrole.go @@ -49,7 +49,7 @@ func ClusterRole(name string) *ClusterRoleApplyConfiguration { // ExtractClusterRole extracts the applied configuration owned by fieldManager from // clusterRole. If no managedFields are found in clusterRole for fieldManager, a // ClusterRoleApplyConfiguration is returned with only the Name, Namespace (if applicable), -// APIVersion and Kind populated. Is is possible that no managed fields were found for because other +// APIVersion and Kind populated. It is possible that no managed fields were found for because other // field managers have taken ownership of all the fields previously owned by fieldManager, or because // the fieldManager never owned fields any fields. // clusterRole must be a unmodified ClusterRole API object that was retrieved from the Kubernetes API. @@ -58,8 +58,19 @@ func ClusterRole(name string) *ClusterRoleApplyConfiguration { // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! func ExtractClusterRole(clusterRole *rbacv1alpha1.ClusterRole, fieldManager string) (*ClusterRoleApplyConfiguration, error) { + return extractClusterRole(clusterRole, fieldManager, "") +} + +// ExtractClusterRoleStatus is the same as ExtractClusterRole except +// that it extracts the status subresource applied configuration. +// Experimental! 
+func ExtractClusterRoleStatus(clusterRole *rbacv1alpha1.ClusterRole, fieldManager string) (*ClusterRoleApplyConfiguration, error) { + return extractClusterRole(clusterRole, fieldManager, "status") +} + +func extractClusterRole(clusterRole *rbacv1alpha1.ClusterRole, fieldManager string, subresource string) (*ClusterRoleApplyConfiguration, error) { b := &ClusterRoleApplyConfiguration{} - err := managedfields.ExtractInto(clusterRole, internal.Parser().Type("io.k8s.api.rbac.v1alpha1.ClusterRole"), fieldManager, b) + err := managedfields.ExtractInto(clusterRole, internal.Parser().Type("io.k8s.api.rbac.v1alpha1.ClusterRole"), fieldManager, b, subresource) if err != nil { return nil, err } diff --git a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/clusterrolebinding.go b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/clusterrolebinding.go index 10c93e5a5f755..f5a2c03bb6ab0 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/clusterrolebinding.go +++ b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/clusterrolebinding.go @@ -49,7 +49,7 @@ func ClusterRoleBinding(name string) *ClusterRoleBindingApplyConfiguration { // ExtractClusterRoleBinding extracts the applied configuration owned by fieldManager from // clusterRoleBinding. If no managedFields are found in clusterRoleBinding for fieldManager, a // ClusterRoleBindingApplyConfiguration is returned with only the Name, Namespace (if applicable), -// APIVersion and Kind populated. Is is possible that no managed fields were found for because other +// APIVersion and Kind populated. It is possible that no managed fields were found for because other // field managers have taken ownership of all the fields previously owned by fieldManager, or because // the fieldManager never owned fields any fields. // clusterRoleBinding must be a unmodified ClusterRoleBinding API object that was retrieved from the Kubernetes API. @@ -58,8 +58,19 @@ func ClusterRoleBinding(name string) *ClusterRoleBindingApplyConfiguration { // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! func ExtractClusterRoleBinding(clusterRoleBinding *rbacv1alpha1.ClusterRoleBinding, fieldManager string) (*ClusterRoleBindingApplyConfiguration, error) { + return extractClusterRoleBinding(clusterRoleBinding, fieldManager, "") +} + +// ExtractClusterRoleBindingStatus is the same as ExtractClusterRoleBinding except +// that it extracts the status subresource applied configuration. +// Experimental! 
+func ExtractClusterRoleBindingStatus(clusterRoleBinding *rbacv1alpha1.ClusterRoleBinding, fieldManager string) (*ClusterRoleBindingApplyConfiguration, error) { + return extractClusterRoleBinding(clusterRoleBinding, fieldManager, "status") +} + +func extractClusterRoleBinding(clusterRoleBinding *rbacv1alpha1.ClusterRoleBinding, fieldManager string, subresource string) (*ClusterRoleBindingApplyConfiguration, error) { b := &ClusterRoleBindingApplyConfiguration{} - err := managedfields.ExtractInto(clusterRoleBinding, internal.Parser().Type("io.k8s.api.rbac.v1alpha1.ClusterRoleBinding"), fieldManager, b) + err := managedfields.ExtractInto(clusterRoleBinding, internal.Parser().Type("io.k8s.api.rbac.v1alpha1.ClusterRoleBinding"), fieldManager, b, subresource) if err != nil { return nil, err } diff --git a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/role.go b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/role.go index d9bf15b4c9731..ec5bebceccaf2 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/role.go +++ b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/role.go @@ -49,7 +49,7 @@ func Role(name, namespace string) *RoleApplyConfiguration { // ExtractRole extracts the applied configuration owned by fieldManager from // role. If no managedFields are found in role for fieldManager, a // RoleApplyConfiguration is returned with only the Name, Namespace (if applicable), -// APIVersion and Kind populated. Is is possible that no managed fields were found for because other +// APIVersion and Kind populated. It is possible that no managed fields were found for because other // field managers have taken ownership of all the fields previously owned by fieldManager, or because // the fieldManager never owned fields any fields. // role must be a unmodified Role API object that was retrieved from the Kubernetes API. @@ -58,8 +58,19 @@ func Role(name, namespace string) *RoleApplyConfiguration { // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! func ExtractRole(role *rbacv1alpha1.Role, fieldManager string) (*RoleApplyConfiguration, error) { + return extractRole(role, fieldManager, "") +} + +// ExtractRoleStatus is the same as ExtractRole except +// that it extracts the status subresource applied configuration. +// Experimental! +func ExtractRoleStatus(role *rbacv1alpha1.Role, fieldManager string) (*RoleApplyConfiguration, error) { + return extractRole(role, fieldManager, "status") +} + +func extractRole(role *rbacv1alpha1.Role, fieldManager string, subresource string) (*RoleApplyConfiguration, error) { b := &RoleApplyConfiguration{} - err := managedfields.ExtractInto(role, internal.Parser().Type("io.k8s.api.rbac.v1alpha1.Role"), fieldManager, b) + err := managedfields.ExtractInto(role, internal.Parser().Type("io.k8s.api.rbac.v1alpha1.Role"), fieldManager, b, subresource) if err != nil { return nil, err } diff --git a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/rolebinding.go b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/rolebinding.go index ca6f563a37383..930f9489ff265 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/rolebinding.go +++ b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/rolebinding.go @@ -50,7 +50,7 @@ func RoleBinding(name, namespace string) *RoleBindingApplyConfiguration { // ExtractRoleBinding extracts the applied configuration owned by fieldManager from // roleBinding. 
If no managedFields are found in roleBinding for fieldManager, a // RoleBindingApplyConfiguration is returned with only the Name, Namespace (if applicable), -// APIVersion and Kind populated. Is is possible that no managed fields were found for because other +// APIVersion and Kind populated. It is possible that no managed fields were found for because other // field managers have taken ownership of all the fields previously owned by fieldManager, or because // the fieldManager never owned fields any fields. // roleBinding must be a unmodified RoleBinding API object that was retrieved from the Kubernetes API. @@ -59,8 +59,19 @@ func RoleBinding(name, namespace string) *RoleBindingApplyConfiguration { // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! func ExtractRoleBinding(roleBinding *rbacv1alpha1.RoleBinding, fieldManager string) (*RoleBindingApplyConfiguration, error) { + return extractRoleBinding(roleBinding, fieldManager, "") +} + +// ExtractRoleBindingStatus is the same as ExtractRoleBinding except +// that it extracts the status subresource applied configuration. +// Experimental! +func ExtractRoleBindingStatus(roleBinding *rbacv1alpha1.RoleBinding, fieldManager string) (*RoleBindingApplyConfiguration, error) { + return extractRoleBinding(roleBinding, fieldManager, "status") +} + +func extractRoleBinding(roleBinding *rbacv1alpha1.RoleBinding, fieldManager string, subresource string) (*RoleBindingApplyConfiguration, error) { b := &RoleBindingApplyConfiguration{} - err := managedfields.ExtractInto(roleBinding, internal.Parser().Type("io.k8s.api.rbac.v1alpha1.RoleBinding"), fieldManager, b) + err := managedfields.ExtractInto(roleBinding, internal.Parser().Type("io.k8s.api.rbac.v1alpha1.RoleBinding"), fieldManager, b, subresource) if err != nil { return nil, err } diff --git a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/clusterrole.go b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/clusterrole.go index 4f5f28170ab6f..1574f8f6606ea 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/clusterrole.go +++ b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/clusterrole.go @@ -49,7 +49,7 @@ func ClusterRole(name string) *ClusterRoleApplyConfiguration { // ExtractClusterRole extracts the applied configuration owned by fieldManager from // clusterRole. If no managedFields are found in clusterRole for fieldManager, a // ClusterRoleApplyConfiguration is returned with only the Name, Namespace (if applicable), -// APIVersion and Kind populated. Is is possible that no managed fields were found for because other +// APIVersion and Kind populated. It is possible that no managed fields were found for because other // field managers have taken ownership of all the fields previously owned by fieldManager, or because // the fieldManager never owned fields any fields. // clusterRole must be a unmodified ClusterRole API object that was retrieved from the Kubernetes API. @@ -58,8 +58,19 @@ func ClusterRole(name string) *ClusterRoleApplyConfiguration { // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! func ExtractClusterRole(clusterRole *rbacv1beta1.ClusterRole, fieldManager string) (*ClusterRoleApplyConfiguration, error) { + return extractClusterRole(clusterRole, fieldManager, "") +} + +// ExtractClusterRoleStatus is the same as ExtractClusterRole except +// that it extracts the status subresource applied configuration. 
+// Experimental! +func ExtractClusterRoleStatus(clusterRole *rbacv1beta1.ClusterRole, fieldManager string) (*ClusterRoleApplyConfiguration, error) { + return extractClusterRole(clusterRole, fieldManager, "status") +} + +func extractClusterRole(clusterRole *rbacv1beta1.ClusterRole, fieldManager string, subresource string) (*ClusterRoleApplyConfiguration, error) { b := &ClusterRoleApplyConfiguration{} - err := managedfields.ExtractInto(clusterRole, internal.Parser().Type("io.k8s.api.rbac.v1beta1.ClusterRole"), fieldManager, b) + err := managedfields.ExtractInto(clusterRole, internal.Parser().Type("io.k8s.api.rbac.v1beta1.ClusterRole"), fieldManager, b, subresource) if err != nil { return nil, err } diff --git a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/clusterrolebinding.go b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/clusterrolebinding.go index 6fbe0aa984205..152a23b4927f6 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/clusterrolebinding.go +++ b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/clusterrolebinding.go @@ -49,7 +49,7 @@ func ClusterRoleBinding(name string) *ClusterRoleBindingApplyConfiguration { // ExtractClusterRoleBinding extracts the applied configuration owned by fieldManager from // clusterRoleBinding. If no managedFields are found in clusterRoleBinding for fieldManager, a // ClusterRoleBindingApplyConfiguration is returned with only the Name, Namespace (if applicable), -// APIVersion and Kind populated. Is is possible that no managed fields were found for because other +// APIVersion and Kind populated. It is possible that no managed fields were found for because other // field managers have taken ownership of all the fields previously owned by fieldManager, or because // the fieldManager never owned fields any fields. // clusterRoleBinding must be a unmodified ClusterRoleBinding API object that was retrieved from the Kubernetes API. @@ -58,8 +58,19 @@ func ClusterRoleBinding(name string) *ClusterRoleBindingApplyConfiguration { // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! func ExtractClusterRoleBinding(clusterRoleBinding *rbacv1beta1.ClusterRoleBinding, fieldManager string) (*ClusterRoleBindingApplyConfiguration, error) { + return extractClusterRoleBinding(clusterRoleBinding, fieldManager, "") +} + +// ExtractClusterRoleBindingStatus is the same as ExtractClusterRoleBinding except +// that it extracts the status subresource applied configuration. +// Experimental! 
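Because extraction returns only the fields a given manager owns, it also works as an ownership probe before a potentially destructive apply. A minimal sketch against the v1beta1 types above; names are assumptions:

package example

import (
	rbacv1beta1 "k8s.io/api/rbac/v1beta1"
	rbacv1beta1ac "k8s.io/client-go/applyconfigurations/rbac/v1beta1"
)

// ownsRules reports whether fieldManager still owns any policy rules on the
// ClusterRole; if not, another manager has taken them over since the last apply.
func ownsRules(cr *rbacv1beta1.ClusterRole, fieldManager string) (bool, error) {
	ac, err := rbacv1beta1ac.ExtractClusterRole(cr, fieldManager)
	if err != nil {
		return false, err
	}
	return len(ac.Rules) > 0, nil
}
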
+func ExtractClusterRoleBindingStatus(clusterRoleBinding *rbacv1beta1.ClusterRoleBinding, fieldManager string) (*ClusterRoleBindingApplyConfiguration, error) { + return extractClusterRoleBinding(clusterRoleBinding, fieldManager, "status") +} + +func extractClusterRoleBinding(clusterRoleBinding *rbacv1beta1.ClusterRoleBinding, fieldManager string, subresource string) (*ClusterRoleBindingApplyConfiguration, error) { b := &ClusterRoleBindingApplyConfiguration{} - err := managedfields.ExtractInto(clusterRoleBinding, internal.Parser().Type("io.k8s.api.rbac.v1beta1.ClusterRoleBinding"), fieldManager, b) + err := managedfields.ExtractInto(clusterRoleBinding, internal.Parser().Type("io.k8s.api.rbac.v1beta1.ClusterRoleBinding"), fieldManager, b, subresource) if err != nil { return nil, err } diff --git a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/role.go b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/role.go index 7e3744afba555..dc6ff2a40f572 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/role.go +++ b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/role.go @@ -49,7 +49,7 @@ func Role(name, namespace string) *RoleApplyConfiguration { // ExtractRole extracts the applied configuration owned by fieldManager from // role. If no managedFields are found in role for fieldManager, a // RoleApplyConfiguration is returned with only the Name, Namespace (if applicable), -// APIVersion and Kind populated. Is is possible that no managed fields were found for because other +// APIVersion and Kind populated. It is possible that no managed fields were found for because other // field managers have taken ownership of all the fields previously owned by fieldManager, or because // the fieldManager never owned fields any fields. // role must be a unmodified Role API object that was retrieved from the Kubernetes API. @@ -58,8 +58,19 @@ func Role(name, namespace string) *RoleApplyConfiguration { // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! func ExtractRole(role *rbacv1beta1.Role, fieldManager string) (*RoleApplyConfiguration, error) { + return extractRole(role, fieldManager, "") +} + +// ExtractRoleStatus is the same as ExtractRole except +// that it extracts the status subresource applied configuration. +// Experimental! +func ExtractRoleStatus(role *rbacv1beta1.Role, fieldManager string) (*RoleApplyConfiguration, error) { + return extractRole(role, fieldManager, "status") +} + +func extractRole(role *rbacv1beta1.Role, fieldManager string, subresource string) (*RoleApplyConfiguration, error) { b := &RoleApplyConfiguration{} - err := managedfields.ExtractInto(role, internal.Parser().Type("io.k8s.api.rbac.v1beta1.Role"), fieldManager, b) + err := managedfields.ExtractInto(role, internal.Parser().Type("io.k8s.api.rbac.v1beta1.Role"), fieldManager, b, subresource) if err != nil { return nil, err } diff --git a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/rolebinding.go b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/rolebinding.go index ee3e6c55de1e6..aeef6ec8fab16 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/rolebinding.go +++ b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/rolebinding.go @@ -50,7 +50,7 @@ func RoleBinding(name, namespace string) *RoleBindingApplyConfiguration { // ExtractRoleBinding extracts the applied configuration owned by fieldManager from // roleBinding. 
If no managedFields are found in roleBinding for fieldManager, a // RoleBindingApplyConfiguration is returned with only the Name, Namespace (if applicable), -// APIVersion and Kind populated. Is is possible that no managed fields were found for because other +// APIVersion and Kind populated. It is possible that no managed fields were found for because other // field managers have taken ownership of all the fields previously owned by fieldManager, or because // the fieldManager never owned fields any fields. // roleBinding must be a unmodified RoleBinding API object that was retrieved from the Kubernetes API. @@ -59,8 +59,19 @@ func RoleBinding(name, namespace string) *RoleBindingApplyConfiguration { // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! func ExtractRoleBinding(roleBinding *rbacv1beta1.RoleBinding, fieldManager string) (*RoleBindingApplyConfiguration, error) { + return extractRoleBinding(roleBinding, fieldManager, "") +} + +// ExtractRoleBindingStatus is the same as ExtractRoleBinding except +// that it extracts the status subresource applied configuration. +// Experimental! +func ExtractRoleBindingStatus(roleBinding *rbacv1beta1.RoleBinding, fieldManager string) (*RoleBindingApplyConfiguration, error) { + return extractRoleBinding(roleBinding, fieldManager, "status") +} + +func extractRoleBinding(roleBinding *rbacv1beta1.RoleBinding, fieldManager string, subresource string) (*RoleBindingApplyConfiguration, error) { b := &RoleBindingApplyConfiguration{} - err := managedfields.ExtractInto(roleBinding, internal.Parser().Type("io.k8s.api.rbac.v1beta1.RoleBinding"), fieldManager, b) + err := managedfields.ExtractInto(roleBinding, internal.Parser().Type("io.k8s.api.rbac.v1beta1.RoleBinding"), fieldManager, b, subresource) if err != nil { return nil, err } diff --git a/vendor/k8s.io/client-go/applyconfigurations/scheduling/v1/priorityclass.go b/vendor/k8s.io/client-go/applyconfigurations/scheduling/v1/priorityclass.go index 6a942ecf1cb2c..5d528ea7cf82d 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/scheduling/v1/priorityclass.go +++ b/vendor/k8s.io/client-go/applyconfigurations/scheduling/v1/priorityclass.go @@ -52,7 +52,7 @@ func PriorityClass(name string) *PriorityClassApplyConfiguration { // ExtractPriorityClass extracts the applied configuration owned by fieldManager from // priorityClass. If no managedFields are found in priorityClass for fieldManager, a // PriorityClassApplyConfiguration is returned with only the Name, Namespace (if applicable), -// APIVersion and Kind populated. Is is possible that no managed fields were found for because other +// APIVersion and Kind populated. It is possible that no managed fields were found for because other // field managers have taken ownership of all the fields previously owned by fieldManager, or because // the fieldManager never owned fields any fields. // priorityClass must be a unmodified PriorityClass API object that was retrieved from the Kubernetes API. @@ -61,8 +61,19 @@ func PriorityClass(name string) *PriorityClassApplyConfiguration { // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! 
func ExtractPriorityClass(priorityClass *schedulingv1.PriorityClass, fieldManager string) (*PriorityClassApplyConfiguration, error) { + return extractPriorityClass(priorityClass, fieldManager, "") +} + +// ExtractPriorityClassStatus is the same as ExtractPriorityClass except +// that it extracts the status subresource applied configuration. +// Experimental! +func ExtractPriorityClassStatus(priorityClass *schedulingv1.PriorityClass, fieldManager string) (*PriorityClassApplyConfiguration, error) { + return extractPriorityClass(priorityClass, fieldManager, "status") +} + +func extractPriorityClass(priorityClass *schedulingv1.PriorityClass, fieldManager string, subresource string) (*PriorityClassApplyConfiguration, error) { b := &PriorityClassApplyConfiguration{} - err := managedfields.ExtractInto(priorityClass, internal.Parser().Type("io.k8s.api.scheduling.v1.PriorityClass"), fieldManager, b) + err := managedfields.ExtractInto(priorityClass, internal.Parser().Type("io.k8s.api.scheduling.v1.PriorityClass"), fieldManager, b, subresource) if err != nil { return nil, err } diff --git a/vendor/k8s.io/client-go/applyconfigurations/scheduling/v1alpha1/priorityclass.go b/vendor/k8s.io/client-go/applyconfigurations/scheduling/v1alpha1/priorityclass.go index 46dc278d9a9be..2b2aac3165676 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/scheduling/v1alpha1/priorityclass.go +++ b/vendor/k8s.io/client-go/applyconfigurations/scheduling/v1alpha1/priorityclass.go @@ -52,7 +52,7 @@ func PriorityClass(name string) *PriorityClassApplyConfiguration { // ExtractPriorityClass extracts the applied configuration owned by fieldManager from // priorityClass. If no managedFields are found in priorityClass for fieldManager, a // PriorityClassApplyConfiguration is returned with only the Name, Namespace (if applicable), -// APIVersion and Kind populated. Is is possible that no managed fields were found for because other +// APIVersion and Kind populated. It is possible that no managed fields were found for because other // field managers have taken ownership of all the fields previously owned by fieldManager, or because // the fieldManager never owned fields any fields. // priorityClass must be a unmodified PriorityClass API object that was retrieved from the Kubernetes API. @@ -61,8 +61,19 @@ func PriorityClass(name string) *PriorityClassApplyConfiguration { // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! func ExtractPriorityClass(priorityClass *v1alpha1.PriorityClass, fieldManager string) (*PriorityClassApplyConfiguration, error) { + return extractPriorityClass(priorityClass, fieldManager, "") +} + +// ExtractPriorityClassStatus is the same as ExtractPriorityClass except +// that it extracts the status subresource applied configuration. +// Experimental! 
+func ExtractPriorityClassStatus(priorityClass *v1alpha1.PriorityClass, fieldManager string) (*PriorityClassApplyConfiguration, error) { + return extractPriorityClass(priorityClass, fieldManager, "status") +} + +func extractPriorityClass(priorityClass *v1alpha1.PriorityClass, fieldManager string, subresource string) (*PriorityClassApplyConfiguration, error) { b := &PriorityClassApplyConfiguration{} - err := managedfields.ExtractInto(priorityClass, internal.Parser().Type("io.k8s.api.scheduling.v1alpha1.PriorityClass"), fieldManager, b) + err := managedfields.ExtractInto(priorityClass, internal.Parser().Type("io.k8s.api.scheduling.v1alpha1.PriorityClass"), fieldManager, b, subresource) if err != nil { return nil, err } diff --git a/vendor/k8s.io/client-go/applyconfigurations/scheduling/v1beta1/priorityclass.go b/vendor/k8s.io/client-go/applyconfigurations/scheduling/v1beta1/priorityclass.go index 9327b7b619ac8..14b1feea62dc4 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/scheduling/v1beta1/priorityclass.go +++ b/vendor/k8s.io/client-go/applyconfigurations/scheduling/v1beta1/priorityclass.go @@ -52,7 +52,7 @@ func PriorityClass(name string) *PriorityClassApplyConfiguration { // ExtractPriorityClass extracts the applied configuration owned by fieldManager from // priorityClass. If no managedFields are found in priorityClass for fieldManager, a // PriorityClassApplyConfiguration is returned with only the Name, Namespace (if applicable), -// APIVersion and Kind populated. Is is possible that no managed fields were found for because other +// APIVersion and Kind populated. It is possible that no managed fields were found for because other // field managers have taken ownership of all the fields previously owned by fieldManager, or because // the fieldManager never owned fields any fields. // priorityClass must be a unmodified PriorityClass API object that was retrieved from the Kubernetes API. @@ -61,8 +61,19 @@ func PriorityClass(name string) *PriorityClassApplyConfiguration { // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! func ExtractPriorityClass(priorityClass *v1beta1.PriorityClass, fieldManager string) (*PriorityClassApplyConfiguration, error) { + return extractPriorityClass(priorityClass, fieldManager, "") +} + +// ExtractPriorityClassStatus is the same as ExtractPriorityClass except +// that it extracts the status subresource applied configuration. +// Experimental! 
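Worth noting: the generator emits the Status variant uniformly, even for kinds such as PriorityClass (and the RBAC types above) that expose no status subresource. For those kinds no managedFields entry ever carries Subresource "status", so, per the doc comments, the extracted configuration comes back with only the identifying fields populated. A sketch of that degenerate case, with assumed package aliases and manager name:

package example

import (
	schedulingv1beta1 "k8s.io/api/scheduling/v1beta1"
	schedulingv1beta1ac "k8s.io/client-go/applyconfigurations/scheduling/v1beta1"
)

// statusConfig illustrates the degenerate case: PriorityClass has no status
// subresource, so only Kind, APIVersion, and Name come back populated.
func statusConfig(pc *schedulingv1beta1.PriorityClass) (*schedulingv1beta1ac.PriorityClassApplyConfiguration, error) {
	return schedulingv1beta1ac.ExtractPriorityClassStatus(pc, "example-manager")
}
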
+func ExtractPriorityClassStatus(priorityClass *v1beta1.PriorityClass, fieldManager string) (*PriorityClassApplyConfiguration, error) { + return extractPriorityClass(priorityClass, fieldManager, "status") +} + +func extractPriorityClass(priorityClass *v1beta1.PriorityClass, fieldManager string, subresource string) (*PriorityClassApplyConfiguration, error) { b := &PriorityClassApplyConfiguration{} - err := managedfields.ExtractInto(priorityClass, internal.Parser().Type("io.k8s.api.scheduling.v1beta1.PriorityClass"), fieldManager, b) + err := managedfields.ExtractInto(priorityClass, internal.Parser().Type("io.k8s.api.scheduling.v1beta1.PriorityClass"), fieldManager, b, subresource) if err != nil { return nil, err } diff --git a/vendor/k8s.io/client-go/applyconfigurations/storage/v1/csidriver.go b/vendor/k8s.io/client-go/applyconfigurations/storage/v1/csidriver.go index 31b35446ec7e1..cf9073a0fd314 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/storage/v1/csidriver.go +++ b/vendor/k8s.io/client-go/applyconfigurations/storage/v1/csidriver.go @@ -48,7 +48,7 @@ func CSIDriver(name string) *CSIDriverApplyConfiguration { // ExtractCSIDriver extracts the applied configuration owned by fieldManager from // cSIDriver. If no managedFields are found in cSIDriver for fieldManager, a // CSIDriverApplyConfiguration is returned with only the Name, Namespace (if applicable), -// APIVersion and Kind populated. Is is possible that no managed fields were found for because other +// APIVersion and Kind populated. It is possible that no managed fields were found for because other // field managers have taken ownership of all the fields previously owned by fieldManager, or because // the fieldManager never owned fields any fields. // cSIDriver must be a unmodified CSIDriver API object that was retrieved from the Kubernetes API. @@ -57,8 +57,19 @@ func CSIDriver(name string) *CSIDriverApplyConfiguration { // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! func ExtractCSIDriver(cSIDriver *apistoragev1.CSIDriver, fieldManager string) (*CSIDriverApplyConfiguration, error) { + return extractCSIDriver(cSIDriver, fieldManager, "") +} + +// ExtractCSIDriverStatus is the same as ExtractCSIDriver except +// that it extracts the status subresource applied configuration. +// Experimental! +func ExtractCSIDriverStatus(cSIDriver *apistoragev1.CSIDriver, fieldManager string) (*CSIDriverApplyConfiguration, error) { + return extractCSIDriver(cSIDriver, fieldManager, "status") +} + +func extractCSIDriver(cSIDriver *apistoragev1.CSIDriver, fieldManager string, subresource string) (*CSIDriverApplyConfiguration, error) { b := &CSIDriverApplyConfiguration{} - err := managedfields.ExtractInto(cSIDriver, internal.Parser().Type("io.k8s.api.storage.v1.CSIDriver"), fieldManager, b) + err := managedfields.ExtractInto(cSIDriver, internal.Parser().Type("io.k8s.api.storage.v1.CSIDriver"), fieldManager, b, subresource) if err != nil { return nil, err } diff --git a/vendor/k8s.io/client-go/applyconfigurations/storage/v1/csinode.go b/vendor/k8s.io/client-go/applyconfigurations/storage/v1/csinode.go index 8da150bd50df2..e65582d8908ca 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/storage/v1/csinode.go +++ b/vendor/k8s.io/client-go/applyconfigurations/storage/v1/csinode.go @@ -48,7 +48,7 @@ func CSINode(name string) *CSINodeApplyConfiguration { // ExtractCSINode extracts the applied configuration owned by fieldManager from // cSINode. 
If no managedFields are found in cSINode for fieldManager, a // CSINodeApplyConfiguration is returned with only the Name, Namespace (if applicable), -// APIVersion and Kind populated. Is is possible that no managed fields were found for because other +// APIVersion and Kind populated. It is possible that no managed fields were found for because other // field managers have taken ownership of all the fields previously owned by fieldManager, or because // the fieldManager never owned fields any fields. // cSINode must be a unmodified CSINode API object that was retrieved from the Kubernetes API. @@ -57,8 +57,19 @@ func CSINode(name string) *CSINodeApplyConfiguration { // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! func ExtractCSINode(cSINode *apistoragev1.CSINode, fieldManager string) (*CSINodeApplyConfiguration, error) { + return extractCSINode(cSINode, fieldManager, "") +} + +// ExtractCSINodeStatus is the same as ExtractCSINode except +// that it extracts the status subresource applied configuration. +// Experimental! +func ExtractCSINodeStatus(cSINode *apistoragev1.CSINode, fieldManager string) (*CSINodeApplyConfiguration, error) { + return extractCSINode(cSINode, fieldManager, "status") +} + +func extractCSINode(cSINode *apistoragev1.CSINode, fieldManager string, subresource string) (*CSINodeApplyConfiguration, error) { b := &CSINodeApplyConfiguration{} - err := managedfields.ExtractInto(cSINode, internal.Parser().Type("io.k8s.api.storage.v1.CSINode"), fieldManager, b) + err := managedfields.ExtractInto(cSINode, internal.Parser().Type("io.k8s.api.storage.v1.CSINode"), fieldManager, b, subresource) if err != nil { return nil, err } diff --git a/vendor/k8s.io/client-go/applyconfigurations/storage/v1/storageclass.go b/vendor/k8s.io/client-go/applyconfigurations/storage/v1/storageclass.go index ac5b8ca8d1020..2df999c24a9a0 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/storage/v1/storageclass.go +++ b/vendor/k8s.io/client-go/applyconfigurations/storage/v1/storageclass.go @@ -56,7 +56,7 @@ func StorageClass(name string) *StorageClassApplyConfiguration { // ExtractStorageClass extracts the applied configuration owned by fieldManager from // storageClass. If no managedFields are found in storageClass for fieldManager, a // StorageClassApplyConfiguration is returned with only the Name, Namespace (if applicable), -// APIVersion and Kind populated. Is is possible that no managed fields were found for because other +// APIVersion and Kind populated. It is possible that no managed fields were found for because other // field managers have taken ownership of all the fields previously owned by fieldManager, or because // the fieldManager never owned fields any fields. // storageClass must be a unmodified StorageClass API object that was retrieved from the Kubernetes API. @@ -65,8 +65,19 @@ func StorageClass(name string) *StorageClassApplyConfiguration { // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! func ExtractStorageClass(storageClass *storagev1.StorageClass, fieldManager string) (*StorageClassApplyConfiguration, error) { + return extractStorageClass(storageClass, fieldManager, "") +} + +// ExtractStorageClassStatus is the same as ExtractStorageClass except +// that it extracts the status subresource applied configuration. +// Experimental! 
+func ExtractStorageClassStatus(storageClass *storagev1.StorageClass, fieldManager string) (*StorageClassApplyConfiguration, error) { + return extractStorageClass(storageClass, fieldManager, "status") +} + +func extractStorageClass(storageClass *storagev1.StorageClass, fieldManager string, subresource string) (*StorageClassApplyConfiguration, error) { b := &StorageClassApplyConfiguration{} - err := managedfields.ExtractInto(storageClass, internal.Parser().Type("io.k8s.api.storage.v1.StorageClass"), fieldManager, b) + err := managedfields.ExtractInto(storageClass, internal.Parser().Type("io.k8s.api.storage.v1.StorageClass"), fieldManager, b, subresource) if err != nil { return nil, err } diff --git a/vendor/k8s.io/client-go/applyconfigurations/storage/v1/volumeattachment.go b/vendor/k8s.io/client-go/applyconfigurations/storage/v1/volumeattachment.go index 97c37c609d4e5..5fd3d4d8e6fbf 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/storage/v1/volumeattachment.go +++ b/vendor/k8s.io/client-go/applyconfigurations/storage/v1/volumeattachment.go @@ -49,7 +49,7 @@ func VolumeAttachment(name string) *VolumeAttachmentApplyConfiguration { // ExtractVolumeAttachment extracts the applied configuration owned by fieldManager from // volumeAttachment. If no managedFields are found in volumeAttachment for fieldManager, a // VolumeAttachmentApplyConfiguration is returned with only the Name, Namespace (if applicable), -// APIVersion and Kind populated. Is is possible that no managed fields were found for because other +// APIVersion and Kind populated. It is possible that no managed fields were found for because other // field managers have taken ownership of all the fields previously owned by fieldManager, or because // the fieldManager never owned fields any fields. // volumeAttachment must be a unmodified VolumeAttachment API object that was retrieved from the Kubernetes API. @@ -58,8 +58,19 @@ func VolumeAttachment(name string) *VolumeAttachmentApplyConfiguration { // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! func ExtractVolumeAttachment(volumeAttachment *apistoragev1.VolumeAttachment, fieldManager string) (*VolumeAttachmentApplyConfiguration, error) { + return extractVolumeAttachment(volumeAttachment, fieldManager, "") +} + +// ExtractVolumeAttachmentStatus is the same as ExtractVolumeAttachment except +// that it extracts the status subresource applied configuration. +// Experimental! 
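VolumeAttachment is one kind in this batch where the spec/status split genuinely matters: spec and status usually have different writers, with the attach/detach controller updating status via /status. A sketch of pulling just the status-side ownership; the manager name and function are assumptions for illustration:

package example

import (
	"fmt"

	storagev1 "k8s.io/api/storage/v1"
	storagev1ac "k8s.io/client-go/applyconfigurations/storage/v1"
)

// printAttached extracts only what statusManager owns through the /status
// subresource and reads the Attached flag if that manager set it.
func printAttached(va *storagev1.VolumeAttachment, statusManager string) error {
	ac, err := storagev1ac.ExtractVolumeAttachmentStatus(va, statusManager)
	if err != nil {
		return err
	}
	if ac.Status != nil && ac.Status.Attached != nil {
		fmt.Println("attached:", *ac.Status.Attached)
	}
	return nil
}
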
+func ExtractVolumeAttachmentStatus(volumeAttachment *apistoragev1.VolumeAttachment, fieldManager string) (*VolumeAttachmentApplyConfiguration, error) { + return extractVolumeAttachment(volumeAttachment, fieldManager, "status") +} + +func extractVolumeAttachment(volumeAttachment *apistoragev1.VolumeAttachment, fieldManager string, subresource string) (*VolumeAttachmentApplyConfiguration, error) { b := &VolumeAttachmentApplyConfiguration{} - err := managedfields.ExtractInto(volumeAttachment, internal.Parser().Type("io.k8s.api.storage.v1.VolumeAttachment"), fieldManager, b) + err := managedfields.ExtractInto(volumeAttachment, internal.Parser().Type("io.k8s.api.storage.v1.VolumeAttachment"), fieldManager, b, subresource) if err != nil { return nil, err } diff --git a/vendor/k8s.io/client-go/applyconfigurations/storage/v1alpha1/csistoragecapacity.go b/vendor/k8s.io/client-go/applyconfigurations/storage/v1alpha1/csistoragecapacity.go index 32cf1b9f8b5a6..a7ad0717bf89a 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/storage/v1alpha1/csistoragecapacity.go +++ b/vendor/k8s.io/client-go/applyconfigurations/storage/v1alpha1/csistoragecapacity.go @@ -53,7 +53,7 @@ func CSIStorageCapacity(name, namespace string) *CSIStorageCapacityApplyConfigur // ExtractCSIStorageCapacity extracts the applied configuration owned by fieldManager from // cSIStorageCapacity. If no managedFields are found in cSIStorageCapacity for fieldManager, a // CSIStorageCapacityApplyConfiguration is returned with only the Name, Namespace (if applicable), -// APIVersion and Kind populated. Is is possible that no managed fields were found for because other +// APIVersion and Kind populated. It is possible that no managed fields were found for because other // field managers have taken ownership of all the fields previously owned by fieldManager, or because // the fieldManager never owned fields any fields. // cSIStorageCapacity must be a unmodified CSIStorageCapacity API object that was retrieved from the Kubernetes API. @@ -62,8 +62,19 @@ func CSIStorageCapacity(name, namespace string) *CSIStorageCapacityApplyConfigur // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! func ExtractCSIStorageCapacity(cSIStorageCapacity *v1alpha1.CSIStorageCapacity, fieldManager string) (*CSIStorageCapacityApplyConfiguration, error) { + return extractCSIStorageCapacity(cSIStorageCapacity, fieldManager, "") +} + +// ExtractCSIStorageCapacityStatus is the same as ExtractCSIStorageCapacity except +// that it extracts the status subresource applied configuration. +// Experimental! 
+func ExtractCSIStorageCapacityStatus(cSIStorageCapacity *v1alpha1.CSIStorageCapacity, fieldManager string) (*CSIStorageCapacityApplyConfiguration, error) { + return extractCSIStorageCapacity(cSIStorageCapacity, fieldManager, "status") +} + +func extractCSIStorageCapacity(cSIStorageCapacity *v1alpha1.CSIStorageCapacity, fieldManager string, subresource string) (*CSIStorageCapacityApplyConfiguration, error) { b := &CSIStorageCapacityApplyConfiguration{} - err := managedfields.ExtractInto(cSIStorageCapacity, internal.Parser().Type("io.k8s.api.storage.v1alpha1.CSIStorageCapacity"), fieldManager, b) + err := managedfields.ExtractInto(cSIStorageCapacity, internal.Parser().Type("io.k8s.api.storage.v1alpha1.CSIStorageCapacity"), fieldManager, b, subresource) if err != nil { return nil, err } diff --git a/vendor/k8s.io/client-go/applyconfigurations/storage/v1alpha1/volumeattachment.go b/vendor/k8s.io/client-go/applyconfigurations/storage/v1alpha1/volumeattachment.go index bcc0f77295ba0..7a30919649ca1 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/storage/v1alpha1/volumeattachment.go +++ b/vendor/k8s.io/client-go/applyconfigurations/storage/v1alpha1/volumeattachment.go @@ -49,7 +49,7 @@ func VolumeAttachment(name string) *VolumeAttachmentApplyConfiguration { // ExtractVolumeAttachment extracts the applied configuration owned by fieldManager from // volumeAttachment. If no managedFields are found in volumeAttachment for fieldManager, a // VolumeAttachmentApplyConfiguration is returned with only the Name, Namespace (if applicable), -// APIVersion and Kind populated. Is is possible that no managed fields were found for because other +// APIVersion and Kind populated. It is possible that no managed fields were found for because other // field managers have taken ownership of all the fields previously owned by fieldManager, or because // the fieldManager never owned fields any fields. // volumeAttachment must be a unmodified VolumeAttachment API object that was retrieved from the Kubernetes API. @@ -58,8 +58,19 @@ func VolumeAttachment(name string) *VolumeAttachmentApplyConfiguration { // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! func ExtractVolumeAttachment(volumeAttachment *storagev1alpha1.VolumeAttachment, fieldManager string) (*VolumeAttachmentApplyConfiguration, error) { + return extractVolumeAttachment(volumeAttachment, fieldManager, "") +} + +// ExtractVolumeAttachmentStatus is the same as ExtractVolumeAttachment except +// that it extracts the status subresource applied configuration. +// Experimental! 
+func ExtractVolumeAttachmentStatus(volumeAttachment *storagev1alpha1.VolumeAttachment, fieldManager string) (*VolumeAttachmentApplyConfiguration, error) { + return extractVolumeAttachment(volumeAttachment, fieldManager, "status") +} + +func extractVolumeAttachment(volumeAttachment *storagev1alpha1.VolumeAttachment, fieldManager string, subresource string) (*VolumeAttachmentApplyConfiguration, error) { b := &VolumeAttachmentApplyConfiguration{} - err := managedfields.ExtractInto(volumeAttachment, internal.Parser().Type("io.k8s.api.storage.v1alpha1.VolumeAttachment"), fieldManager, b) + err := managedfields.ExtractInto(volumeAttachment, internal.Parser().Type("io.k8s.api.storage.v1alpha1.VolumeAttachment"), fieldManager, b, subresource) if err != nil { return nil, err } diff --git a/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/csidriver.go b/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/csidriver.go index aad9ce500abbb..4ff0fcdf199da 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/csidriver.go +++ b/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/csidriver.go @@ -48,7 +48,7 @@ func CSIDriver(name string) *CSIDriverApplyConfiguration { // ExtractCSIDriver extracts the applied configuration owned by fieldManager from // cSIDriver. If no managedFields are found in cSIDriver for fieldManager, a // CSIDriverApplyConfiguration is returned with only the Name, Namespace (if applicable), -// APIVersion and Kind populated. Is is possible that no managed fields were found for because other +// APIVersion and Kind populated. It is possible that no managed fields were found for because other // field managers have taken ownership of all the fields previously owned by fieldManager, or because // the fieldManager never owned fields any fields. // cSIDriver must be a unmodified CSIDriver API object that was retrieved from the Kubernetes API. @@ -57,8 +57,19 @@ func CSIDriver(name string) *CSIDriverApplyConfiguration { // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! func ExtractCSIDriver(cSIDriver *storagev1beta1.CSIDriver, fieldManager string) (*CSIDriverApplyConfiguration, error) { + return extractCSIDriver(cSIDriver, fieldManager, "") +} + +// ExtractCSIDriverStatus is the same as ExtractCSIDriver except +// that it extracts the status subresource applied configuration. +// Experimental! 
+func ExtractCSIDriverStatus(cSIDriver *storagev1beta1.CSIDriver, fieldManager string) (*CSIDriverApplyConfiguration, error) { + return extractCSIDriver(cSIDriver, fieldManager, "status") +} + +func extractCSIDriver(cSIDriver *storagev1beta1.CSIDriver, fieldManager string, subresource string) (*CSIDriverApplyConfiguration, error) { b := &CSIDriverApplyConfiguration{} - err := managedfields.ExtractInto(cSIDriver, internal.Parser().Type("io.k8s.api.storage.v1beta1.CSIDriver"), fieldManager, b) + err := managedfields.ExtractInto(cSIDriver, internal.Parser().Type("io.k8s.api.storage.v1beta1.CSIDriver"), fieldManager, b, subresource) if err != nil { return nil, err } diff --git a/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/csinode.go b/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/csinode.go index 5d151f28eaed9..fce97b456dea9 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/csinode.go +++ b/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/csinode.go @@ -48,7 +48,7 @@ func CSINode(name string) *CSINodeApplyConfiguration { // ExtractCSINode extracts the applied configuration owned by fieldManager from // cSINode. If no managedFields are found in cSINode for fieldManager, a // CSINodeApplyConfiguration is returned with only the Name, Namespace (if applicable), -// APIVersion and Kind populated. Is is possible that no managed fields were found for because other +// APIVersion and Kind populated. It is possible that no managed fields were found for because other // field managers have taken ownership of all the fields previously owned by fieldManager, or because // the fieldManager never owned fields any fields. // cSINode must be a unmodified CSINode API object that was retrieved from the Kubernetes API. @@ -57,8 +57,19 @@ func CSINode(name string) *CSINodeApplyConfiguration { // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! func ExtractCSINode(cSINode *storagev1beta1.CSINode, fieldManager string) (*CSINodeApplyConfiguration, error) { + return extractCSINode(cSINode, fieldManager, "") +} + +// ExtractCSINodeStatus is the same as ExtractCSINode except +// that it extracts the status subresource applied configuration. +// Experimental! +func ExtractCSINodeStatus(cSINode *storagev1beta1.CSINode, fieldManager string) (*CSINodeApplyConfiguration, error) { + return extractCSINode(cSINode, fieldManager, "status") +} + +func extractCSINode(cSINode *storagev1beta1.CSINode, fieldManager string, subresource string) (*CSINodeApplyConfiguration, error) { b := &CSINodeApplyConfiguration{} - err := managedfields.ExtractInto(cSINode, internal.Parser().Type("io.k8s.api.storage.v1beta1.CSINode"), fieldManager, b) + err := managedfields.ExtractInto(cSINode, internal.Parser().Type("io.k8s.api.storage.v1beta1.CSINode"), fieldManager, b, subresource) if err != nil { return nil, err } diff --git a/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/csistoragecapacity.go b/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/csistoragecapacity.go index e0c6bd6352059..afd2e2a9ef7dc 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/csistoragecapacity.go +++ b/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/csistoragecapacity.go @@ -53,7 +53,7 @@ func CSIStorageCapacity(name, namespace string) *CSIStorageCapacityApplyConfigur // ExtractCSIStorageCapacity extracts the applied configuration owned by fieldManager from // cSIStorageCapacity. 
If no managedFields are found in cSIStorageCapacity for fieldManager, a // CSIStorageCapacityApplyConfiguration is returned with only the Name, Namespace (if applicable), -// APIVersion and Kind populated. Is is possible that no managed fields were found for because other +// APIVersion and Kind populated. It is possible that no managed fields were found for because other // field managers have taken ownership of all the fields previously owned by fieldManager, or because // the fieldManager never owned fields any fields. // cSIStorageCapacity must be a unmodified CSIStorageCapacity API object that was retrieved from the Kubernetes API. @@ -62,8 +62,19 @@ func CSIStorageCapacity(name, namespace string) *CSIStorageCapacityApplyConfigur // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! func ExtractCSIStorageCapacity(cSIStorageCapacity *v1beta1.CSIStorageCapacity, fieldManager string) (*CSIStorageCapacityApplyConfiguration, error) { + return extractCSIStorageCapacity(cSIStorageCapacity, fieldManager, "") +} + +// ExtractCSIStorageCapacityStatus is the same as ExtractCSIStorageCapacity except +// that it extracts the status subresource applied configuration. +// Experimental! +func ExtractCSIStorageCapacityStatus(cSIStorageCapacity *v1beta1.CSIStorageCapacity, fieldManager string) (*CSIStorageCapacityApplyConfiguration, error) { + return extractCSIStorageCapacity(cSIStorageCapacity, fieldManager, "status") +} + +func extractCSIStorageCapacity(cSIStorageCapacity *v1beta1.CSIStorageCapacity, fieldManager string, subresource string) (*CSIStorageCapacityApplyConfiguration, error) { b := &CSIStorageCapacityApplyConfiguration{} - err := managedfields.ExtractInto(cSIStorageCapacity, internal.Parser().Type("io.k8s.api.storage.v1beta1.CSIStorageCapacity"), fieldManager, b) + err := managedfields.ExtractInto(cSIStorageCapacity, internal.Parser().Type("io.k8s.api.storage.v1beta1.CSIStorageCapacity"), fieldManager, b, subresource) if err != nil { return nil, err } diff --git a/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/storageclass.go b/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/storageclass.go index d88da6a20fd88..a4b924ee8cabc 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/storageclass.go +++ b/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/storageclass.go @@ -56,7 +56,7 @@ func StorageClass(name string) *StorageClassApplyConfiguration { // ExtractStorageClass extracts the applied configuration owned by fieldManager from // storageClass. If no managedFields are found in storageClass for fieldManager, a // StorageClassApplyConfiguration is returned with only the Name, Namespace (if applicable), -// APIVersion and Kind populated. Is is possible that no managed fields were found for because other +// APIVersion and Kind populated. It is possible that no managed fields were found for because other // field managers have taken ownership of all the fields previously owned by fieldManager, or because // the fieldManager never owned fields any fields. // storageClass must be a unmodified StorageClass API object that was retrieved from the Kubernetes API. @@ -65,8 +65,19 @@ func StorageClass(name string) *StorageClassApplyConfiguration { // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! 
func ExtractStorageClass(storageClass *v1beta1.StorageClass, fieldManager string) (*StorageClassApplyConfiguration, error) { + return extractStorageClass(storageClass, fieldManager, "") +} + +// ExtractStorageClassStatus is the same as ExtractStorageClass except +// that it extracts the status subresource applied configuration. +// Experimental! +func ExtractStorageClassStatus(storageClass *v1beta1.StorageClass, fieldManager string) (*StorageClassApplyConfiguration, error) { + return extractStorageClass(storageClass, fieldManager, "status") +} + +func extractStorageClass(storageClass *v1beta1.StorageClass, fieldManager string, subresource string) (*StorageClassApplyConfiguration, error) { b := &StorageClassApplyConfiguration{} - err := managedfields.ExtractInto(storageClass, internal.Parser().Type("io.k8s.api.storage.v1beta1.StorageClass"), fieldManager, b) + err := managedfields.ExtractInto(storageClass, internal.Parser().Type("io.k8s.api.storage.v1beta1.StorageClass"), fieldManager, b, subresource) if err != nil { return nil, err } diff --git a/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/volumeattachment.go b/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/volumeattachment.go index 3a3ed874f688b..553bfee98cbc7 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/volumeattachment.go +++ b/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/volumeattachment.go @@ -49,7 +49,7 @@ func VolumeAttachment(name string) *VolumeAttachmentApplyConfiguration { // ExtractVolumeAttachment extracts the applied configuration owned by fieldManager from // volumeAttachment. If no managedFields are found in volumeAttachment for fieldManager, a // VolumeAttachmentApplyConfiguration is returned with only the Name, Namespace (if applicable), -// APIVersion and Kind populated. Is is possible that no managed fields were found for because other +// APIVersion and Kind populated. It is possible that no managed fields were found for because other // field managers have taken ownership of all the fields previously owned by fieldManager, or because // the fieldManager never owned fields any fields. // volumeAttachment must be a unmodified VolumeAttachment API object that was retrieved from the Kubernetes API. @@ -58,8 +58,19 @@ func VolumeAttachment(name string) *VolumeAttachmentApplyConfiguration { // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! func ExtractVolumeAttachment(volumeAttachment *storagev1beta1.VolumeAttachment, fieldManager string) (*VolumeAttachmentApplyConfiguration, error) { + return extractVolumeAttachment(volumeAttachment, fieldManager, "") +} + +// ExtractVolumeAttachmentStatus is the same as ExtractVolumeAttachment except +// that it extracts the status subresource applied configuration. +// Experimental! 
+func ExtractVolumeAttachmentStatus(volumeAttachment *storagev1beta1.VolumeAttachment, fieldManager string) (*VolumeAttachmentApplyConfiguration, error) { + return extractVolumeAttachment(volumeAttachment, fieldManager, "status") +} + +func extractVolumeAttachment(volumeAttachment *storagev1beta1.VolumeAttachment, fieldManager string, subresource string) (*VolumeAttachmentApplyConfiguration, error) { b := &VolumeAttachmentApplyConfiguration{} - err := managedfields.ExtractInto(volumeAttachment, internal.Parser().Type("io.k8s.api.storage.v1beta1.VolumeAttachment"), fieldManager, b) + err := managedfields.ExtractInto(volumeAttachment, internal.Parser().Type("io.k8s.api.storage.v1beta1.VolumeAttachment"), fieldManager, b, subresource) if err != nil { return nil, err } diff --git a/vendor/k8s.io/client-go/discovery/discovery_client.go b/vendor/k8s.io/client-go/discovery/discovery_client.go index 57404e0b2b712..50e59c5d85cb6 100644 --- a/vendor/k8s.io/client-go/discovery/discovery_client.go +++ b/vendor/k8s.io/client-go/discovery/discovery_client.go @@ -20,12 +20,14 @@ import ( "context" "encoding/json" "fmt" + "net/http" "net/url" "sort" "strings" "sync" "time" + //nolint:staticcheck // SA1019 Keep using module since it's still being maintained and the api of google.golang.org/protobuf/proto differs "github.com/golang/protobuf/proto" openapi_v2 "github.com/googleapis/gnostic/openapiv2" @@ -41,7 +43,7 @@ import ( ) const ( - // defaultRetries is the number of times a resource discovery is repeated if an api group disappears on the fly (e.g. ThirdPartyResources). + // defaultRetries is the number of times a resource discovery is repeated if an api group disappears on the fly (e.g. CustomResourceDefinitions). defaultRetries = 2 // protobuf mime type mimePb = "application/com.github.proto-openapi.spec.v2@v1.0+protobuf" @@ -95,7 +97,7 @@ type ServerResourcesInterface interface { // // Deprecated: use ServerGroupsAndResources instead. ServerResources() ([]*metav1.APIResourceList, error) - // ServerResources returns the supported groups and resources for all groups and versions. + // ServerGroupsAndResources returns the supported groups and resources for all groups and versions. // // The returned group and resource lists might be non-nil with partial results even in the // case of non-nil error. @@ -481,12 +483,29 @@ func setDiscoveryDefaults(config *restclient.Config) error { // NewDiscoveryClientForConfig creates a new DiscoveryClient for the given config. This client // can be used to discover supported resources in the API server. +// NewDiscoveryClientForConfig is equivalent to NewDiscoveryClientForConfigAndClient(c, httpClient), +// where httpClient was generated with rest.HTTPClientFor(c). func NewDiscoveryClientForConfig(c *restclient.Config) (*DiscoveryClient, error) { config := *c if err := setDiscoveryDefaults(&config); err != nil { return nil, err } - client, err := restclient.UnversionedRESTClientFor(&config) + httpClient, err := restclient.HTTPClientFor(&config) + if err != nil { + return nil, err + } + return NewDiscoveryClientForConfigAndClient(&config, httpClient) +} + +// NewDiscoveryClientForConfigAndClient creates a new DiscoveryClient for the given config. This client +// can be used to discover supported resources in the API server. +// Note the http client provided takes precedence over the configured transport values. 
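+//
+// A hedged sketch of the intended call pattern (cfg is assumed to be an
+// existing *restclient.Config; the same httpClient may be shared with other
+// clients built from cfg so they reuse one transport):
+//
+//	httpClient, err := restclient.HTTPClientFor(cfg)
+//	// handle err ...
+//	dc, err := NewDiscoveryClientForConfigAndClient(cfg, httpClient)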
+func NewDiscoveryClientForConfigAndClient(c *restclient.Config, httpClient *http.Client) (*DiscoveryClient, error) { + config := *c + if err := setDiscoveryDefaults(&config); err != nil { + return nil, err + } + client, err := restclient.UnversionedRESTClientForConfigAndClient(&config, httpClient) return &DiscoveryClient{restClient: client, LegacyPrefix: "/api"}, err } diff --git a/vendor/k8s.io/client-go/kubernetes/clientset.go b/vendor/k8s.io/client-go/kubernetes/clientset.go index 55a236fac5d18..e46c0537f7855 100644 --- a/vendor/k8s.io/client-go/kubernetes/clientset.go +++ b/vendor/k8s.io/client-go/kubernetes/clientset.go @@ -20,6 +20,7 @@ package kubernetes import ( "fmt" + "net/http" discovery "k8s.io/client-go/discovery" admissionregistrationv1 "k8s.io/client-go/kubernetes/typed/admissionregistration/v1" @@ -33,6 +34,7 @@ import ( authorizationv1 "k8s.io/client-go/kubernetes/typed/authorization/v1" authorizationv1beta1 "k8s.io/client-go/kubernetes/typed/authorization/v1beta1" autoscalingv1 "k8s.io/client-go/kubernetes/typed/autoscaling/v1" + autoscalingv2 "k8s.io/client-go/kubernetes/typed/autoscaling/v2" autoscalingv2beta1 "k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1" autoscalingv2beta2 "k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2" batchv1 "k8s.io/client-go/kubernetes/typed/batch/v1" @@ -49,6 +51,7 @@ import ( extensionsv1beta1 "k8s.io/client-go/kubernetes/typed/extensions/v1beta1" flowcontrolv1alpha1 "k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1" flowcontrolv1beta1 "k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1" + flowcontrolv1beta2 "k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2" networkingv1 "k8s.io/client-go/kubernetes/typed/networking/v1" networkingv1beta1 "k8s.io/client-go/kubernetes/typed/networking/v1beta1" nodev1 "k8s.io/client-go/kubernetes/typed/node/v1" @@ -82,6 +85,7 @@ type Interface interface { AuthorizationV1() authorizationv1.AuthorizationV1Interface AuthorizationV1beta1() authorizationv1beta1.AuthorizationV1beta1Interface AutoscalingV1() autoscalingv1.AutoscalingV1Interface + AutoscalingV2() autoscalingv2.AutoscalingV2Interface AutoscalingV2beta1() autoscalingv2beta1.AutoscalingV2beta1Interface AutoscalingV2beta2() autoscalingv2beta2.AutoscalingV2beta2Interface BatchV1() batchv1.BatchV1Interface @@ -98,6 +102,7 @@ type Interface interface { ExtensionsV1beta1() extensionsv1beta1.ExtensionsV1beta1Interface FlowcontrolV1alpha1() flowcontrolv1alpha1.FlowcontrolV1alpha1Interface FlowcontrolV1beta1() flowcontrolv1beta1.FlowcontrolV1beta1Interface + FlowcontrolV1beta2() flowcontrolv1beta2.FlowcontrolV1beta2Interface NetworkingV1() networkingv1.NetworkingV1Interface NetworkingV1beta1() networkingv1beta1.NetworkingV1beta1Interface NodeV1() nodev1.NodeV1Interface @@ -131,6 +136,7 @@ type Clientset struct { authorizationV1 *authorizationv1.AuthorizationV1Client authorizationV1beta1 *authorizationv1beta1.AuthorizationV1beta1Client autoscalingV1 *autoscalingv1.AutoscalingV1Client + autoscalingV2 *autoscalingv2.AutoscalingV2Client autoscalingV2beta1 *autoscalingv2beta1.AutoscalingV2beta1Client autoscalingV2beta2 *autoscalingv2beta2.AutoscalingV2beta2Client batchV1 *batchv1.BatchV1Client @@ -147,6 +153,7 @@ type Clientset struct { extensionsV1beta1 *extensionsv1beta1.ExtensionsV1beta1Client flowcontrolV1alpha1 *flowcontrolv1alpha1.FlowcontrolV1alpha1Client flowcontrolV1beta1 *flowcontrolv1beta1.FlowcontrolV1beta1Client + flowcontrolV1beta2 *flowcontrolv1beta2.FlowcontrolV1beta2Client networkingV1 *networkingv1.NetworkingV1Client 
networkingV1beta1 *networkingv1beta1.NetworkingV1beta1Client nodeV1 *nodev1.NodeV1Client @@ -220,6 +227,11 @@ func (c *Clientset) AutoscalingV1() autoscalingv1.AutoscalingV1Interface { return c.autoscalingV1 } +// AutoscalingV2 retrieves the AutoscalingV2Client +func (c *Clientset) AutoscalingV2() autoscalingv2.AutoscalingV2Interface { + return c.autoscalingV2 +} + // AutoscalingV2beta1 retrieves the AutoscalingV2beta1Client func (c *Clientset) AutoscalingV2beta1() autoscalingv2beta1.AutoscalingV2beta1Interface { return c.autoscalingV2beta1 @@ -300,6 +312,11 @@ func (c *Clientset) FlowcontrolV1beta1() flowcontrolv1beta1.FlowcontrolV1beta1In return c.flowcontrolV1beta1 } +// FlowcontrolV1beta2 retrieves the FlowcontrolV1beta2Client +func (c *Clientset) FlowcontrolV1beta2() flowcontrolv1beta2.FlowcontrolV1beta2Interface { + return c.flowcontrolV1beta2 +} + // NetworkingV1 retrieves the NetworkingV1Client func (c *Clientset) NetworkingV1() networkingv1.NetworkingV1Interface { return c.networkingV1 @@ -391,190 +408,221 @@ func (c *Clientset) Discovery() discovery.DiscoveryInterface { // NewForConfig creates a new Clientset for the given config. // If config's RateLimiter is not set and QPS and Burst are acceptable, // NewForConfig will generate a rate-limiter in configShallowCopy. +// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), +// where httpClient was generated with rest.HTTPClientFor(c). func NewForConfig(c *rest.Config) (*Clientset, error) { configShallowCopy := *c + + if configShallowCopy.UserAgent == "" { + configShallowCopy.UserAgent = rest.DefaultKubernetesUserAgent() + } + + // share the transport between all clients + httpClient, err := rest.HTTPClientFor(&configShallowCopy) + if err != nil { + return nil, err + } + + return NewForConfigAndClient(&configShallowCopy, httpClient) +} + +// NewForConfigAndClient creates a new Clientset for the given config and http client. +// Note the http client provided takes precedence over the configured transport values. +// If config's RateLimiter is not set and QPS and Burst are acceptable, +// NewForConfigAndClient will generate a rate-limiter in configShallowCopy. 
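+//
+// A minimal sketch, assuming cfg is a valid *rest.Config (error handling
+// elided); both clientsets below share the same underlying transport:
+//
+//	httpClient, _ := rest.HTTPClientFor(cfg)
+//	csA, _ := NewForConfigAndClient(cfg, httpClient)
+//	csB, _ := NewForConfigAndClient(cfg, httpClient)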
+func NewForConfigAndClient(c *rest.Config, httpClient *http.Client) (*Clientset, error) { + configShallowCopy := *c if configShallowCopy.RateLimiter == nil && configShallowCopy.QPS > 0 { if configShallowCopy.Burst <= 0 { return nil, fmt.Errorf("burst is required to be greater than 0 when RateLimiter is not set and QPS is set to greater than 0") } configShallowCopy.RateLimiter = flowcontrol.NewTokenBucketRateLimiter(configShallowCopy.QPS, configShallowCopy.Burst) } + var cs Clientset var err error - cs.admissionregistrationV1, err = admissionregistrationv1.NewForConfig(&configShallowCopy) + cs.admissionregistrationV1, err = admissionregistrationv1.NewForConfigAndClient(&configShallowCopy, httpClient) if err != nil { return nil, err } - cs.admissionregistrationV1beta1, err = admissionregistrationv1beta1.NewForConfig(&configShallowCopy) + cs.admissionregistrationV1beta1, err = admissionregistrationv1beta1.NewForConfigAndClient(&configShallowCopy, httpClient) if err != nil { return nil, err } - cs.internalV1alpha1, err = internalv1alpha1.NewForConfig(&configShallowCopy) + cs.internalV1alpha1, err = internalv1alpha1.NewForConfigAndClient(&configShallowCopy, httpClient) if err != nil { return nil, err } - cs.appsV1, err = appsv1.NewForConfig(&configShallowCopy) + cs.appsV1, err = appsv1.NewForConfigAndClient(&configShallowCopy, httpClient) if err != nil { return nil, err } - cs.appsV1beta1, err = appsv1beta1.NewForConfig(&configShallowCopy) + cs.appsV1beta1, err = appsv1beta1.NewForConfigAndClient(&configShallowCopy, httpClient) if err != nil { return nil, err } - cs.appsV1beta2, err = appsv1beta2.NewForConfig(&configShallowCopy) + cs.appsV1beta2, err = appsv1beta2.NewForConfigAndClient(&configShallowCopy, httpClient) if err != nil { return nil, err } - cs.authenticationV1, err = authenticationv1.NewForConfig(&configShallowCopy) + cs.authenticationV1, err = authenticationv1.NewForConfigAndClient(&configShallowCopy, httpClient) if err != nil { return nil, err } - cs.authenticationV1beta1, err = authenticationv1beta1.NewForConfig(&configShallowCopy) + cs.authenticationV1beta1, err = authenticationv1beta1.NewForConfigAndClient(&configShallowCopy, httpClient) if err != nil { return nil, err } - cs.authorizationV1, err = authorizationv1.NewForConfig(&configShallowCopy) + cs.authorizationV1, err = authorizationv1.NewForConfigAndClient(&configShallowCopy, httpClient) if err != nil { return nil, err } - cs.authorizationV1beta1, err = authorizationv1beta1.NewForConfig(&configShallowCopy) + cs.authorizationV1beta1, err = authorizationv1beta1.NewForConfigAndClient(&configShallowCopy, httpClient) if err != nil { return nil, err } - cs.autoscalingV1, err = autoscalingv1.NewForConfig(&configShallowCopy) + cs.autoscalingV1, err = autoscalingv1.NewForConfigAndClient(&configShallowCopy, httpClient) if err != nil { return nil, err } - cs.autoscalingV2beta1, err = autoscalingv2beta1.NewForConfig(&configShallowCopy) + cs.autoscalingV2, err = autoscalingv2.NewForConfigAndClient(&configShallowCopy, httpClient) if err != nil { return nil, err } - cs.autoscalingV2beta2, err = autoscalingv2beta2.NewForConfig(&configShallowCopy) + cs.autoscalingV2beta1, err = autoscalingv2beta1.NewForConfigAndClient(&configShallowCopy, httpClient) if err != nil { return nil, err } - cs.batchV1, err = batchv1.NewForConfig(&configShallowCopy) + cs.autoscalingV2beta2, err = autoscalingv2beta2.NewForConfigAndClient(&configShallowCopy, httpClient) if err != nil { return nil, err } - cs.batchV1beta1, err = 
batchv1beta1.NewForConfig(&configShallowCopy) + cs.batchV1, err = batchv1.NewForConfigAndClient(&configShallowCopy, httpClient) if err != nil { return nil, err } - cs.certificatesV1, err = certificatesv1.NewForConfig(&configShallowCopy) + cs.batchV1beta1, err = batchv1beta1.NewForConfigAndClient(&configShallowCopy, httpClient) if err != nil { return nil, err } - cs.certificatesV1beta1, err = certificatesv1beta1.NewForConfig(&configShallowCopy) + cs.certificatesV1, err = certificatesv1.NewForConfigAndClient(&configShallowCopy, httpClient) if err != nil { return nil, err } - cs.coordinationV1beta1, err = coordinationv1beta1.NewForConfig(&configShallowCopy) + cs.certificatesV1beta1, err = certificatesv1beta1.NewForConfigAndClient(&configShallowCopy, httpClient) if err != nil { return nil, err } - cs.coordinationV1, err = coordinationv1.NewForConfig(&configShallowCopy) + cs.coordinationV1beta1, err = coordinationv1beta1.NewForConfigAndClient(&configShallowCopy, httpClient) if err != nil { return nil, err } - cs.coreV1, err = corev1.NewForConfig(&configShallowCopy) + cs.coordinationV1, err = coordinationv1.NewForConfigAndClient(&configShallowCopy, httpClient) if err != nil { return nil, err } - cs.discoveryV1, err = discoveryv1.NewForConfig(&configShallowCopy) + cs.coreV1, err = corev1.NewForConfigAndClient(&configShallowCopy, httpClient) if err != nil { return nil, err } - cs.discoveryV1beta1, err = discoveryv1beta1.NewForConfig(&configShallowCopy) + cs.discoveryV1, err = discoveryv1.NewForConfigAndClient(&configShallowCopy, httpClient) if err != nil { return nil, err } - cs.eventsV1, err = eventsv1.NewForConfig(&configShallowCopy) + cs.discoveryV1beta1, err = discoveryv1beta1.NewForConfigAndClient(&configShallowCopy, httpClient) if err != nil { return nil, err } - cs.eventsV1beta1, err = eventsv1beta1.NewForConfig(&configShallowCopy) + cs.eventsV1, err = eventsv1.NewForConfigAndClient(&configShallowCopy, httpClient) if err != nil { return nil, err } - cs.extensionsV1beta1, err = extensionsv1beta1.NewForConfig(&configShallowCopy) + cs.eventsV1beta1, err = eventsv1beta1.NewForConfigAndClient(&configShallowCopy, httpClient) if err != nil { return nil, err } - cs.flowcontrolV1alpha1, err = flowcontrolv1alpha1.NewForConfig(&configShallowCopy) + cs.extensionsV1beta1, err = extensionsv1beta1.NewForConfigAndClient(&configShallowCopy, httpClient) if err != nil { return nil, err } - cs.flowcontrolV1beta1, err = flowcontrolv1beta1.NewForConfig(&configShallowCopy) + cs.flowcontrolV1alpha1, err = flowcontrolv1alpha1.NewForConfigAndClient(&configShallowCopy, httpClient) if err != nil { return nil, err } - cs.networkingV1, err = networkingv1.NewForConfig(&configShallowCopy) + cs.flowcontrolV1beta1, err = flowcontrolv1beta1.NewForConfigAndClient(&configShallowCopy, httpClient) if err != nil { return nil, err } - cs.networkingV1beta1, err = networkingv1beta1.NewForConfig(&configShallowCopy) + cs.flowcontrolV1beta2, err = flowcontrolv1beta2.NewForConfigAndClient(&configShallowCopy, httpClient) if err != nil { return nil, err } - cs.nodeV1, err = nodev1.NewForConfig(&configShallowCopy) + cs.networkingV1, err = networkingv1.NewForConfigAndClient(&configShallowCopy, httpClient) if err != nil { return nil, err } - cs.nodeV1alpha1, err = nodev1alpha1.NewForConfig(&configShallowCopy) + cs.networkingV1beta1, err = networkingv1beta1.NewForConfigAndClient(&configShallowCopy, httpClient) if err != nil { return nil, err } - cs.nodeV1beta1, err = nodev1beta1.NewForConfig(&configShallowCopy) + cs.nodeV1, err = 
nodev1.NewForConfigAndClient(&configShallowCopy, httpClient) if err != nil { return nil, err } - cs.policyV1, err = policyv1.NewForConfig(&configShallowCopy) + cs.nodeV1alpha1, err = nodev1alpha1.NewForConfigAndClient(&configShallowCopy, httpClient) if err != nil { return nil, err } - cs.policyV1beta1, err = policyv1beta1.NewForConfig(&configShallowCopy) + cs.nodeV1beta1, err = nodev1beta1.NewForConfigAndClient(&configShallowCopy, httpClient) if err != nil { return nil, err } - cs.rbacV1, err = rbacv1.NewForConfig(&configShallowCopy) + cs.policyV1, err = policyv1.NewForConfigAndClient(&configShallowCopy, httpClient) if err != nil { return nil, err } - cs.rbacV1beta1, err = rbacv1beta1.NewForConfig(&configShallowCopy) + cs.policyV1beta1, err = policyv1beta1.NewForConfigAndClient(&configShallowCopy, httpClient) if err != nil { return nil, err } - cs.rbacV1alpha1, err = rbacv1alpha1.NewForConfig(&configShallowCopy) + cs.rbacV1, err = rbacv1.NewForConfigAndClient(&configShallowCopy, httpClient) if err != nil { return nil, err } - cs.schedulingV1alpha1, err = schedulingv1alpha1.NewForConfig(&configShallowCopy) + cs.rbacV1beta1, err = rbacv1beta1.NewForConfigAndClient(&configShallowCopy, httpClient) if err != nil { return nil, err } - cs.schedulingV1beta1, err = schedulingv1beta1.NewForConfig(&configShallowCopy) + cs.rbacV1alpha1, err = rbacv1alpha1.NewForConfigAndClient(&configShallowCopy, httpClient) if err != nil { return nil, err } - cs.schedulingV1, err = schedulingv1.NewForConfig(&configShallowCopy) + cs.schedulingV1alpha1, err = schedulingv1alpha1.NewForConfigAndClient(&configShallowCopy, httpClient) if err != nil { return nil, err } - cs.storageV1beta1, err = storagev1beta1.NewForConfig(&configShallowCopy) + cs.schedulingV1beta1, err = schedulingv1beta1.NewForConfigAndClient(&configShallowCopy, httpClient) if err != nil { return nil, err } - cs.storageV1, err = storagev1.NewForConfig(&configShallowCopy) + cs.schedulingV1, err = schedulingv1.NewForConfigAndClient(&configShallowCopy, httpClient) if err != nil { return nil, err } - cs.storageV1alpha1, err = storagev1alpha1.NewForConfig(&configShallowCopy) + cs.storageV1beta1, err = storagev1beta1.NewForConfigAndClient(&configShallowCopy, httpClient) + if err != nil { + return nil, err + } + cs.storageV1, err = storagev1.NewForConfigAndClient(&configShallowCopy, httpClient) + if err != nil { + return nil, err + } + cs.storageV1alpha1, err = storagev1alpha1.NewForConfigAndClient(&configShallowCopy, httpClient) if err != nil { return nil, err } - cs.DiscoveryClient, err = discovery.NewDiscoveryClientForConfig(&configShallowCopy) + cs.DiscoveryClient, err = discovery.NewDiscoveryClientForConfigAndClient(&configShallowCopy, httpClient) if err != nil { return nil, err } @@ -584,53 +632,11 @@ func NewForConfig(c *rest.Config) (*Clientset, error) { // NewForConfigOrDie creates a new Clientset for the given config and // panics if there is an error in the config. 
func NewForConfigOrDie(c *rest.Config) *Clientset { - var cs Clientset - cs.admissionregistrationV1 = admissionregistrationv1.NewForConfigOrDie(c) - cs.admissionregistrationV1beta1 = admissionregistrationv1beta1.NewForConfigOrDie(c) - cs.internalV1alpha1 = internalv1alpha1.NewForConfigOrDie(c) - cs.appsV1 = appsv1.NewForConfigOrDie(c) - cs.appsV1beta1 = appsv1beta1.NewForConfigOrDie(c) - cs.appsV1beta2 = appsv1beta2.NewForConfigOrDie(c) - cs.authenticationV1 = authenticationv1.NewForConfigOrDie(c) - cs.authenticationV1beta1 = authenticationv1beta1.NewForConfigOrDie(c) - cs.authorizationV1 = authorizationv1.NewForConfigOrDie(c) - cs.authorizationV1beta1 = authorizationv1beta1.NewForConfigOrDie(c) - cs.autoscalingV1 = autoscalingv1.NewForConfigOrDie(c) - cs.autoscalingV2beta1 = autoscalingv2beta1.NewForConfigOrDie(c) - cs.autoscalingV2beta2 = autoscalingv2beta2.NewForConfigOrDie(c) - cs.batchV1 = batchv1.NewForConfigOrDie(c) - cs.batchV1beta1 = batchv1beta1.NewForConfigOrDie(c) - cs.certificatesV1 = certificatesv1.NewForConfigOrDie(c) - cs.certificatesV1beta1 = certificatesv1beta1.NewForConfigOrDie(c) - cs.coordinationV1beta1 = coordinationv1beta1.NewForConfigOrDie(c) - cs.coordinationV1 = coordinationv1.NewForConfigOrDie(c) - cs.coreV1 = corev1.NewForConfigOrDie(c) - cs.discoveryV1 = discoveryv1.NewForConfigOrDie(c) - cs.discoveryV1beta1 = discoveryv1beta1.NewForConfigOrDie(c) - cs.eventsV1 = eventsv1.NewForConfigOrDie(c) - cs.eventsV1beta1 = eventsv1beta1.NewForConfigOrDie(c) - cs.extensionsV1beta1 = extensionsv1beta1.NewForConfigOrDie(c) - cs.flowcontrolV1alpha1 = flowcontrolv1alpha1.NewForConfigOrDie(c) - cs.flowcontrolV1beta1 = flowcontrolv1beta1.NewForConfigOrDie(c) - cs.networkingV1 = networkingv1.NewForConfigOrDie(c) - cs.networkingV1beta1 = networkingv1beta1.NewForConfigOrDie(c) - cs.nodeV1 = nodev1.NewForConfigOrDie(c) - cs.nodeV1alpha1 = nodev1alpha1.NewForConfigOrDie(c) - cs.nodeV1beta1 = nodev1beta1.NewForConfigOrDie(c) - cs.policyV1 = policyv1.NewForConfigOrDie(c) - cs.policyV1beta1 = policyv1beta1.NewForConfigOrDie(c) - cs.rbacV1 = rbacv1.NewForConfigOrDie(c) - cs.rbacV1beta1 = rbacv1beta1.NewForConfigOrDie(c) - cs.rbacV1alpha1 = rbacv1alpha1.NewForConfigOrDie(c) - cs.schedulingV1alpha1 = schedulingv1alpha1.NewForConfigOrDie(c) - cs.schedulingV1beta1 = schedulingv1beta1.NewForConfigOrDie(c) - cs.schedulingV1 = schedulingv1.NewForConfigOrDie(c) - cs.storageV1beta1 = storagev1beta1.NewForConfigOrDie(c) - cs.storageV1 = storagev1.NewForConfigOrDie(c) - cs.storageV1alpha1 = storagev1alpha1.NewForConfigOrDie(c) - - cs.DiscoveryClient = discovery.NewDiscoveryClientForConfigOrDie(c) - return &cs + cs, err := NewForConfig(c) + if err != nil { + panic(err) + } + return cs } // New creates a new Clientset for the given RESTClient. 
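NewForConfigOrDie now simply delegates to NewForConfig and panics on error, so the two constructors can no longer drift apart. A hedged bootstrap sketch under assumed names (the clientcmd kubeconfig lookup is illustrative, not part of this patch):

	package main

	import (
		"k8s.io/client-go/kubernetes"
		"k8s.io/client-go/tools/clientcmd"
	)

	func main() {
		// Illustrative kubeconfig location; adjust for your environment.
		cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
		if err != nil {
			panic(err)
		}
		// Panics on a bad config instead of returning an error.
		cs := kubernetes.NewForConfigOrDie(cfg)
		// The autoscaling/v2 and flowcontrol/v1beta2 groups registered in
		// this patch are reachable through the same clientset:
		_ = cs.AutoscalingV2()
		_ = cs.FlowcontrolV1beta2()
	}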
@@ -647,6 +653,7 @@ func New(c rest.Interface) *Clientset { cs.authorizationV1 = authorizationv1.New(c) cs.authorizationV1beta1 = authorizationv1beta1.New(c) cs.autoscalingV1 = autoscalingv1.New(c) + cs.autoscalingV2 = autoscalingv2.New(c) cs.autoscalingV2beta1 = autoscalingv2beta1.New(c) cs.autoscalingV2beta2 = autoscalingv2beta2.New(c) cs.batchV1 = batchv1.New(c) @@ -663,6 +670,7 @@ func New(c rest.Interface) *Clientset { cs.extensionsV1beta1 = extensionsv1beta1.New(c) cs.flowcontrolV1alpha1 = flowcontrolv1alpha1.New(c) cs.flowcontrolV1beta1 = flowcontrolv1beta1.New(c) + cs.flowcontrolV1beta2 = flowcontrolv1beta2.New(c) cs.networkingV1 = networkingv1.New(c) cs.networkingV1beta1 = networkingv1beta1.New(c) cs.nodeV1 = nodev1.New(c) diff --git a/vendor/k8s.io/client-go/kubernetes/scheme/register.go b/vendor/k8s.io/client-go/kubernetes/scheme/register.go index a46fb296291df..b41466151d4cd 100644 --- a/vendor/k8s.io/client-go/kubernetes/scheme/register.go +++ b/vendor/k8s.io/client-go/kubernetes/scheme/register.go @@ -30,6 +30,7 @@ import ( authorizationv1 "k8s.io/api/authorization/v1" authorizationv1beta1 "k8s.io/api/authorization/v1beta1" autoscalingv1 "k8s.io/api/autoscaling/v1" + autoscalingv2 "k8s.io/api/autoscaling/v2" autoscalingv2beta1 "k8s.io/api/autoscaling/v2beta1" autoscalingv2beta2 "k8s.io/api/autoscaling/v2beta2" batchv1 "k8s.io/api/batch/v1" @@ -46,6 +47,7 @@ import ( extensionsv1beta1 "k8s.io/api/extensions/v1beta1" flowcontrolv1alpha1 "k8s.io/api/flowcontrol/v1alpha1" flowcontrolv1beta1 "k8s.io/api/flowcontrol/v1beta1" + flowcontrolv1beta2 "k8s.io/api/flowcontrol/v1beta2" networkingv1 "k8s.io/api/networking/v1" networkingv1beta1 "k8s.io/api/networking/v1beta1" nodev1 "k8s.io/api/node/v1" @@ -84,6 +86,7 @@ var localSchemeBuilder = runtime.SchemeBuilder{ authorizationv1.AddToScheme, authorizationv1beta1.AddToScheme, autoscalingv1.AddToScheme, + autoscalingv2.AddToScheme, autoscalingv2beta1.AddToScheme, autoscalingv2beta2.AddToScheme, batchv1.AddToScheme, @@ -100,6 +103,7 @@ var localSchemeBuilder = runtime.SchemeBuilder{ extensionsv1beta1.AddToScheme, flowcontrolv1alpha1.AddToScheme, flowcontrolv1beta1.AddToScheme, + flowcontrolv1beta2.AddToScheme, networkingv1.AddToScheme, networkingv1beta1.AddToScheme, nodev1.AddToScheme, diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/admissionregistration_client.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/admissionregistration_client.go index 751273159e5f5..10848bed17a76 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/admissionregistration_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/admissionregistration_client.go @@ -19,6 +19,8 @@ limitations under the License. package v1 import ( + "net/http" + v1 "k8s.io/api/admissionregistration/v1" "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" @@ -44,12 +46,28 @@ func (c *AdmissionregistrationV1Client) ValidatingWebhookConfigurations() Valida } // NewForConfig creates a new AdmissionregistrationV1Client for the given config. +// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), +// where httpClient was generated with rest.HTTPClientFor(c). 
func NewForConfig(c *rest.Config) (*AdmissionregistrationV1Client, error) { config := *c if err := setConfigDefaults(&config); err != nil { return nil, err } - client, err := rest.RESTClientFor(&config) + httpClient, err := rest.HTTPClientFor(&config) + if err != nil { + return nil, err + } + return NewForConfigAndClient(&config, httpClient) +} + +// NewForConfigAndClient creates a new AdmissionregistrationV1Client for the given config and http client. +// Note the http client provided takes precedence over the configured transport values. +func NewForConfigAndClient(c *rest.Config, h *http.Client) (*AdmissionregistrationV1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientForConfigAndClient(&config, h) if err != nil { return nil, err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/admissionregistration_client.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/admissionregistration_client.go index 2d93ff02ee64b..8fda84b1d282e 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/admissionregistration_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/admissionregistration_client.go @@ -19,6 +19,8 @@ limitations under the License. package v1beta1 import ( + "net/http" + v1beta1 "k8s.io/api/admissionregistration/v1beta1" "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" @@ -44,12 +46,28 @@ func (c *AdmissionregistrationV1beta1Client) ValidatingWebhookConfigurations() V } // NewForConfig creates a new AdmissionregistrationV1beta1Client for the given config. +// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), +// where httpClient was generated with rest.HTTPClientFor(c). func NewForConfig(c *rest.Config) (*AdmissionregistrationV1beta1Client, error) { config := *c if err := setConfigDefaults(&config); err != nil { return nil, err } - client, err := rest.RESTClientFor(&config) + httpClient, err := rest.HTTPClientFor(&config) + if err != nil { + return nil, err + } + return NewForConfigAndClient(&config, httpClient) +} + +// NewForConfigAndClient creates a new AdmissionregistrationV1beta1Client for the given config and http client. +// Note the http client provided takes precedence over the configured transport values. +func NewForConfigAndClient(c *rest.Config, h *http.Client) (*AdmissionregistrationV1beta1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientForConfigAndClient(&config, h) if err != nil { return nil, err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apiserverinternal/v1alpha1/apiserverinternal_client.go b/vendor/k8s.io/client-go/kubernetes/typed/apiserverinternal/v1alpha1/apiserverinternal_client.go index e43a9a3687d1c..1794cb941de33 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apiserverinternal/v1alpha1/apiserverinternal_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apiserverinternal/v1alpha1/apiserverinternal_client.go @@ -19,6 +19,8 @@ limitations under the License. package v1alpha1 import ( + "net/http" + v1alpha1 "k8s.io/api/apiserverinternal/v1alpha1" "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" @@ -39,12 +41,28 @@ func (c *InternalV1alpha1Client) StorageVersions() StorageVersionInterface { } // NewForConfig creates a new InternalV1alpha1Client for the given config. 
+// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), +// where httpClient was generated with rest.HTTPClientFor(c). func NewForConfig(c *rest.Config) (*InternalV1alpha1Client, error) { config := *c if err := setConfigDefaults(&config); err != nil { return nil, err } - client, err := rest.RESTClientFor(&config) + httpClient, err := rest.HTTPClientFor(&config) + if err != nil { + return nil, err + } + return NewForConfigAndClient(&config, httpClient) +} + +// NewForConfigAndClient creates a new InternalV1alpha1Client for the given config and http client. +// Note the http client provided takes precedence over the configured transport values. +func NewForConfigAndClient(c *rest.Config, h *http.Client) (*InternalV1alpha1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientForConfigAndClient(&config, h) if err != nil { return nil, err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/apps_client.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/apps_client.go index 621c734afa0f4..397542eeb49dc 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/apps_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/apps_client.go @@ -19,6 +19,8 @@ limitations under the License. package v1 import ( + "net/http" + v1 "k8s.io/api/apps/v1" "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" @@ -59,12 +61,28 @@ func (c *AppsV1Client) StatefulSets(namespace string) StatefulSetInterface { } // NewForConfig creates a new AppsV1Client for the given config. +// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), +// where httpClient was generated with rest.HTTPClientFor(c). func NewForConfig(c *rest.Config) (*AppsV1Client, error) { config := *c if err := setConfigDefaults(&config); err != nil { return nil, err } - client, err := rest.RESTClientFor(&config) + httpClient, err := rest.HTTPClientFor(&config) + if err != nil { + return nil, err + } + return NewForConfigAndClient(&config, httpClient) +} + +// NewForConfigAndClient creates a new AppsV1Client for the given config and http client. +// Note the http client provided takes precedence over the configured transport values. 
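+//
+// Sketch only (cfg and httpClient are assumed to come from the caller,
+// e.g. via rest.HTTPClientFor as NewForConfig does above):
+//
+//	appsClient, err := NewForConfigAndClient(cfg, httpClient)
+//	// appsClient reuses httpClient's transport instead of building its own.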
+func NewForConfigAndClient(c *rest.Config, h *http.Client) (*AppsV1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientForConfigAndClient(&config, h) if err != nil { return nil, err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/deployment.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/deployment.go index 51545107d5268..ccc2049ff7f4e 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/deployment.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/deployment.go @@ -30,6 +30,7 @@ import ( types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" appsv1 "k8s.io/client-go/applyconfigurations/apps/v1" + applyconfigurationsautoscalingv1 "k8s.io/client-go/applyconfigurations/autoscaling/v1" scheme "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -55,6 +56,7 @@ type DeploymentInterface interface { ApplyStatus(ctx context.Context, deployment *appsv1.DeploymentApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Deployment, err error) GetScale(ctx context.Context, deploymentName string, options metav1.GetOptions) (*autoscalingv1.Scale, error) UpdateScale(ctx context.Context, deploymentName string, scale *autoscalingv1.Scale, opts metav1.UpdateOptions) (*autoscalingv1.Scale, error) + ApplyScale(ctx context.Context, deploymentName string, scale *applyconfigurationsautoscalingv1.ScaleApplyConfiguration, opts metav1.ApplyOptions) (*autoscalingv1.Scale, error) DeploymentExpansion } @@ -287,3 +289,28 @@ func (c *deployments) UpdateScale(ctx context.Context, deploymentName string, sc Into(result) return } + +// ApplyScale takes top resource name and the apply declarative configuration for scale, +// applies it and returns the applied scale, and an error, if there is any. +func (c *deployments) ApplyScale(ctx context.Context, deploymentName string, scale *applyconfigurationsautoscalingv1.ScaleApplyConfiguration, opts metav1.ApplyOptions) (result *autoscalingv1.Scale, err error) { + if scale == nil { + return nil, fmt.Errorf("scale provided to ApplyScale must not be nil") + } + patchOpts := opts.ToPatchOptions() + data, err := json.Marshal(scale) + if err != nil { + return nil, err + } + + result = &autoscalingv1.Scale{} + err = c.client.Patch(types.ApplyPatchType). + Namespace(c.ns). + Resource("deployments"). + Name(deploymentName). + SubResource("scale"). + VersionedParams(&patchOpts, scheme.ParameterCodec). + Body(data). + Do(ctx). 
+ Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/replicaset.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/replicaset.go index 7a8f0cd84c83f..917ed521f4f3b 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/replicaset.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/replicaset.go @@ -30,6 +30,7 @@ import ( types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" appsv1 "k8s.io/client-go/applyconfigurations/apps/v1" + applyconfigurationsautoscalingv1 "k8s.io/client-go/applyconfigurations/autoscaling/v1" scheme "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -55,6 +56,7 @@ type ReplicaSetInterface interface { ApplyStatus(ctx context.Context, replicaSet *appsv1.ReplicaSetApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ReplicaSet, err error) GetScale(ctx context.Context, replicaSetName string, options metav1.GetOptions) (*autoscalingv1.Scale, error) UpdateScale(ctx context.Context, replicaSetName string, scale *autoscalingv1.Scale, opts metav1.UpdateOptions) (*autoscalingv1.Scale, error) + ApplyScale(ctx context.Context, replicaSetName string, scale *applyconfigurationsautoscalingv1.ScaleApplyConfiguration, opts metav1.ApplyOptions) (*autoscalingv1.Scale, error) ReplicaSetExpansion } @@ -287,3 +289,28 @@ func (c *replicaSets) UpdateScale(ctx context.Context, replicaSetName string, sc Into(result) return } + +// ApplyScale takes top resource name and the apply declarative configuration for scale, +// applies it and returns the applied scale, and an error, if there is any. +func (c *replicaSets) ApplyScale(ctx context.Context, replicaSetName string, scale *applyconfigurationsautoscalingv1.ScaleApplyConfiguration, opts metav1.ApplyOptions) (result *autoscalingv1.Scale, err error) { + if scale == nil { + return nil, fmt.Errorf("scale provided to ApplyScale must not be nil") + } + patchOpts := opts.ToPatchOptions() + data, err := json.Marshal(scale) + if err != nil { + return nil, err + } + + result = &autoscalingv1.Scale{} + err = c.client.Patch(types.ApplyPatchType). + Namespace(c.ns). + Resource("replicasets"). + Name(replicaSetName). + SubResource("scale"). + VersionedParams(&patchOpts, scheme.ParameterCodec). + Body(data). + Do(ctx). 
+ Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/statefulset.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/statefulset.go index 5626e2baacc32..d1fbb915d82d0 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/statefulset.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/statefulset.go @@ -30,6 +30,7 @@ import ( types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" appsv1 "k8s.io/client-go/applyconfigurations/apps/v1" + applyconfigurationsautoscalingv1 "k8s.io/client-go/applyconfigurations/autoscaling/v1" scheme "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -55,6 +56,7 @@ type StatefulSetInterface interface { ApplyStatus(ctx context.Context, statefulSet *appsv1.StatefulSetApplyConfiguration, opts metav1.ApplyOptions) (result *v1.StatefulSet, err error) GetScale(ctx context.Context, statefulSetName string, options metav1.GetOptions) (*autoscalingv1.Scale, error) UpdateScale(ctx context.Context, statefulSetName string, scale *autoscalingv1.Scale, opts metav1.UpdateOptions) (*autoscalingv1.Scale, error) + ApplyScale(ctx context.Context, statefulSetName string, scale *applyconfigurationsautoscalingv1.ScaleApplyConfiguration, opts metav1.ApplyOptions) (*autoscalingv1.Scale, error) StatefulSetExpansion } @@ -287,3 +289,28 @@ func (c *statefulSets) UpdateScale(ctx context.Context, statefulSetName string, Into(result) return } + +// ApplyScale takes top resource name and the apply declarative configuration for scale, +// applies it and returns the applied scale, and an error, if there is any. +func (c *statefulSets) ApplyScale(ctx context.Context, statefulSetName string, scale *applyconfigurationsautoscalingv1.ScaleApplyConfiguration, opts metav1.ApplyOptions) (result *autoscalingv1.Scale, err error) { + if scale == nil { + return nil, fmt.Errorf("scale provided to ApplyScale must not be nil") + } + patchOpts := opts.ToPatchOptions() + data, err := json.Marshal(scale) + if err != nil { + return nil, err + } + + result = &autoscalingv1.Scale{} + err = c.client.Patch(types.ApplyPatchType). + Namespace(c.ns). + Resource("statefulsets"). + Name(statefulSetName). + SubResource("scale"). + VersionedParams(&patchOpts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/apps_client.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/apps_client.go index e5dd64d98332a..6b7148c5a8693 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/apps_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/apps_client.go @@ -19,6 +19,8 @@ limitations under the License. package v1beta1 import ( + "net/http" + v1beta1 "k8s.io/api/apps/v1beta1" "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" @@ -49,12 +51,28 @@ func (c *AppsV1beta1Client) StatefulSets(namespace string) StatefulSetInterface } // NewForConfig creates a new AppsV1beta1Client for the given config. +// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), +// where httpClient was generated with rest.HTTPClientFor(c). 
func NewForConfig(c *rest.Config) (*AppsV1beta1Client, error) { config := *c if err := setConfigDefaults(&config); err != nil { return nil, err } - client, err := rest.RESTClientFor(&config) + httpClient, err := rest.HTTPClientFor(&config) + if err != nil { + return nil, err + } + return NewForConfigAndClient(&config, httpClient) +} + +// NewForConfigAndClient creates a new AppsV1beta1Client for the given config and http client. +// Note the http client provided takes precedence over the configured transport values. +func NewForConfigAndClient(c *rest.Config, h *http.Client) (*AppsV1beta1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientForConfigAndClient(&config, h) if err != nil { return nil, err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/apps_client.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/apps_client.go index 7ca4e0b20d6cb..968abc56f8040 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/apps_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/apps_client.go @@ -19,6 +19,8 @@ limitations under the License. package v1beta2 import ( + "net/http" + v1beta2 "k8s.io/api/apps/v1beta2" "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" @@ -59,12 +61,28 @@ func (c *AppsV1beta2Client) StatefulSets(namespace string) StatefulSetInterface } // NewForConfig creates a new AppsV1beta2Client for the given config. +// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), +// where httpClient was generated with rest.HTTPClientFor(c). func NewForConfig(c *rest.Config) (*AppsV1beta2Client, error) { config := *c if err := setConfigDefaults(&config); err != nil { return nil, err } - client, err := rest.RESTClientFor(&config) + httpClient, err := rest.HTTPClientFor(&config) + if err != nil { + return nil, err + } + return NewForConfigAndClient(&config, httpClient) +} + +// NewForConfigAndClient creates a new AppsV1beta2Client for the given config and http client. +// Note the http client provided takes precedence over the configured transport values. 
+func NewForConfigAndClient(c *rest.Config, h *http.Client) (*AppsV1beta2Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientForConfigAndClient(&config, h) if err != nil { return nil, err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/statefulset.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/statefulset.go index 73a12c9966c7e..0416675d6d89f 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/statefulset.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/statefulset.go @@ -54,6 +54,7 @@ type StatefulSetInterface interface { ApplyStatus(ctx context.Context, statefulSet *appsv1beta2.StatefulSetApplyConfiguration, opts v1.ApplyOptions) (result *v1beta2.StatefulSet, err error) GetScale(ctx context.Context, statefulSetName string, options v1.GetOptions) (*v1beta2.Scale, error) UpdateScale(ctx context.Context, statefulSetName string, scale *v1beta2.Scale, opts v1.UpdateOptions) (*v1beta2.Scale, error) + ApplyScale(ctx context.Context, statefulSetName string, scale *appsv1beta2.ScaleApplyConfiguration, opts v1.ApplyOptions) (*v1beta2.Scale, error) StatefulSetExpansion } @@ -286,3 +287,28 @@ func (c *statefulSets) UpdateScale(ctx context.Context, statefulSetName string, Into(result) return } + +// ApplyScale takes top resource name and the apply declarative configuration for scale, +// applies it and returns the applied scale, and an error, if there is any. +func (c *statefulSets) ApplyScale(ctx context.Context, statefulSetName string, scale *appsv1beta2.ScaleApplyConfiguration, opts v1.ApplyOptions) (result *v1beta2.Scale, err error) { + if scale == nil { + return nil, fmt.Errorf("scale provided to ApplyScale must not be nil") + } + patchOpts := opts.ToPatchOptions() + data, err := json.Marshal(scale) + if err != nil { + return nil, err + } + + result = &v1beta2.Scale{} + err = c.client.Patch(types.ApplyPatchType). + Namespace(c.ns). + Resource("statefulsets"). + Name(statefulSetName). + SubResource("scale"). + VersionedParams(&patchOpts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/authentication_client.go b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/authentication_client.go index de8864e22abec..aea9d0e133e5a 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/authentication_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/authentication_client.go @@ -19,6 +19,8 @@ limitations under the License. package v1 import ( + "net/http" + v1 "k8s.io/api/authentication/v1" "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" @@ -39,12 +41,28 @@ func (c *AuthenticationV1Client) TokenReviews() TokenReviewInterface { } // NewForConfig creates a new AuthenticationV1Client for the given config. +// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), +// where httpClient was generated with rest.HTTPClientFor(c). func NewForConfig(c *rest.Config) (*AuthenticationV1Client, error) { config := *c if err := setConfigDefaults(&config); err != nil { return nil, err } - client, err := rest.RESTClientFor(&config) + httpClient, err := rest.HTTPClientFor(&config) + if err != nil { + return nil, err + } + return NewForConfigAndClient(&config, httpClient) +} + +// NewForConfigAndClient creates a new AuthenticationV1Client for the given config and http client. 
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/authentication_client.go b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/authentication_client.go
index de8864e22abec..aea9d0e133e5a 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/authentication_client.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/authentication_client.go
@@ -19,6 +19,8 @@ limitations under the License.
 package v1
 
 import (
+	"net/http"
+
 	v1 "k8s.io/api/authentication/v1"
 	"k8s.io/client-go/kubernetes/scheme"
 	rest "k8s.io/client-go/rest"
@@ -39,12 +41,28 @@ func (c *AuthenticationV1Client) TokenReviews() TokenReviewInterface {
 }
 
 // NewForConfig creates a new AuthenticationV1Client for the given config.
+// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient),
+// where httpClient was generated with rest.HTTPClientFor(c).
 func NewForConfig(c *rest.Config) (*AuthenticationV1Client, error) {
 	config := *c
 	if err := setConfigDefaults(&config); err != nil {
 		return nil, err
 	}
-	client, err := rest.RESTClientFor(&config)
+	httpClient, err := rest.HTTPClientFor(&config)
+	if err != nil {
+		return nil, err
+	}
+	return NewForConfigAndClient(&config, httpClient)
+}
+
+// NewForConfigAndClient creates a new AuthenticationV1Client for the given config and http client.
+// Note the http client provided takes precedence over the configured transport values.
+func NewForConfigAndClient(c *rest.Config, h *http.Client) (*AuthenticationV1Client, error) {
+	config := *c
+	if err := setConfigDefaults(&config); err != nil {
+		return nil, err
+	}
+	client, err := rest.RESTClientForConfigAndClient(&config, h)
 	if err != nil {
 		return nil, err
 	}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/authentication_client.go b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/authentication_client.go
index 816bd0a2cd9ef..218cb60c31744 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/authentication_client.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/authentication_client.go
@@ -19,6 +19,8 @@ limitations under the License.
 package v1beta1
 
 import (
+	"net/http"
+
 	v1beta1 "k8s.io/api/authentication/v1beta1"
 	"k8s.io/client-go/kubernetes/scheme"
 	rest "k8s.io/client-go/rest"
@@ -39,12 +41,28 @@ func (c *AuthenticationV1beta1Client) TokenReviews() TokenReviewInterface {
 }
 
 // NewForConfig creates a new AuthenticationV1beta1Client for the given config.
+// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient),
+// where httpClient was generated with rest.HTTPClientFor(c).
 func NewForConfig(c *rest.Config) (*AuthenticationV1beta1Client, error) {
 	config := *c
 	if err := setConfigDefaults(&config); err != nil {
 		return nil, err
 	}
-	client, err := rest.RESTClientFor(&config)
+	httpClient, err := rest.HTTPClientFor(&config)
+	if err != nil {
+		return nil, err
+	}
+	return NewForConfigAndClient(&config, httpClient)
+}
+
+// NewForConfigAndClient creates a new AuthenticationV1beta1Client for the given config and http client.
+// Note the http client provided takes precedence over the configured transport values.
+func NewForConfigAndClient(c *rest.Config, h *http.Client) (*AuthenticationV1beta1Client, error) {
+	config := *c
+	if err := setConfigDefaults(&config); err != nil {
+		return nil, err
+	}
+	client, err := rest.RESTClientForConfigAndClient(&config, h)
 	if err != nil {
 		return nil, err
 	}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/authorization_client.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/authorization_client.go
index 2cc2263222f1e..edfc90346a0d9 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/authorization_client.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/authorization_client.go
@@ -19,6 +19,8 @@ limitations under the License.
 package v1
 
 import (
+	"net/http"
+
 	v1 "k8s.io/api/authorization/v1"
 	"k8s.io/client-go/kubernetes/scheme"
 	rest "k8s.io/client-go/rest"
@@ -54,12 +56,28 @@ func (c *AuthorizationV1Client) SubjectAccessReviews() SubjectAccessReviewInterf
 }
 
 // NewForConfig creates a new AuthorizationV1Client for the given config.
+// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient),
+// where httpClient was generated with rest.HTTPClientFor(c).
 func NewForConfig(c *rest.Config) (*AuthorizationV1Client, error) {
 	config := *c
 	if err := setConfigDefaults(&config); err != nil {
 		return nil, err
 	}
-	client, err := rest.RESTClientFor(&config)
+	httpClient, err := rest.HTTPClientFor(&config)
+	if err != nil {
+		return nil, err
+	}
+	return NewForConfigAndClient(&config, httpClient)
+}
+
+// NewForConfigAndClient creates a new AuthorizationV1Client for the given config and http client.
+// Note the http client provided takes precedence over the configured transport values.
+func NewForConfigAndClient(c *rest.Config, h *http.Client) (*AuthorizationV1Client, error) {
+	config := *c
+	if err := setConfigDefaults(&config); err != nil {
+		return nil, err
+	}
+	client, err := rest.RESTClientForConfigAndClient(&config, h)
 	if err != nil {
 		return nil, err
 	}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/authorization_client.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/authorization_client.go
index 88eac75b764db..23b0edf272f67 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/authorization_client.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/authorization_client.go
@@ -19,6 +19,8 @@ limitations under the License.
 package v1beta1
 
 import (
+	"net/http"
+
 	v1beta1 "k8s.io/api/authorization/v1beta1"
 	"k8s.io/client-go/kubernetes/scheme"
 	rest "k8s.io/client-go/rest"
@@ -54,12 +56,28 @@ func (c *AuthorizationV1beta1Client) SubjectAccessReviews() SubjectAccessReviewI
 }
 
 // NewForConfig creates a new AuthorizationV1beta1Client for the given config.
+// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient),
+// where httpClient was generated with rest.HTTPClientFor(c).
 func NewForConfig(c *rest.Config) (*AuthorizationV1beta1Client, error) {
 	config := *c
 	if err := setConfigDefaults(&config); err != nil {
 		return nil, err
 	}
-	client, err := rest.RESTClientFor(&config)
+	httpClient, err := rest.HTTPClientFor(&config)
+	if err != nil {
+		return nil, err
+	}
+	return NewForConfigAndClient(&config, httpClient)
+}
+
+// NewForConfigAndClient creates a new AuthorizationV1beta1Client for the given config and http client.
+// Note the http client provided takes precedence over the configured transport values.
+func NewForConfigAndClient(c *rest.Config, h *http.Client) (*AuthorizationV1beta1Client, error) {
+	config := *c
+	if err := setConfigDefaults(&config); err != nil {
+		return nil, err
+	}
+	client, err := rest.RESTClientForConfigAndClient(&config, h)
 	if err != nil {
 		return nil, err
 	}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/autoscaling_client.go b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/autoscaling_client.go
index 4f3e96aec57c5..f3a2752cbbea1 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/autoscaling_client.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/autoscaling_client.go
@@ -19,6 +19,8 @@ limitations under the License.
 package v1
 
 import (
+	"net/http"
+
 	v1 "k8s.io/api/autoscaling/v1"
 	"k8s.io/client-go/kubernetes/scheme"
 	rest "k8s.io/client-go/rest"
@@ -39,12 +41,28 @@ func (c *AutoscalingV1Client) HorizontalPodAutoscalers(namespace string) Horizon
 }
 
 // NewForConfig creates a new AutoscalingV1Client for the given config.
+// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient),
+// where httpClient was generated with rest.HTTPClientFor(c).
 func NewForConfig(c *rest.Config) (*AutoscalingV1Client, error) {
 	config := *c
 	if err := setConfigDefaults(&config); err != nil {
 		return nil, err
 	}
-	client, err := rest.RESTClientFor(&config)
+	httpClient, err := rest.HTTPClientFor(&config)
+	if err != nil {
+		return nil, err
+	}
+	return NewForConfigAndClient(&config, httpClient)
+}
+
+// NewForConfigAndClient creates a new AutoscalingV1Client for the given config and http client.
+// Note the http client provided takes precedence over the configured transport values.
+func NewForConfigAndClient(c *rest.Config, h *http.Client) (*AutoscalingV1Client, error) {
+	config := *c
+	if err := setConfigDefaults(&config); err != nil {
+		return nil, err
+	}
+	client, err := rest.RESTClientForConfigAndClient(&config, h)
 	if err != nil {
 		return nil, err
 	}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2/autoscaling_client.go b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2/autoscaling_client.go
new file mode 100644
index 0000000000000..04d5d0f949851
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2/autoscaling_client.go
@@ -0,0 +1,107 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v2
+
+import (
+	"net/http"
+
+	v2 "k8s.io/api/autoscaling/v2"
+	"k8s.io/client-go/kubernetes/scheme"
+	rest "k8s.io/client-go/rest"
+)
+
+type AutoscalingV2Interface interface {
+	RESTClient() rest.Interface
+	HorizontalPodAutoscalersGetter
+}
+
+// AutoscalingV2Client is used to interact with features provided by the autoscaling group.
+type AutoscalingV2Client struct {
+	restClient rest.Interface
+}
+
+func (c *AutoscalingV2Client) HorizontalPodAutoscalers(namespace string) HorizontalPodAutoscalerInterface {
+	return newHorizontalPodAutoscalers(c, namespace)
+}
+
+// NewForConfig creates a new AutoscalingV2Client for the given config.
+// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient),
+// where httpClient was generated with rest.HTTPClientFor(c).
+func NewForConfig(c *rest.Config) (*AutoscalingV2Client, error) {
+	config := *c
+	if err := setConfigDefaults(&config); err != nil {
+		return nil, err
+	}
+	httpClient, err := rest.HTTPClientFor(&config)
+	if err != nil {
+		return nil, err
+	}
+	return NewForConfigAndClient(&config, httpClient)
+}
+
+// NewForConfigAndClient creates a new AutoscalingV2Client for the given config and http client.
+// Note the http client provided takes precedence over the configured transport values.
+func NewForConfigAndClient(c *rest.Config, h *http.Client) (*AutoscalingV2Client, error) {
+	config := *c
+	if err := setConfigDefaults(&config); err != nil {
+		return nil, err
+	}
+	client, err := rest.RESTClientForConfigAndClient(&config, h)
+	if err != nil {
+		return nil, err
+	}
+	return &AutoscalingV2Client{client}, nil
+}
+
+// NewForConfigOrDie creates a new AutoscalingV2Client for the given config and
+// panics if there is an error in the config.
+func NewForConfigOrDie(c *rest.Config) *AutoscalingV2Client {
+	client, err := NewForConfig(c)
+	if err != nil {
+		panic(err)
+	}
+	return client
+}
+
+// New creates a new AutoscalingV2Client for the given RESTClient.
+func New(c rest.Interface) *AutoscalingV2Client {
+	return &AutoscalingV2Client{c}
+}
+
+func setConfigDefaults(config *rest.Config) error {
+	gv := v2.SchemeGroupVersion
+	config.GroupVersion = &gv
+	config.APIPath = "/apis"
+	config.NegotiatedSerializer = scheme.Codecs.WithoutConversion()
+
+	if config.UserAgent == "" {
+		config.UserAgent = rest.DefaultKubernetesUserAgent()
+	}
+
+	return nil
+}
+
+// RESTClient returns a RESTClient that is used to communicate
+// with API server by this client implementation.
+func (c *AutoscalingV2Client) RESTClient() rest.Interface {
+	if c == nil {
+		return nil
+	}
+	return c.restClient
+}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2/doc.go
new file mode 100644
index 0000000000000..86fb4bf582fb1
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2/doc.go
@@ -0,0 +1,20 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+// This package has the automatically generated typed clients.
+package v2
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2/generated_expansion.go
new file mode 100644
index 0000000000000..0470400fdf860
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2/generated_expansion.go
@@ -0,0 +1,21 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v2
+
+type HorizontalPodAutoscalerExpansion interface{}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2/horizontalpodautoscaler.go b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2/horizontalpodautoscaler.go
new file mode 100644
index 0000000000000..3a077d71dae59
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2/horizontalpodautoscaler.go
@@ -0,0 +1,256 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v2
+
+import (
+	"context"
+	json "encoding/json"
+	"fmt"
+	"time"
+
+	v2 "k8s.io/api/autoscaling/v2"
+	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	types "k8s.io/apimachinery/pkg/types"
+	watch "k8s.io/apimachinery/pkg/watch"
+	autoscalingv2 "k8s.io/client-go/applyconfigurations/autoscaling/v2"
+	scheme "k8s.io/client-go/kubernetes/scheme"
+	rest "k8s.io/client-go/rest"
+)
+
+// HorizontalPodAutoscalersGetter has a method to return a HorizontalPodAutoscalerInterface.
+// A group's client should implement this interface.
+type HorizontalPodAutoscalersGetter interface {
+	HorizontalPodAutoscalers(namespace string) HorizontalPodAutoscalerInterface
+}
+
+// HorizontalPodAutoscalerInterface has methods to work with HorizontalPodAutoscaler resources.
+type HorizontalPodAutoscalerInterface interface {
+	Create(ctx context.Context, horizontalPodAutoscaler *v2.HorizontalPodAutoscaler, opts v1.CreateOptions) (*v2.HorizontalPodAutoscaler, error)
+	Update(ctx context.Context, horizontalPodAutoscaler *v2.HorizontalPodAutoscaler, opts v1.UpdateOptions) (*v2.HorizontalPodAutoscaler, error)
+	UpdateStatus(ctx context.Context, horizontalPodAutoscaler *v2.HorizontalPodAutoscaler, opts v1.UpdateOptions) (*v2.HorizontalPodAutoscaler, error)
+	Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
+	DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
+	Get(ctx context.Context, name string, opts v1.GetOptions) (*v2.HorizontalPodAutoscaler, error)
+	List(ctx context.Context, opts v1.ListOptions) (*v2.HorizontalPodAutoscalerList, error)
+	Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
+	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v2.HorizontalPodAutoscaler, err error)
+	Apply(ctx context.Context, horizontalPodAutoscaler *autoscalingv2.HorizontalPodAutoscalerApplyConfiguration, opts v1.ApplyOptions) (result *v2.HorizontalPodAutoscaler, err error)
+	ApplyStatus(ctx context.Context, horizontalPodAutoscaler *autoscalingv2.HorizontalPodAutoscalerApplyConfiguration, opts v1.ApplyOptions) (result *v2.HorizontalPodAutoscaler, err error)
+	HorizontalPodAutoscalerExpansion
+}
+
+// horizontalPodAutoscalers implements HorizontalPodAutoscalerInterface
+type horizontalPodAutoscalers struct {
+	client rest.Interface
+	ns     string
+}
+
+// newHorizontalPodAutoscalers returns a HorizontalPodAutoscalers
+func newHorizontalPodAutoscalers(c *AutoscalingV2Client, namespace string) *horizontalPodAutoscalers {
+	return &horizontalPodAutoscalers{
+		client: c.RESTClient(),
+		ns:     namespace,
+	}
+}
+
+// Get takes name of the horizontalPodAutoscaler, and returns the corresponding horizontalPodAutoscaler object, and an error if there is any.
+func (c *horizontalPodAutoscalers) Get(ctx context.Context, name string, options v1.GetOptions) (result *v2.HorizontalPodAutoscaler, err error) {
+	result = &v2.HorizontalPodAutoscaler{}
+	err = c.client.Get().
+		Namespace(c.ns).
+		Resource("horizontalpodautoscalers").
+		Name(name).
+		VersionedParams(&options, scheme.ParameterCodec).
+		Do(ctx).
+		Into(result)
+	return
+}
+
+// List takes label and field selectors, and returns the list of HorizontalPodAutoscalers that match those selectors.
+func (c *horizontalPodAutoscalers) List(ctx context.Context, opts v1.ListOptions) (result *v2.HorizontalPodAutoscalerList, err error) {
+	var timeout time.Duration
+	if opts.TimeoutSeconds != nil {
+		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+	}
+	result = &v2.HorizontalPodAutoscalerList{}
+	err = c.client.Get().
+		Namespace(c.ns).
+		Resource("horizontalpodautoscalers").
+		VersionedParams(&opts, scheme.ParameterCodec).
+		Timeout(timeout).
+		Do(ctx).
+		Into(result)
+	return
+}
+
+// Watch returns a watch.Interface that watches the requested horizontalPodAutoscalers.
+func (c *horizontalPodAutoscalers) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
+	var timeout time.Duration
+	if opts.TimeoutSeconds != nil {
+		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+	}
+	opts.Watch = true
+	return c.client.Get().
+		Namespace(c.ns).
+		Resource("horizontalpodautoscalers").
+		VersionedParams(&opts, scheme.ParameterCodec).
+		Timeout(timeout).
+		Watch(ctx)
+}
+
+// Create takes the representation of a horizontalPodAutoscaler and creates it.  Returns the server's representation of the horizontalPodAutoscaler, and an error, if there is any.
+func (c *horizontalPodAutoscalers) Create(ctx context.Context, horizontalPodAutoscaler *v2.HorizontalPodAutoscaler, opts v1.CreateOptions) (result *v2.HorizontalPodAutoscaler, err error) {
+	result = &v2.HorizontalPodAutoscaler{}
+	err = c.client.Post().
+		Namespace(c.ns).
+		Resource("horizontalpodautoscalers").
+		VersionedParams(&opts, scheme.ParameterCodec).
+		Body(horizontalPodAutoscaler).
+		Do(ctx).
+		Into(result)
+	return
+}
+
+// Update takes the representation of a horizontalPodAutoscaler and updates it. Returns the server's representation of the horizontalPodAutoscaler, and an error, if there is any.
+func (c *horizontalPodAutoscalers) Update(ctx context.Context, horizontalPodAutoscaler *v2.HorizontalPodAutoscaler, opts v1.UpdateOptions) (result *v2.HorizontalPodAutoscaler, err error) {
+	result = &v2.HorizontalPodAutoscaler{}
+	err = c.client.Put().
+		Namespace(c.ns).
+		Resource("horizontalpodautoscalers").
+		Name(horizontalPodAutoscaler.Name).
+		VersionedParams(&opts, scheme.ParameterCodec).
+		Body(horizontalPodAutoscaler).
+		Do(ctx).
+		Into(result)
+	return
+}
+
+// UpdateStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
+func (c *horizontalPodAutoscalers) UpdateStatus(ctx context.Context, horizontalPodAutoscaler *v2.HorizontalPodAutoscaler, opts v1.UpdateOptions) (result *v2.HorizontalPodAutoscaler, err error) {
+	result = &v2.HorizontalPodAutoscaler{}
+	err = c.client.Put().
+		Namespace(c.ns).
+		Resource("horizontalpodautoscalers").
+		Name(horizontalPodAutoscaler.Name).
+		SubResource("status").
+		VersionedParams(&opts, scheme.ParameterCodec).
+		Body(horizontalPodAutoscaler).
+		Do(ctx).
+		Into(result)
+	return
+}
+
+// Delete takes name of the horizontalPodAutoscaler and deletes it. Returns an error if one occurs.
+func (c *horizontalPodAutoscalers) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
+	return c.client.Delete().
+		Namespace(c.ns).
+		Resource("horizontalpodautoscalers").
+		Name(name).
+		Body(&opts).
+		Do(ctx).
+		Error()
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *horizontalPodAutoscalers) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
+	var timeout time.Duration
+	if listOpts.TimeoutSeconds != nil {
+		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
+	}
+	return c.client.Delete().
+		Namespace(c.ns).
+		Resource("horizontalpodautoscalers").
+		VersionedParams(&listOpts, scheme.ParameterCodec).
+		Timeout(timeout).
+		Body(&opts).
+		Do(ctx).
+		Error()
+}
+
+// Patch applies the patch and returns the patched horizontalPodAutoscaler.
+func (c *horizontalPodAutoscalers) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v2.HorizontalPodAutoscaler, err error) {
+	result = &v2.HorizontalPodAutoscaler{}
+	err = c.client.Patch(pt).
+		Namespace(c.ns).
+		Resource("horizontalpodautoscalers").
+		Name(name).
+		SubResource(subresources...).
+		VersionedParams(&opts, scheme.ParameterCodec).
+		Body(data).
+		Do(ctx).
+		Into(result)
+	return
+}
+
+// Apply takes the given apply declarative configuration, applies it and returns the applied horizontalPodAutoscaler.
+func (c *horizontalPodAutoscalers) Apply(ctx context.Context, horizontalPodAutoscaler *autoscalingv2.HorizontalPodAutoscalerApplyConfiguration, opts v1.ApplyOptions) (result *v2.HorizontalPodAutoscaler, err error) {
+	if horizontalPodAutoscaler == nil {
+		return nil, fmt.Errorf("horizontalPodAutoscaler provided to Apply must not be nil")
+	}
+	patchOpts := opts.ToPatchOptions()
+	data, err := json.Marshal(horizontalPodAutoscaler)
+	if err != nil {
+		return nil, err
+	}
+	name := horizontalPodAutoscaler.Name
+	if name == nil {
+		return nil, fmt.Errorf("horizontalPodAutoscaler.Name must be provided to Apply")
+	}
+	result = &v2.HorizontalPodAutoscaler{}
+	err = c.client.Patch(types.ApplyPatchType).
+		Namespace(c.ns).
+		Resource("horizontalpodautoscalers").
+		Name(*name).
+		VersionedParams(&patchOpts, scheme.ParameterCodec).
+		Body(data).
+		Do(ctx).
+		Into(result)
+	return
+}
+
+// ApplyStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
+func (c *horizontalPodAutoscalers) ApplyStatus(ctx context.Context, horizontalPodAutoscaler *autoscalingv2.HorizontalPodAutoscalerApplyConfiguration, opts v1.ApplyOptions) (result *v2.HorizontalPodAutoscaler, err error) {
+	if horizontalPodAutoscaler == nil {
+		return nil, fmt.Errorf("horizontalPodAutoscaler provided to Apply must not be nil")
+	}
+	patchOpts := opts.ToPatchOptions()
+	data, err := json.Marshal(horizontalPodAutoscaler)
+	if err != nil {
+		return nil, err
+	}
+
+	name := horizontalPodAutoscaler.Name
+	if name == nil {
+		return nil, fmt.Errorf("horizontalPodAutoscaler.Name must be provided to Apply")
+	}
+
+	result = &v2.HorizontalPodAutoscaler{}
+	err = c.client.Patch(types.ApplyPatchType).
+		Namespace(c.ns).
+		Resource("horizontalpodautoscalers").
+		Name(*name).
+		SubResource("status").
+		VersionedParams(&patchOpts, scheme.ParameterCodec).
+		Body(data).
+		Do(ctx).
+		Into(result)
+	return
+}
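A short sketch of the new autoscaling/v2 typed client in use (in-cluster config and the "default" namespace are assumptions for illustration):

    package main

    import (
        "context"
        "fmt"

        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        autoscalingv2 "k8s.io/client-go/kubernetes/typed/autoscaling/v2"
        "k8s.io/client-go/rest"
    )

    func listHPAs(ctx context.Context) error {
        config, err := rest.InClusterConfig()
        if err != nil {
            return err
        }
        client, err := autoscalingv2.NewForConfig(config)
        if err != nil {
            return err
        }
        hpas, err := client.HorizontalPodAutoscalers("default").List(ctx, metav1.ListOptions{})
        if err != nil {
            return err
        }
        for _, hpa := range hpas.Items {
            fmt.Println(hpa.Name)
        }
        return nil
    }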
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/autoscaling_client.go b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/autoscaling_client.go
index c1a91fc3e734c..d1dde5ed1b70e 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/autoscaling_client.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/autoscaling_client.go
@@ -19,6 +19,8 @@ limitations under the License.
 package v2beta1
 
 import (
+	"net/http"
+
 	v2beta1 "k8s.io/api/autoscaling/v2beta1"
 	"k8s.io/client-go/kubernetes/scheme"
 	rest "k8s.io/client-go/rest"
@@ -39,12 +41,28 @@ func (c *AutoscalingV2beta1Client) HorizontalPodAutoscalers(namespace string) Ho
 }
 
 // NewForConfig creates a new AutoscalingV2beta1Client for the given config.
+// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient),
+// where httpClient was generated with rest.HTTPClientFor(c).
 func NewForConfig(c *rest.Config) (*AutoscalingV2beta1Client, error) {
 	config := *c
 	if err := setConfigDefaults(&config); err != nil {
 		return nil, err
 	}
-	client, err := rest.RESTClientFor(&config)
+	httpClient, err := rest.HTTPClientFor(&config)
+	if err != nil {
+		return nil, err
+	}
+	return NewForConfigAndClient(&config, httpClient)
+}
+
+// NewForConfigAndClient creates a new AutoscalingV2beta1Client for the given config and http client.
+// Note the http client provided takes precedence over the configured transport values.
+func NewForConfigAndClient(c *rest.Config, h *http.Client) (*AutoscalingV2beta1Client, error) {
+	config := *c
+	if err := setConfigDefaults(&config); err != nil {
+		return nil, err
+	}
+	client, err := rest.RESTClientForConfigAndClient(&config, h)
 	if err != nil {
 		return nil, err
 	}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/autoscaling_client.go b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/autoscaling_client.go
index bd2b392701fd4..cae1b4e43a5b2 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/autoscaling_client.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/autoscaling_client.go
@@ -19,6 +19,8 @@ limitations under the License.
 package v2beta2
 
 import (
+	"net/http"
+
 	v2beta2 "k8s.io/api/autoscaling/v2beta2"
 	"k8s.io/client-go/kubernetes/scheme"
 	rest "k8s.io/client-go/rest"
@@ -39,12 +41,28 @@ func (c *AutoscalingV2beta2Client) HorizontalPodAutoscalers(namespace string) Ho
 }
 
 // NewForConfig creates a new AutoscalingV2beta2Client for the given config.
+// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient),
+// where httpClient was generated with rest.HTTPClientFor(c).
 func NewForConfig(c *rest.Config) (*AutoscalingV2beta2Client, error) {
 	config := *c
 	if err := setConfigDefaults(&config); err != nil {
 		return nil, err
 	}
-	client, err := rest.RESTClientFor(&config)
+	httpClient, err := rest.HTTPClientFor(&config)
+	if err != nil {
+		return nil, err
+	}
+	return NewForConfigAndClient(&config, httpClient)
+}
+
+// NewForConfigAndClient creates a new AutoscalingV2beta2Client for the given config and http client.
+// Note the http client provided takes precedence over the configured transport values.
+func NewForConfigAndClient(c *rest.Config, h *http.Client) (*AutoscalingV2beta2Client, error) {
+	config := *c
+	if err := setConfigDefaults(&config); err != nil {
+		return nil, err
+	}
+	client, err := rest.RESTClientForConfigAndClient(&config, h)
 	if err != nil {
 		return nil, err
 	}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/batch_client.go b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/batch_client.go
index ba414eebc7810..eee144f71196c 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/batch_client.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/batch_client.go
@@ -19,6 +19,8 @@ limitations under the License.
 package v1
 
 import (
+	"net/http"
+
 	v1 "k8s.io/api/batch/v1"
 	"k8s.io/client-go/kubernetes/scheme"
 	rest "k8s.io/client-go/rest"
@@ -44,12 +46,28 @@ func (c *BatchV1Client) Jobs(namespace string) JobInterface {
 }
 
 // NewForConfig creates a new BatchV1Client for the given config.
+// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient),
+// where httpClient was generated with rest.HTTPClientFor(c).
 func NewForConfig(c *rest.Config) (*BatchV1Client, error) {
 	config := *c
 	if err := setConfigDefaults(&config); err != nil {
 		return nil, err
 	}
-	client, err := rest.RESTClientFor(&config)
+	httpClient, err := rest.HTTPClientFor(&config)
+	if err != nil {
+		return nil, err
+	}
+	return NewForConfigAndClient(&config, httpClient)
+}
+
+// NewForConfigAndClient creates a new BatchV1Client for the given config and http client.
+// Note the http client provided takes precedence over the configured transport values.
+func NewForConfigAndClient(c *rest.Config, h *http.Client) (*BatchV1Client, error) {
+	config := *c
+	if err := setConfigDefaults(&config); err != nil {
+		return nil, err
+	}
+	client, err := rest.RESTClientForConfigAndClient(&config, h)
 	if err != nil {
 		return nil, err
 	}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/batch_client.go b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/batch_client.go
index 257085358e180..ebbf063ec2313 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/batch_client.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/batch_client.go
@@ -19,6 +19,8 @@ limitations under the License.
 package v1beta1
 
 import (
+	"net/http"
+
 	v1beta1 "k8s.io/api/batch/v1beta1"
 	"k8s.io/client-go/kubernetes/scheme"
 	rest "k8s.io/client-go/rest"
@@ -39,12 +41,28 @@ func (c *BatchV1beta1Client) CronJobs(namespace string) CronJobInterface {
 }
 
 // NewForConfig creates a new BatchV1beta1Client for the given config.
+// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient),
+// where httpClient was generated with rest.HTTPClientFor(c).
 func NewForConfig(c *rest.Config) (*BatchV1beta1Client, error) {
 	config := *c
 	if err := setConfigDefaults(&config); err != nil {
 		return nil, err
 	}
-	client, err := rest.RESTClientFor(&config)
+	httpClient, err := rest.HTTPClientFor(&config)
+	if err != nil {
+		return nil, err
+	}
+	return NewForConfigAndClient(&config, httpClient)
+}
+
+// NewForConfigAndClient creates a new BatchV1beta1Client for the given config and http client.
+// Note the http client provided takes precedence over the configured transport values.
+func NewForConfigAndClient(c *rest.Config, h *http.Client) (*BatchV1beta1Client, error) {
+	config := *c
+	if err := setConfigDefaults(&config); err != nil {
+		return nil, err
+	}
+	client, err := rest.RESTClientForConfigAndClient(&config, h)
 	if err != nil {
 		return nil, err
 	}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1/certificates_client.go b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1/certificates_client.go
index 25aea93c79740..6d87c539eb4bf 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1/certificates_client.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1/certificates_client.go
@@ -19,6 +19,8 @@ limitations under the License.
 package v1
 
 import (
+	"net/http"
+
 	v1 "k8s.io/api/certificates/v1"
 	"k8s.io/client-go/kubernetes/scheme"
 	rest "k8s.io/client-go/rest"
@@ -39,12 +41,28 @@ func (c *CertificatesV1Client) CertificateSigningRequests() CertificateSigningRe
 }
 
 // NewForConfig creates a new CertificatesV1Client for the given config.
+// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient),
+// where httpClient was generated with rest.HTTPClientFor(c).
 func NewForConfig(c *rest.Config) (*CertificatesV1Client, error) {
 	config := *c
 	if err := setConfigDefaults(&config); err != nil {
 		return nil, err
 	}
-	client, err := rest.RESTClientFor(&config)
+	httpClient, err := rest.HTTPClientFor(&config)
+	if err != nil {
+		return nil, err
+	}
+	return NewForConfigAndClient(&config, httpClient)
+}
+
+// NewForConfigAndClient creates a new CertificatesV1Client for the given config and http client.
+// Note the http client provided takes precedence over the configured transport values.
+func NewForConfigAndClient(c *rest.Config, h *http.Client) (*CertificatesV1Client, error) {
+	config := *c
+	if err := setConfigDefaults(&config); err != nil {
+		return nil, err
+	}
+	client, err := rest.RESTClientForConfigAndClient(&config, h)
 	if err != nil {
 		return nil, err
 	}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/certificates_client.go b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/certificates_client.go
index 1c52d551b2d68..fa97b441de2ac 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/certificates_client.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/certificates_client.go
@@ -19,6 +19,8 @@ limitations under the License.
 package v1beta1
 
 import (
+	"net/http"
+
 	v1beta1 "k8s.io/api/certificates/v1beta1"
 	"k8s.io/client-go/kubernetes/scheme"
 	rest "k8s.io/client-go/rest"
@@ -39,12 +41,28 @@ func (c *CertificatesV1beta1Client) CertificateSigningRequests() CertificateSign
 }
 
 // NewForConfig creates a new CertificatesV1beta1Client for the given config.
+// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient),
+// where httpClient was generated with rest.HTTPClientFor(c).
 func NewForConfig(c *rest.Config) (*CertificatesV1beta1Client, error) {
 	config := *c
 	if err := setConfigDefaults(&config); err != nil {
 		return nil, err
 	}
-	client, err := rest.RESTClientFor(&config)
+	httpClient, err := rest.HTTPClientFor(&config)
+	if err != nil {
+		return nil, err
+	}
+	return NewForConfigAndClient(&config, httpClient)
+}
+
+// NewForConfigAndClient creates a new CertificatesV1beta1Client for the given config and http client.
+// Note the http client provided takes precedence over the configured transport values.
+func NewForConfigAndClient(c *rest.Config, h *http.Client) (*CertificatesV1beta1Client, error) {
+	config := *c
+	if err := setConfigDefaults(&config); err != nil {
+		return nil, err
+	}
+	client, err := rest.RESTClientForConfigAndClient(&config, h)
 	if err != nil {
 		return nil, err
 	}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1/coordination_client.go b/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1/coordination_client.go
index 0df7b71bf2dbf..e19469d530f97 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1/coordination_client.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1/coordination_client.go
@@ -19,6 +19,8 @@ limitations under the License.
 package v1
 
 import (
+	"net/http"
+
 	v1 "k8s.io/api/coordination/v1"
 	"k8s.io/client-go/kubernetes/scheme"
 	rest "k8s.io/client-go/rest"
@@ -39,12 +41,28 @@ func (c *CoordinationV1Client) Leases(namespace string) LeaseInterface {
 }
 
 // NewForConfig creates a new CoordinationV1Client for the given config.
+// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient),
+// where httpClient was generated with rest.HTTPClientFor(c).
 func NewForConfig(c *rest.Config) (*CoordinationV1Client, error) {
 	config := *c
 	if err := setConfigDefaults(&config); err != nil {
 		return nil, err
 	}
-	client, err := rest.RESTClientFor(&config)
+	httpClient, err := rest.HTTPClientFor(&config)
+	if err != nil {
+		return nil, err
+	}
+	return NewForConfigAndClient(&config, httpClient)
+}
+
+// NewForConfigAndClient creates a new CoordinationV1Client for the given config and http client.
+// Note the http client provided takes precedence over the configured transport values.
+func NewForConfigAndClient(c *rest.Config, h *http.Client) (*CoordinationV1Client, error) {
+	config := *c
+	if err := setConfigDefaults(&config); err != nil {
+		return nil, err
+	}
+	client, err := rest.RESTClientForConfigAndClient(&config, h)
 	if err != nil {
 		return nil, err
 	}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/coordination_client.go b/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/coordination_client.go
index d68ed5d3448b4..27d674e239e51 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/coordination_client.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/coordination_client.go
@@ -19,6 +19,8 @@ limitations under the License.
 package v1beta1
 
 import (
+	"net/http"
+
 	v1beta1 "k8s.io/api/coordination/v1beta1"
 	"k8s.io/client-go/kubernetes/scheme"
 	rest "k8s.io/client-go/rest"
@@ -39,12 +41,28 @@ func (c *CoordinationV1beta1Client) Leases(namespace string) LeaseInterface {
 }
 
 // NewForConfig creates a new CoordinationV1beta1Client for the given config.
+// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient),
+// where httpClient was generated with rest.HTTPClientFor(c).
 func NewForConfig(c *rest.Config) (*CoordinationV1beta1Client, error) {
 	config := *c
 	if err := setConfigDefaults(&config); err != nil {
 		return nil, err
 	}
-	client, err := rest.RESTClientFor(&config)
+	httpClient, err := rest.HTTPClientFor(&config)
+	if err != nil {
+		return nil, err
+	}
+	return NewForConfigAndClient(&config, httpClient)
+}
+
+// NewForConfigAndClient creates a new CoordinationV1beta1Client for the given config and http client.
+// Note the http client provided takes precedence over the configured transport values.
+func NewForConfigAndClient(c *rest.Config, h *http.Client) (*CoordinationV1beta1Client, error) {
+	config := *c
+	if err := setConfigDefaults(&config); err != nil {
+		return nil, err
+	}
+	client, err := rest.RESTClientForConfigAndClient(&config, h)
 	if err != nil {
 		return nil, err
 	}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/core_client.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/core_client.go
index 428d2afa3bc28..6e59e4cc6b41c 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/core_client.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/core_client.go
@@ -19,6 +19,8 @@ limitations under the License.
 package v1
 
 import (
+	"net/http"
+
 	v1 "k8s.io/api/core/v1"
 	"k8s.io/client-go/kubernetes/scheme"
 	rest "k8s.io/client-go/rest"
@@ -114,12 +116,28 @@ func (c *CoreV1Client) ServiceAccounts(namespace string) ServiceAccountInterface
 }
 
 // NewForConfig creates a new CoreV1Client for the given config.
+// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient),
+// where httpClient was generated with rest.HTTPClientFor(c).
 func NewForConfig(c *rest.Config) (*CoreV1Client, error) {
 	config := *c
 	if err := setConfigDefaults(&config); err != nil {
 		return nil, err
 	}
-	client, err := rest.RESTClientFor(&config)
+	httpClient, err := rest.HTTPClientFor(&config)
+	if err != nil {
+		return nil, err
+	}
+	return NewForConfigAndClient(&config, httpClient)
+}
+
+// NewForConfigAndClient creates a new CoreV1Client for the given config and http client.
+// Note the http client provided takes precedence over the configured transport values.
+func NewForConfigAndClient(c *rest.Config, h *http.Client) (*CoreV1Client, error) {
+	config := *c
+	if err := setConfigDefaults(&config); err != nil {
+		return nil, err
+	}
+	client, err := rest.RESTClientForConfigAndClient(&config, h)
 	if err != nil {
 		return nil, err
 	}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/event_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/event_expansion.go
index 211cf0603c09c..a3fdf57a98793 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/event_expansion.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/event_expansion.go
@@ -34,6 +34,7 @@ type EventExpansion interface {
 	CreateWithEventNamespace(event *v1.Event) (*v1.Event, error)
 	// UpdateWithEventNamespace is the same as a Update, except that it sends the request to the event.Namespace.
 	UpdateWithEventNamespace(event *v1.Event) (*v1.Event, error)
+	// PatchWithEventNamespace is the same as a Patch, except that it sends the request to the event.Namespace.
 	PatchWithEventNamespace(event *v1.Event, data []byte) (*v1.Event, error)
 	// Search finds events about the specified object
 	Search(scheme *runtime.Scheme, objOrRef runtime.Object) (*v1.EventList, error)
@@ -66,6 +67,9 @@ func (e *events) CreateWithEventNamespace(event *v1.Event) (*v1.Event, error) {
 // created with the "" namespace. Update also requires the ResourceVersion to be set in the event
 // object.
 func (e *events) UpdateWithEventNamespace(event *v1.Event) (*v1.Event, error) {
+	if e.ns != "" && event.Namespace != e.ns {
+		return nil, fmt.Errorf("can't update an event with namespace '%v' in namespace '%v'", event.Namespace, e.ns)
+	}
 	result := &v1.Event{}
 	err := e.client.Put().
 		NamespaceIfScoped(event.Namespace, len(event.Namespace) > 0).
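The added guard means UpdateWithEventNamespace now fails fast on a namespace mismatch instead of sending the request. A small behavioral sketch (the clientset and names are illustrative assumptions):

    package main

    import (
        corev1 "k8s.io/api/core/v1"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/client-go/kubernetes"
    )

    func updateEventSketch(clientset kubernetes.Interface) error {
        ev := &corev1.Event{ObjectMeta: metav1.ObjectMeta{Name: "demo", Namespace: "kube-system"}}
        // The namespaced interface says "default" while the event says "kube-system",
        // so this now errors client-side: "can't update an event with namespace
        // 'kube-system' in namespace 'default'".
        _, err := clientset.CoreV1().Events("default").UpdateWithEventNamespace(ev)
        return err
    }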
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/pod.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/pod.go
index 14b7bd0f04a83..63122cf3fb47a 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/pod.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/pod.go
@@ -52,8 +52,7 @@ type PodInterface interface {
 	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Pod, err error)
 	Apply(ctx context.Context, pod *corev1.PodApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Pod, err error)
 	ApplyStatus(ctx context.Context, pod *corev1.PodApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Pod, err error)
-	GetEphemeralContainers(ctx context.Context, podName string, options metav1.GetOptions) (*v1.EphemeralContainers, error)
-	UpdateEphemeralContainers(ctx context.Context, podName string, ephemeralContainers *v1.EphemeralContainers, opts metav1.UpdateOptions) (*v1.EphemeralContainers, error)
+	UpdateEphemeralContainers(ctx context.Context, podName string, pod *v1.Pod, opts metav1.UpdateOptions) (*v1.Pod, error)
 
 	PodExpansion
 }
@@ -258,30 +257,16 @@ func (c *pods) ApplyStatus(ctx context.Context, pod *corev1.PodApplyConfiguratio
 	return
 }
 
-// GetEphemeralContainers takes name of the pod, and returns the corresponding v1.EphemeralContainers object, and an error if there is any.
-func (c *pods) GetEphemeralContainers(ctx context.Context, podName string, options metav1.GetOptions) (result *v1.EphemeralContainers, err error) {
-	result = &v1.EphemeralContainers{}
-	err = c.client.Get().
-		Namespace(c.ns).
-		Resource("pods").
-		Name(podName).
-		SubResource("ephemeralcontainers").
-		VersionedParams(&options, scheme.ParameterCodec).
-		Do(ctx).
-		Into(result)
-	return
-}
-
-// UpdateEphemeralContainers takes the top resource name and the representation of a ephemeralContainers and updates it. Returns the server's representation of the ephemeralContainers, and an error, if there is any.
-func (c *pods) UpdateEphemeralContainers(ctx context.Context, podName string, ephemeralContainers *v1.EphemeralContainers, opts metav1.UpdateOptions) (result *v1.EphemeralContainers, err error) {
-	result = &v1.EphemeralContainers{}
+// UpdateEphemeralContainers takes the top resource name and the representation of a pod and updates it. Returns the server's representation of the pod, and an error, if there is any.
+func (c *pods) UpdateEphemeralContainers(ctx context.Context, podName string, pod *v1.Pod, opts metav1.UpdateOptions) (result *v1.Pod, err error) {
+	result = &v1.Pod{}
 	err = c.client.Put().
 		Namespace(c.ns).
 		Resource("pods").
 		Name(podName).
 		SubResource("ephemeralcontainers").
 		VersionedParams(&opts, scheme.ParameterCodec).
-		Body(ephemeralContainers).
+		Body(pod).
 		Do(ctx).
 		Into(result)
 	return
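A sketch of the new UpdateEphemeralContainers signature (pod, namespace, and the debug container below are illustrative assumptions): the caller now round-trips the whole Pod instead of a separate EphemeralContainers object.

    package main

    import (
        "context"

        corev1 "k8s.io/api/core/v1"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        typedcorev1 "k8s.io/client-go/kubernetes/typed/core/v1"
    )

    func addDebugContainer(ctx context.Context, pods typedcorev1.PodInterface, pod *corev1.Pod) (*corev1.Pod, error) {
        pod.Spec.EphemeralContainers = append(pod.Spec.EphemeralContainers, corev1.EphemeralContainer{
            EphemeralContainerCommon: corev1.EphemeralContainerCommon{
                Name:  "debugger",
                Image: "busybox",
            },
        })
        // The whole Pod is sent to the ephemeralcontainers subresource now.
        return pods.UpdateEphemeralContainers(ctx, pod.Name, pod, metav1.UpdateOptions{})
    }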
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/pod_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/pod_expansion.go
index 759fe0ff4a2d5..8b6e0e932f92d 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/pod_expansion.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/pod_expansion.go
@@ -20,7 +20,8 @@ import (
 	"context"
 
 	v1 "k8s.io/api/core/v1"
-	policy "k8s.io/api/policy/v1beta1"
+	policyv1 "k8s.io/api/policy/v1"
+	policyv1beta1 "k8s.io/api/policy/v1beta1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/util/net"
 	"k8s.io/client-go/kubernetes/scheme"
@@ -30,7 +31,16 @@
 // The PodExpansion interface allows manually adding extra methods to the PodInterface.
 type PodExpansion interface {
 	Bind(ctx context.Context, binding *v1.Binding, opts metav1.CreateOptions) error
-	Evict(ctx context.Context, eviction *policy.Eviction) error
+	// Evict submits a policy/v1beta1 Eviction request to the pod's eviction subresource.
+	// Equivalent to calling EvictV1beta1.
+	// Deprecated: Use EvictV1() (supported in 1.22+) or EvictV1beta1().
+	Evict(ctx context.Context, eviction *policyv1beta1.Eviction) error
+	// EvictV1 submits a policy/v1 Eviction request to the pod's eviction subresource.
+	// Supported in 1.22+.
+	EvictV1(ctx context.Context, eviction *policyv1.Eviction) error
+	// EvictV1beta1 submits a policy/v1beta1 Eviction request to the pod's eviction subresource.
+	// Supported in 1.22+.
+	EvictV1beta1(ctx context.Context, eviction *policyv1beta1.Eviction) error
 	GetLogs(name string, opts *v1.PodLogOptions) *restclient.Request
 	ProxyGet(scheme, name, port, path string, params map[string]string) restclient.ResponseWrapper
 }
@@ -40,7 +50,18 @@ func (c *pods) Bind(ctx context.Context, binding *v1.Binding, opts metav1.Create
 	return c.client.Post().Namespace(c.ns).Resource("pods").Name(binding.Name).VersionedParams(&opts, scheme.ParameterCodec).SubResource("binding").Body(binding).Do(ctx).Error()
 }
 
-func (c *pods) Evict(ctx context.Context, eviction *policy.Eviction) error {
+// Evict submits a policy/v1beta1 Eviction request to the pod's eviction subresource.
+// Equivalent to calling EvictV1beta1.
+// Deprecated: Use EvictV1() (supported in 1.22+) or EvictV1beta1().
+func (c *pods) Evict(ctx context.Context, eviction *policyv1beta1.Eviction) error {
+	return c.client.Post().Namespace(c.ns).Resource("pods").Name(eviction.Name).SubResource("eviction").Body(eviction).Do(ctx).Error()
+}
+
+func (c *pods) EvictV1beta1(ctx context.Context, eviction *policyv1beta1.Eviction) error {
+	return c.client.Post().Namespace(c.ns).Resource("pods").Name(eviction.Name).SubResource("eviction").Body(eviction).Do(ctx).Error()
+}
+
+func (c *pods) EvictV1(ctx context.Context, eviction *policyv1.Eviction) error {
 	return c.client.Post().Namespace(c.ns).Resource("pods").Name(eviction.Name).SubResource("eviction").Body(eviction).Do(ctx).Error()
 }
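A sketch of the eviction variants (pod identifiers are illustrative assumptions): on 1.22+ servers the policy/v1 form is preferred, while EvictV1beta1 remains for older servers.

    package main

    import (
        "context"

        policyv1 "k8s.io/api/policy/v1"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        typedcorev1 "k8s.io/client-go/kubernetes/typed/core/v1"
    )

    func evictPod(ctx context.Context, pods typedcorev1.PodInterface, name, namespace string) error {
        eviction := &policyv1.Eviction{
            ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: namespace},
        }
        // Prefer the policy/v1 eviction; fall back to EvictV1beta1 for old servers.
        return pods.EvictV1(ctx, eviction)
    }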
package v1 import ( + "net/http" + v1 "k8s.io/api/discovery/v1" "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" @@ -39,12 +41,28 @@ func (c *DiscoveryV1Client) EndpointSlices(namespace string) EndpointSliceInterf } // NewForConfig creates a new DiscoveryV1Client for the given config. +// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), +// where httpClient was generated with rest.HTTPClientFor(c). func NewForConfig(c *rest.Config) (*DiscoveryV1Client, error) { config := *c if err := setConfigDefaults(&config); err != nil { return nil, err } - client, err := rest.RESTClientFor(&config) + httpClient, err := rest.HTTPClientFor(&config) + if err != nil { + return nil, err + } + return NewForConfigAndClient(&config, httpClient) +} + +// NewForConfigAndClient creates a new DiscoveryV1Client for the given config and http client. +// Note the http client provided takes precedence over the configured transport values. +func NewForConfigAndClient(c *rest.Config, h *http.Client) (*DiscoveryV1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientForConfigAndClient(&config, h) if err != nil { return nil, err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1beta1/discovery_client.go b/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1beta1/discovery_client.go index 997239a95088c..193d5e9ebb69a 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1beta1/discovery_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1beta1/discovery_client.go @@ -19,6 +19,8 @@ limitations under the License. package v1beta1 import ( + "net/http" + v1beta1 "k8s.io/api/discovery/v1beta1" "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" @@ -39,12 +41,28 @@ func (c *DiscoveryV1beta1Client) EndpointSlices(namespace string) EndpointSliceI } // NewForConfig creates a new DiscoveryV1beta1Client for the given config. +// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), +// where httpClient was generated with rest.HTTPClientFor(c). func NewForConfig(c *rest.Config) (*DiscoveryV1beta1Client, error) { config := *c if err := setConfigDefaults(&config); err != nil { return nil, err } - client, err := rest.RESTClientFor(&config) + httpClient, err := rest.HTTPClientFor(&config) + if err != nil { + return nil, err + } + return NewForConfigAndClient(&config, httpClient) +} + +// NewForConfigAndClient creates a new DiscoveryV1beta1Client for the given config and http client. +// Note the http client provided takes precedence over the configured transport values. +func NewForConfigAndClient(c *rest.Config, h *http.Client) (*DiscoveryV1beta1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientForConfigAndClient(&config, h) if err != nil { return nil, err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/events/v1/events_client.go b/vendor/k8s.io/client-go/kubernetes/typed/events/v1/events_client.go index 9230ca5ca7c6d..8c73918d1c5ba 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/events/v1/events_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/events/v1/events_client.go @@ -19,6 +19,8 @@ limitations under the License. 
package v1 import ( + "net/http" + v1 "k8s.io/api/events/v1" "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" @@ -39,12 +41,28 @@ func (c *EventsV1Client) Events(namespace string) EventInterface { } // NewForConfig creates a new EventsV1Client for the given config. +// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), +// where httpClient was generated with rest.HTTPClientFor(c). func NewForConfig(c *rest.Config) (*EventsV1Client, error) { config := *c if err := setConfigDefaults(&config); err != nil { return nil, err } - client, err := rest.RESTClientFor(&config) + httpClient, err := rest.HTTPClientFor(&config) + if err != nil { + return nil, err + } + return NewForConfigAndClient(&config, httpClient) +} + +// NewForConfigAndClient creates a new EventsV1Client for the given config and http client. +// Note the http client provided takes precedence over the configured transport values. +func NewForConfigAndClient(c *rest.Config, h *http.Client) (*EventsV1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientForConfigAndClient(&config, h) if err != nil { return nil, err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/event_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/event_expansion.go index e0ae41dfe750b..7213193bf1b52 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/event_expansion.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/event_expansion.go @@ -33,7 +33,7 @@ type EventExpansion interface { // UpdateWithEventNamespace is the same as a Update // except that it sends the request to the event.Namespace. UpdateWithEventNamespace(event *v1beta1.Event) (*v1beta1.Event, error) - // PatchWithEventNamespace is the same as an Update + // PatchWithEventNamespace is the same as a Patch // except that it sends the request to the event.Namespace. PatchWithEventNamespace(event *v1beta1.Event, data []byte) (*v1beta1.Event, error) } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/events_client.go b/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/events_client.go index e372ccffac822..66506bf88e92c 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/events_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/events_client.go @@ -19,6 +19,8 @@ limitations under the License. package v1beta1 import ( + "net/http" + v1beta1 "k8s.io/api/events/v1beta1" "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" @@ -39,12 +41,28 @@ func (c *EventsV1beta1Client) Events(namespace string) EventInterface { } // NewForConfig creates a new EventsV1beta1Client for the given config. +// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), +// where httpClient was generated with rest.HTTPClientFor(c). func NewForConfig(c *rest.Config) (*EventsV1beta1Client, error) { config := *c if err := setConfigDefaults(&config); err != nil { return nil, err } - client, err := rest.RESTClientFor(&config) + httpClient, err := rest.HTTPClientFor(&config) + if err != nil { + return nil, err + } + return NewForConfigAndClient(&config, httpClient) +} + +// NewForConfigAndClient creates a new EventsV1beta1Client for the given config and http client. +// Note the http client provided takes precedence over the configured transport values. 
+func NewForConfigAndClient(c *rest.Config, h *http.Client) (*EventsV1beta1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientForConfigAndClient(&config, h) if err != nil { return nil, err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/deployment.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/deployment.go index 45c90aca3da7e..c41d8dbc2609b 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/deployment.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/deployment.go @@ -54,6 +54,7 @@ type DeploymentInterface interface { ApplyStatus(ctx context.Context, deployment *extensionsv1beta1.DeploymentApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.Deployment, err error) GetScale(ctx context.Context, deploymentName string, options v1.GetOptions) (*v1beta1.Scale, error) UpdateScale(ctx context.Context, deploymentName string, scale *v1beta1.Scale, opts v1.UpdateOptions) (*v1beta1.Scale, error) + ApplyScale(ctx context.Context, deploymentName string, scale *extensionsv1beta1.ScaleApplyConfiguration, opts v1.ApplyOptions) (*v1beta1.Scale, error) DeploymentExpansion } @@ -286,3 +287,28 @@ func (c *deployments) UpdateScale(ctx context.Context, deploymentName string, sc Into(result) return } + +// ApplyScale takes top resource name and the apply declarative configuration for scale, +// applies it and returns the applied scale, and an error, if there is any. +func (c *deployments) ApplyScale(ctx context.Context, deploymentName string, scale *extensionsv1beta1.ScaleApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.Scale, err error) { + if scale == nil { + return nil, fmt.Errorf("scale provided to ApplyScale must not be nil") + } + patchOpts := opts.ToPatchOptions() + data, err := json.Marshal(scale) + if err != nil { + return nil, err + } + + result = &v1beta1.Scale{} + err = c.client.Patch(types.ApplyPatchType). + Namespace(c.ns). + Resource("deployments"). + Name(deploymentName). + SubResource("scale"). + VersionedParams(&patchOpts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/extensions_client.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/extensions_client.go index e3b22aa44cb13..827b514df6f46 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/extensions_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/extensions_client.go @@ -19,6 +19,8 @@ limitations under the License. package v1beta1 import ( + "net/http" + v1beta1 "k8s.io/api/extensions/v1beta1" "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" @@ -64,12 +66,28 @@ func (c *ExtensionsV1beta1Client) ReplicaSets(namespace string) ReplicaSetInterf } // NewForConfig creates a new ExtensionsV1beta1Client for the given config. +// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), +// where httpClient was generated with rest.HTTPClientFor(c). 
func NewForConfig(c *rest.Config) (*ExtensionsV1beta1Client, error) { config := *c if err := setConfigDefaults(&config); err != nil { return nil, err } - client, err := rest.RESTClientFor(&config) + httpClient, err := rest.HTTPClientFor(&config) + if err != nil { + return nil, err + } + return NewForConfigAndClient(&config, httpClient) +} + +// NewForConfigAndClient creates a new ExtensionsV1beta1Client for the given config and http client. +// Note the http client provided takes precedence over the configured transport values. +func NewForConfigAndClient(c *rest.Config, h *http.Client) (*ExtensionsV1beta1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientForConfigAndClient(&config, h) if err != nil { return nil, err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/replicaset.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/replicaset.go index ee897f75adf42..3c907a3a048f9 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/replicaset.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/replicaset.go @@ -54,6 +54,7 @@ type ReplicaSetInterface interface { ApplyStatus(ctx context.Context, replicaSet *extensionsv1beta1.ReplicaSetApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.ReplicaSet, err error) GetScale(ctx context.Context, replicaSetName string, options v1.GetOptions) (*v1beta1.Scale, error) UpdateScale(ctx context.Context, replicaSetName string, scale *v1beta1.Scale, opts v1.UpdateOptions) (*v1beta1.Scale, error) + ApplyScale(ctx context.Context, replicaSetName string, scale *extensionsv1beta1.ScaleApplyConfiguration, opts v1.ApplyOptions) (*v1beta1.Scale, error) ReplicaSetExpansion } @@ -286,3 +287,28 @@ func (c *replicaSets) UpdateScale(ctx context.Context, replicaSetName string, sc Into(result) return } + +// ApplyScale takes top resource name and the apply declarative configuration for scale, +// applies it and returns the applied scale, and an error, if there is any. +func (c *replicaSets) ApplyScale(ctx context.Context, replicaSetName string, scale *extensionsv1beta1.ScaleApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.Scale, err error) { + if scale == nil { + return nil, fmt.Errorf("scale provided to ApplyScale must not be nil") + } + patchOpts := opts.ToPatchOptions() + data, err := json.Marshal(scale) + if err != nil { + return nil, err + } + + result = &v1beta1.Scale{} + err = c.client.Patch(types.ApplyPatchType). + Namespace(c.ns). + Resource("replicasets"). + Name(replicaSetName). + SubResource("scale"). + VersionedParams(&patchOpts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/flowcontrol_client.go b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/flowcontrol_client.go index 37a1ff2d7b3aa..c6f2d940560ad 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/flowcontrol_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/flowcontrol_client.go @@ -19,6 +19,8 @@ limitations under the License. package v1alpha1 import ( + "net/http" + v1alpha1 "k8s.io/api/flowcontrol/v1alpha1" "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" @@ -44,12 +46,28 @@ func (c *FlowcontrolV1alpha1Client) PriorityLevelConfigurations() PriorityLevelC } // NewForConfig creates a new FlowcontrolV1alpha1Client for the given config. 
+// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), +// where httpClient was generated with rest.HTTPClientFor(c). func NewForConfig(c *rest.Config) (*FlowcontrolV1alpha1Client, error) { config := *c if err := setConfigDefaults(&config); err != nil { return nil, err } - client, err := rest.RESTClientFor(&config) + httpClient, err := rest.HTTPClientFor(&config) + if err != nil { + return nil, err + } + return NewForConfigAndClient(&config, httpClient) +} + +// NewForConfigAndClient creates a new FlowcontrolV1alpha1Client for the given config and http client. +// Note the http client provided takes precedence over the configured transport values. +func NewForConfigAndClient(c *rest.Config, h *http.Client) (*FlowcontrolV1alpha1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientForConfigAndClient(&config, h) if err != nil { return nil, err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/flowcontrol_client.go b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/flowcontrol_client.go index 9a8ba560e6c25..c29cfca95730c 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/flowcontrol_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/flowcontrol_client.go @@ -19,6 +19,8 @@ limitations under the License. package v1beta1 import ( + "net/http" + v1beta1 "k8s.io/api/flowcontrol/v1beta1" "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" @@ -44,12 +46,28 @@ func (c *FlowcontrolV1beta1Client) PriorityLevelConfigurations() PriorityLevelCo } // NewForConfig creates a new FlowcontrolV1beta1Client for the given config. +// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), +// where httpClient was generated with rest.HTTPClientFor(c). func NewForConfig(c *rest.Config) (*FlowcontrolV1beta1Client, error) { config := *c if err := setConfigDefaults(&config); err != nil { return nil, err } - client, err := rest.RESTClientFor(&config) + httpClient, err := rest.HTTPClientFor(&config) + if err != nil { + return nil, err + } + return NewForConfigAndClient(&config, httpClient) +} + +// NewForConfigAndClient creates a new FlowcontrolV1beta1Client for the given config and http client. +// Note the http client provided takes precedence over the configured transport values. +func NewForConfigAndClient(c *rest.Config, h *http.Client) (*FlowcontrolV1beta1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientForConfigAndClient(&config, h) if err != nil { return nil, err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2/doc.go new file mode 100644 index 0000000000000..56518ef7f2ab7 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2/doc.go @@ -0,0 +1,20 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated typed clients. +package v1beta2 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2/flowcontrol_client.go b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2/flowcontrol_client.go new file mode 100644 index 0000000000000..f3cca4fc7536f --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2/flowcontrol_client.go @@ -0,0 +1,112 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1beta2 + +import ( + "net/http" + + v1beta2 "k8s.io/api/flowcontrol/v1beta2" + "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +type FlowcontrolV1beta2Interface interface { + RESTClient() rest.Interface + FlowSchemasGetter + PriorityLevelConfigurationsGetter +} + +// FlowcontrolV1beta2Client is used to interact with features provided by the flowcontrol.apiserver.k8s.io group. +type FlowcontrolV1beta2Client struct { + restClient rest.Interface +} + +func (c *FlowcontrolV1beta2Client) FlowSchemas() FlowSchemaInterface { + return newFlowSchemas(c) +} + +func (c *FlowcontrolV1beta2Client) PriorityLevelConfigurations() PriorityLevelConfigurationInterface { + return newPriorityLevelConfigurations(c) +} + +// NewForConfig creates a new FlowcontrolV1beta2Client for the given config. +// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), +// where httpClient was generated with rest.HTTPClientFor(c). +func NewForConfig(c *rest.Config) (*FlowcontrolV1beta2Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + httpClient, err := rest.HTTPClientFor(&config) + if err != nil { + return nil, err + } + return NewForConfigAndClient(&config, httpClient) +} + +// NewForConfigAndClient creates a new FlowcontrolV1beta2Client for the given config and http client. +// Note the http client provided takes precedence over the configured transport values. +func NewForConfigAndClient(c *rest.Config, h *http.Client) (*FlowcontrolV1beta2Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientForConfigAndClient(&config, h) + if err != nil { + return nil, err + } + return &FlowcontrolV1beta2Client{client}, nil +} + +// NewForConfigOrDie creates a new FlowcontrolV1beta2Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *FlowcontrolV1beta2Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new FlowcontrolV1beta2Client for the given RESTClient. 
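// A caller-side sketch (not part of the vendored patch) of constructing the
// new flowcontrol/v1beta2 client this file introduces. NewForConfig suits
// callers that handle errors; NewForConfigOrDie is the panicking variant for
// wiring code that cannot proceed without a client. Assumes an in-cluster
// config for illustration.
package flowcontrolexample

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	flowcontrolv1beta2 "k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2"
	rest "k8s.io/client-go/rest"
)

func listFlowSchemas(ctx context.Context) error {
	cfg, err := rest.InClusterConfig()
	if err != nil {
		return err
	}
	client, err := flowcontrolv1beta2.NewForConfig(cfg)
	if err != nil {
		return err
	}
	// FlowSchemas are cluster-scoped, so there is no namespace argument.
	list, err := client.FlowSchemas().List(ctx, metav1.ListOptions{})
	if err != nil {
		return err
	}
	for _, fs := range list.Items {
		fmt.Println(fs.Name)
	}
	return nil
}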
+func New(c rest.Interface) *FlowcontrolV1beta2Client { + return &FlowcontrolV1beta2Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1beta2.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *FlowcontrolV1beta2Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2/flowschema.go b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2/flowschema.go new file mode 100644 index 0000000000000..3a1f12b6a28d6 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2/flowschema.go @@ -0,0 +1,243 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1beta2 + +import ( + "context" + json "encoding/json" + "fmt" + "time" + + v1beta2 "k8s.io/api/flowcontrol/v1beta2" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + flowcontrolv1beta2 "k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +// FlowSchemasGetter has a method to return a FlowSchemaInterface. +// A group's client should implement this interface. +type FlowSchemasGetter interface { + FlowSchemas() FlowSchemaInterface +} + +// FlowSchemaInterface has methods to work with FlowSchema resources. 
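// A test-oriented sketch (not part of the vendored patch): because the New
// constructor above accepts any rest.Interface, the typed client can be wired
// to the fake REST client from k8s.io/client-go/rest/fake in unit tests. The
// zero-value fake shown here is an assumption for illustration; real tests
// stub its response fields.
package flowcontroltest

import (
	flowcontrolv1beta2 "k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2"
	restfake "k8s.io/client-go/rest/fake"
)

func newTestClient() *flowcontrolv1beta2.FlowcontrolV1beta2Client {
	// fake.RESTClient implements rest.Interface; responses are stubbed through
	// its exported fields rather than a live API server.
	return flowcontrolv1beta2.New(&restfake.RESTClient{})
}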
+type FlowSchemaInterface interface { + Create(ctx context.Context, flowSchema *v1beta2.FlowSchema, opts v1.CreateOptions) (*v1beta2.FlowSchema, error) + Update(ctx context.Context, flowSchema *v1beta2.FlowSchema, opts v1.UpdateOptions) (*v1beta2.FlowSchema, error) + UpdateStatus(ctx context.Context, flowSchema *v1beta2.FlowSchema, opts v1.UpdateOptions) (*v1beta2.FlowSchema, error) + Delete(ctx context.Context, name string, opts v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta2.FlowSchema, error) + List(ctx context.Context, opts v1.ListOptions) (*v1beta2.FlowSchemaList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta2.FlowSchema, err error) + Apply(ctx context.Context, flowSchema *flowcontrolv1beta2.FlowSchemaApplyConfiguration, opts v1.ApplyOptions) (result *v1beta2.FlowSchema, err error) + ApplyStatus(ctx context.Context, flowSchema *flowcontrolv1beta2.FlowSchemaApplyConfiguration, opts v1.ApplyOptions) (result *v1beta2.FlowSchema, err error) + FlowSchemaExpansion +} + +// flowSchemas implements FlowSchemaInterface +type flowSchemas struct { + client rest.Interface +} + +// newFlowSchemas returns a FlowSchemas +func newFlowSchemas(c *FlowcontrolV1beta2Client) *flowSchemas { + return &flowSchemas{ + client: c.RESTClient(), + } +} + +// Get takes name of the flowSchema, and returns the corresponding flowSchema object, and an error if there is any. +func (c *flowSchemas) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta2.FlowSchema, err error) { + result = &v1beta2.FlowSchema{} + err = c.client.Get(). + Resource("flowschemas"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of FlowSchemas that match those selectors. +func (c *flowSchemas) List(ctx context.Context, opts v1.ListOptions) (result *v1beta2.FlowSchemaList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1beta2.FlowSchemaList{} + err = c.client.Get(). + Resource("flowschemas"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested flowSchemas. +func (c *flowSchemas) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Resource("flowschemas"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a flowSchema and creates it. Returns the server's representation of the flowSchema, and an error, if there is any. +func (c *flowSchemas) Create(ctx context.Context, flowSchema *v1beta2.FlowSchema, opts v1.CreateOptions) (result *v1beta2.FlowSchema, err error) { + result = &v1beta2.FlowSchema{} + err = c.client.Post(). + Resource("flowschemas"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(flowSchema). + Do(ctx). 
+ Into(result) + return +} + +// Update takes the representation of a flowSchema and updates it. Returns the server's representation of the flowSchema, and an error, if there is any. +func (c *flowSchemas) Update(ctx context.Context, flowSchema *v1beta2.FlowSchema, opts v1.UpdateOptions) (result *v1beta2.FlowSchema, err error) { + result = &v1beta2.FlowSchema{} + err = c.client.Put(). + Resource("flowschemas"). + Name(flowSchema.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(flowSchema). + Do(ctx). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *flowSchemas) UpdateStatus(ctx context.Context, flowSchema *v1beta2.FlowSchema, opts v1.UpdateOptions) (result *v1beta2.FlowSchema, err error) { + result = &v1beta2.FlowSchema{} + err = c.client.Put(). + Resource("flowschemas"). + Name(flowSchema.Name). + SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(flowSchema). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the flowSchema and deletes it. Returns an error if one occurs. +func (c *flowSchemas) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + return c.client.Delete(). + Resource("flowschemas"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *flowSchemas) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Resource("flowschemas"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched flowSchema. +func (c *flowSchemas) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta2.FlowSchema, err error) { + result = &v1beta2.FlowSchema{} + err = c.client.Patch(pt). + Resource("flowschemas"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} + +// Apply takes the given apply declarative configuration, applies it and returns the applied flowSchema. +func (c *flowSchemas) Apply(ctx context.Context, flowSchema *flowcontrolv1beta2.FlowSchemaApplyConfiguration, opts v1.ApplyOptions) (result *v1beta2.FlowSchema, err error) { + if flowSchema == nil { + return nil, fmt.Errorf("flowSchema provided to Apply must not be nil") + } + patchOpts := opts.ToPatchOptions() + data, err := json.Marshal(flowSchema) + if err != nil { + return nil, err + } + name := flowSchema.Name + if name == nil { + return nil, fmt.Errorf("flowSchema.Name must be provided to Apply") + } + result = &v1beta2.FlowSchema{} + err = c.client.Patch(types.ApplyPatchType). + Resource("flowschemas"). + Name(*name). + VersionedParams(&patchOpts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} + +// ApplyStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
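// A caller-side sketch (not part of the vendored patch) of the Apply method
// defined above: server-side apply driven by a declarative
// FlowSchemaApplyConfiguration. The builder calls below (FlowSchema,
// FlowSchemaSpec, WithMatchingPrecedence) are assumed from the generated
// applyconfigurations package for this group; adjust to the builders actually
// vendored.
package flowschemaapply

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	applyv1beta2 "k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2"
	flowcontrolv1beta2 "k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2"
)

func applyFlowSchema(ctx context.Context, client flowcontrolv1beta2.FlowcontrolV1beta2Interface) error {
	// The apply configuration carries only the fields this manager owns; the
	// name is mandatory (Apply above rejects a nil or unnamed configuration).
	fs := applyv1beta2.FlowSchema("example-schema").
		WithSpec(applyv1beta2.FlowSchemaSpec().WithMatchingPrecedence(1000))
	_, err := client.FlowSchemas().Apply(ctx, fs,
		metav1.ApplyOptions{FieldManager: "example-manager", Force: true})
	return err
}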
+func (c *flowSchemas) ApplyStatus(ctx context.Context, flowSchema *flowcontrolv1beta2.FlowSchemaApplyConfiguration, opts v1.ApplyOptions) (result *v1beta2.FlowSchema, err error) { + if flowSchema == nil { + return nil, fmt.Errorf("flowSchema provided to Apply must not be nil") + } + patchOpts := opts.ToPatchOptions() + data, err := json.Marshal(flowSchema) + if err != nil { + return nil, err + } + + name := flowSchema.Name + if name == nil { + return nil, fmt.Errorf("flowSchema.Name must be provided to Apply") + } + + result = &v1beta2.FlowSchema{} + err = c.client.Patch(types.ApplyPatchType). + Resource("flowschemas"). + Name(*name). + SubResource("status"). + VersionedParams(&patchOpts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2/generated_expansion.go new file mode 100644 index 0000000000000..1d4477006cd9a --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2/generated_expansion.go @@ -0,0 +1,23 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1beta2 + +type FlowSchemaExpansion interface{} + +type PriorityLevelConfigurationExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2/prioritylevelconfiguration.go b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2/prioritylevelconfiguration.go new file mode 100644 index 0000000000000..f028869f172d1 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2/prioritylevelconfiguration.go @@ -0,0 +1,243 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1beta2 + +import ( + "context" + json "encoding/json" + "fmt" + "time" + + v1beta2 "k8s.io/api/flowcontrol/v1beta2" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + flowcontrolv1beta2 "k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +// PriorityLevelConfigurationsGetter has a method to return a PriorityLevelConfigurationInterface. +// A group's client should implement this interface. 
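// A caller-side sketch (not part of the vendored patch) of DeleteCollection,
// defined earlier in flowschema.go: it deletes every object matching the list
// selector in one request, and surfaces ListOptions.TimeoutSeconds as the
// request timeout exactly as the generated code above does. The label selector
// is illustrative.
package flowschemadelete

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	flowcontrolv1beta2 "k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2"
)

func deleteExperimentalFlowSchemas(ctx context.Context, client flowcontrolv1beta2.FlowcontrolV1beta2Interface) error {
	timeout := int64(30) // becomes the request Timeout in the generated client
	return client.FlowSchemas().DeleteCollection(ctx,
		metav1.DeleteOptions{},
		metav1.ListOptions{
			LabelSelector:  "stage=experimental",
			TimeoutSeconds: &timeout,
		})
}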
+type PriorityLevelConfigurationsGetter interface { + PriorityLevelConfigurations() PriorityLevelConfigurationInterface +} + +// PriorityLevelConfigurationInterface has methods to work with PriorityLevelConfiguration resources. +type PriorityLevelConfigurationInterface interface { + Create(ctx context.Context, priorityLevelConfiguration *v1beta2.PriorityLevelConfiguration, opts v1.CreateOptions) (*v1beta2.PriorityLevelConfiguration, error) + Update(ctx context.Context, priorityLevelConfiguration *v1beta2.PriorityLevelConfiguration, opts v1.UpdateOptions) (*v1beta2.PriorityLevelConfiguration, error) + UpdateStatus(ctx context.Context, priorityLevelConfiguration *v1beta2.PriorityLevelConfiguration, opts v1.UpdateOptions) (*v1beta2.PriorityLevelConfiguration, error) + Delete(ctx context.Context, name string, opts v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta2.PriorityLevelConfiguration, error) + List(ctx context.Context, opts v1.ListOptions) (*v1beta2.PriorityLevelConfigurationList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta2.PriorityLevelConfiguration, err error) + Apply(ctx context.Context, priorityLevelConfiguration *flowcontrolv1beta2.PriorityLevelConfigurationApplyConfiguration, opts v1.ApplyOptions) (result *v1beta2.PriorityLevelConfiguration, err error) + ApplyStatus(ctx context.Context, priorityLevelConfiguration *flowcontrolv1beta2.PriorityLevelConfigurationApplyConfiguration, opts v1.ApplyOptions) (result *v1beta2.PriorityLevelConfiguration, err error) + PriorityLevelConfigurationExpansion +} + +// priorityLevelConfigurations implements PriorityLevelConfigurationInterface +type priorityLevelConfigurations struct { + client rest.Interface +} + +// newPriorityLevelConfigurations returns a PriorityLevelConfigurations +func newPriorityLevelConfigurations(c *FlowcontrolV1beta2Client) *priorityLevelConfigurations { + return &priorityLevelConfigurations{ + client: c.RESTClient(), + } +} + +// Get takes name of the priorityLevelConfiguration, and returns the corresponding priorityLevelConfiguration object, and an error if there is any. +func (c *priorityLevelConfigurations) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta2.PriorityLevelConfiguration, err error) { + result = &v1beta2.PriorityLevelConfiguration{} + err = c.client.Get(). + Resource("prioritylevelconfigurations"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of PriorityLevelConfigurations that match those selectors. +func (c *priorityLevelConfigurations) List(ctx context.Context, opts v1.ListOptions) (result *v1beta2.PriorityLevelConfigurationList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1beta2.PriorityLevelConfigurationList{} + err = c.client.Get(). + Resource("prioritylevelconfigurations"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested priorityLevelConfigurations. 
+func (c *priorityLevelConfigurations) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Resource("prioritylevelconfigurations"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a priorityLevelConfiguration and creates it. Returns the server's representation of the priorityLevelConfiguration, and an error, if there is any. +func (c *priorityLevelConfigurations) Create(ctx context.Context, priorityLevelConfiguration *v1beta2.PriorityLevelConfiguration, opts v1.CreateOptions) (result *v1beta2.PriorityLevelConfiguration, err error) { + result = &v1beta2.PriorityLevelConfiguration{} + err = c.client.Post(). + Resource("prioritylevelconfigurations"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(priorityLevelConfiguration). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a priorityLevelConfiguration and updates it. Returns the server's representation of the priorityLevelConfiguration, and an error, if there is any. +func (c *priorityLevelConfigurations) Update(ctx context.Context, priorityLevelConfiguration *v1beta2.PriorityLevelConfiguration, opts v1.UpdateOptions) (result *v1beta2.PriorityLevelConfiguration, err error) { + result = &v1beta2.PriorityLevelConfiguration{} + err = c.client.Put(). + Resource("prioritylevelconfigurations"). + Name(priorityLevelConfiguration.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(priorityLevelConfiguration). + Do(ctx). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *priorityLevelConfigurations) UpdateStatus(ctx context.Context, priorityLevelConfiguration *v1beta2.PriorityLevelConfiguration, opts v1.UpdateOptions) (result *v1beta2.PriorityLevelConfiguration, err error) { + result = &v1beta2.PriorityLevelConfiguration{} + err = c.client.Put(). + Resource("prioritylevelconfigurations"). + Name(priorityLevelConfiguration.Name). + SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(priorityLevelConfiguration). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the priorityLevelConfiguration and deletes it. Returns an error if one occurs. +func (c *priorityLevelConfigurations) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + return c.client.Delete(). + Resource("prioritylevelconfigurations"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *priorityLevelConfigurations) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Resource("prioritylevelconfigurations"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched priorityLevelConfiguration. 
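// A caller-side sketch (not part of the vendored patch) of the generated Watch
// method above: it sets opts.Watch = true and returns a watch.Interface whose
// ResultChan closes when the server ends the watch or TimeoutSeconds elapses.
package plcwatch

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	flowcontrolv1beta2 "k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2"
)

func watchPriorityLevels(ctx context.Context, client flowcontrolv1beta2.FlowcontrolV1beta2Interface) error {
	timeout := int64(60)
	w, err := client.PriorityLevelConfigurations().Watch(ctx,
		metav1.ListOptions{TimeoutSeconds: &timeout})
	if err != nil {
		return err
	}
	defer w.Stop()
	// Drain events until the channel closes; ev.Object carries the changed
	// PriorityLevelConfiguration.
	for ev := range w.ResultChan() {
		fmt.Println("event:", ev.Type)
	}
	return nil
}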
+func (c *priorityLevelConfigurations) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta2.PriorityLevelConfiguration, err error) { + result = &v1beta2.PriorityLevelConfiguration{} + err = c.client.Patch(pt). + Resource("prioritylevelconfigurations"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} + +// Apply takes the given apply declarative configuration, applies it and returns the applied priorityLevelConfiguration. +func (c *priorityLevelConfigurations) Apply(ctx context.Context, priorityLevelConfiguration *flowcontrolv1beta2.PriorityLevelConfigurationApplyConfiguration, opts v1.ApplyOptions) (result *v1beta2.PriorityLevelConfiguration, err error) { + if priorityLevelConfiguration == nil { + return nil, fmt.Errorf("priorityLevelConfiguration provided to Apply must not be nil") + } + patchOpts := opts.ToPatchOptions() + data, err := json.Marshal(priorityLevelConfiguration) + if err != nil { + return nil, err + } + name := priorityLevelConfiguration.Name + if name == nil { + return nil, fmt.Errorf("priorityLevelConfiguration.Name must be provided to Apply") + } + result = &v1beta2.PriorityLevelConfiguration{} + err = c.client.Patch(types.ApplyPatchType). + Resource("prioritylevelconfigurations"). + Name(*name). + VersionedParams(&patchOpts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} + +// ApplyStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). +func (c *priorityLevelConfigurations) ApplyStatus(ctx context.Context, priorityLevelConfiguration *flowcontrolv1beta2.PriorityLevelConfigurationApplyConfiguration, opts v1.ApplyOptions) (result *v1beta2.PriorityLevelConfiguration, err error) { + if priorityLevelConfiguration == nil { + return nil, fmt.Errorf("priorityLevelConfiguration provided to Apply must not be nil") + } + patchOpts := opts.ToPatchOptions() + data, err := json.Marshal(priorityLevelConfiguration) + if err != nil { + return nil, err + } + + name := priorityLevelConfiguration.Name + if name == nil { + return nil, fmt.Errorf("priorityLevelConfiguration.Name must be provided to Apply") + } + + result = &v1beta2.PriorityLevelConfiguration{} + err = c.client.Patch(types.ApplyPatchType). + Resource("prioritylevelconfigurations"). + Name(*name). + SubResource("status"). + VersionedParams(&patchOpts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/networking_client.go b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/networking_client.go index c83b93575e6f8..3b72a7ae92976 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/networking_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/networking_client.go @@ -19,6 +19,8 @@ limitations under the License. package v1 import ( + "net/http" + v1 "k8s.io/api/networking/v1" "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" @@ -49,12 +51,28 @@ func (c *NetworkingV1Client) NetworkPolicies(namespace string) NetworkPolicyInte } // NewForConfig creates a new NetworkingV1Client for the given config. +// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), +// where httpClient was generated with rest.HTTPClientFor(c). 
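// A caller-side sketch (not part of the vendored patch) contrasting the plain
// Patch method above with Apply: Patch takes a caller-chosen patch type and
// raw bytes. Here, a JSON merge patch sets an illustrative label on a
// PriorityLevelConfiguration.
package plcpatch

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	flowcontrolv1beta2 "k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2"
)

func labelPriorityLevel(ctx context.Context, client flowcontrolv1beta2.FlowcontrolV1beta2Interface, name string) error {
	patch := []byte(`{"metadata":{"labels":{"audited":"true"}}}`)
	_, err := client.PriorityLevelConfigurations().Patch(ctx, name,
		types.MergePatchType, patch, metav1.PatchOptions{})
	return err
}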
func NewForConfig(c *rest.Config) (*NetworkingV1Client, error) { config := *c if err := setConfigDefaults(&config); err != nil { return nil, err } - client, err := rest.RESTClientFor(&config) + httpClient, err := rest.HTTPClientFor(&config) + if err != nil { + return nil, err + } + return NewForConfigAndClient(&config, httpClient) +} + +// NewForConfigAndClient creates a new NetworkingV1Client for the given config and http client. +// Note the http client provided takes precedence over the configured transport values. +func NewForConfigAndClient(c *rest.Config, h *http.Client) (*NetworkingV1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientForConfigAndClient(&config, h) if err != nil { return nil, err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/networking_client.go b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/networking_client.go index 849ac219f63cd..851634ed0f9a1 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/networking_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/networking_client.go @@ -19,6 +19,8 @@ limitations under the License. package v1beta1 import ( + "net/http" + v1beta1 "k8s.io/api/networking/v1beta1" "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" @@ -44,12 +46,28 @@ func (c *NetworkingV1beta1Client) IngressClasses() IngressClassInterface { } // NewForConfig creates a new NetworkingV1beta1Client for the given config. +// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), +// where httpClient was generated with rest.HTTPClientFor(c). func NewForConfig(c *rest.Config) (*NetworkingV1beta1Client, error) { config := *c if err := setConfigDefaults(&config); err != nil { return nil, err } - client, err := rest.RESTClientFor(&config) + httpClient, err := rest.HTTPClientFor(&config) + if err != nil { + return nil, err + } + return NewForConfigAndClient(&config, httpClient) +} + +// NewForConfigAndClient creates a new NetworkingV1beta1Client for the given config and http client. +// Note the http client provided takes precedence over the configured transport values. +func NewForConfigAndClient(c *rest.Config, h *http.Client) (*NetworkingV1beta1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientForConfigAndClient(&config, h) if err != nil { return nil, err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/node/v1/node_client.go b/vendor/k8s.io/client-go/kubernetes/typed/node/v1/node_client.go index 7f0da811b22df..844f9fc70fefa 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/node/v1/node_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/node/v1/node_client.go @@ -19,6 +19,8 @@ limitations under the License. package v1 import ( + "net/http" + v1 "k8s.io/api/node/v1" "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" @@ -39,12 +41,28 @@ func (c *NodeV1Client) RuntimeClasses() RuntimeClassInterface { } // NewForConfig creates a new NodeV1Client for the given config. +// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), +// where httpClient was generated with rest.HTTPClientFor(c). 
func NewForConfig(c *rest.Config) (*NodeV1Client, error) { config := *c if err := setConfigDefaults(&config); err != nil { return nil, err } - client, err := rest.RESTClientFor(&config) + httpClient, err := rest.HTTPClientFor(&config) + if err != nil { + return nil, err + } + return NewForConfigAndClient(&config, httpClient) +} + +// NewForConfigAndClient creates a new NodeV1Client for the given config and http client. +// Note the http client provided takes precedence over the configured transport values. +func NewForConfigAndClient(c *rest.Config, h *http.Client) (*NodeV1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientForConfigAndClient(&config, h) if err != nil { return nil, err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/node/v1alpha1/node_client.go b/vendor/k8s.io/client-go/kubernetes/typed/node/v1alpha1/node_client.go index e7acc27e4047d..2a197d58e6475 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/node/v1alpha1/node_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/node/v1alpha1/node_client.go @@ -19,6 +19,8 @@ limitations under the License. package v1alpha1 import ( + "net/http" + v1alpha1 "k8s.io/api/node/v1alpha1" "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" @@ -39,12 +41,28 @@ func (c *NodeV1alpha1Client) RuntimeClasses() RuntimeClassInterface { } // NewForConfig creates a new NodeV1alpha1Client for the given config. +// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), +// where httpClient was generated with rest.HTTPClientFor(c). func NewForConfig(c *rest.Config) (*NodeV1alpha1Client, error) { config := *c if err := setConfigDefaults(&config); err != nil { return nil, err } - client, err := rest.RESTClientFor(&config) + httpClient, err := rest.HTTPClientFor(&config) + if err != nil { + return nil, err + } + return NewForConfigAndClient(&config, httpClient) +} + +// NewForConfigAndClient creates a new NodeV1alpha1Client for the given config and http client. +// Note the http client provided takes precedence over the configured transport values. +func NewForConfigAndClient(c *rest.Config, h *http.Client) (*NodeV1alpha1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientForConfigAndClient(&config, h) if err != nil { return nil, err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/node/v1beta1/node_client.go b/vendor/k8s.io/client-go/kubernetes/typed/node/v1beta1/node_client.go index b38d9acac698f..4f6802ffaca56 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/node/v1beta1/node_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/node/v1beta1/node_client.go @@ -19,6 +19,8 @@ limitations under the License. package v1beta1 import ( + "net/http" + v1beta1 "k8s.io/api/node/v1beta1" "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" @@ -39,12 +41,28 @@ func (c *NodeV1beta1Client) RuntimeClasses() RuntimeClassInterface { } // NewForConfig creates a new NodeV1beta1Client for the given config. +// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), +// where httpClient was generated with rest.HTTPClientFor(c). 
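// A caller-side sketch (not part of the vendored patch): because the new
// constructors accept a plain *http.Client, callers can tune client-wide
// settings, such as an overall request timeout, before handing the client to
// a typed group client. Shown with the node/v1 client above; the 30-second
// value is illustrative.
package nodeclient

import (
	"time"

	nodev1 "k8s.io/client-go/kubernetes/typed/node/v1"
	rest "k8s.io/client-go/rest"
)

func newNodeClientWithTimeout(cfg *rest.Config) (*nodev1.NodeV1Client, error) {
	httpClient, err := rest.HTTPClientFor(cfg)
	if err != nil {
		return nil, err
	}
	// The http.Client timeout caps each request end to end; per the doc
	// comments above, this client takes precedence over cfg's transport values.
	httpClient.Timeout = 30 * time.Second
	return nodev1.NewForConfigAndClient(cfg, httpClient)
}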
func NewForConfig(c *rest.Config) (*NodeV1beta1Client, error) { config := *c if err := setConfigDefaults(&config); err != nil { return nil, err } - client, err := rest.RESTClientFor(&config) + httpClient, err := rest.HTTPClientFor(&config) + if err != nil { + return nil, err + } + return NewForConfigAndClient(&config, httpClient) +} + +// NewForConfigAndClient creates a new NodeV1beta1Client for the given config and http client. +// Note the http client provided takes precedence over the configured transport values. +func NewForConfigAndClient(c *rest.Config, h *http.Client) (*NodeV1beta1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientForConfigAndClient(&config, h) if err != nil { return nil, err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1/eviction.go b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1/eviction.go new file mode 100644 index 0000000000000..cd1aac9c29a33 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1/eviction.go @@ -0,0 +1,48 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + rest "k8s.io/client-go/rest" +) + +// EvictionsGetter has a method to return a EvictionInterface. +// A group's client should implement this interface. +type EvictionsGetter interface { + Evictions(namespace string) EvictionInterface +} + +// EvictionInterface has methods to work with Eviction resources. +type EvictionInterface interface { + EvictionExpansion +} + +// evictions implements EvictionInterface +type evictions struct { + client rest.Interface + ns string +} + +// newEvictions returns a Evictions +func newEvictions(c *PolicyV1Client, namespace string) *evictions { + return &evictions{ + client: c.RESTClient(), + ns: namespace, + } +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1/eviction_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1/eviction_expansion.go new file mode 100644 index 0000000000000..853187feb5d1a --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1/eviction_expansion.go @@ -0,0 +1,40 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "context" + + policy "k8s.io/api/policy/v1" +) + +// The EvictionExpansion interface allows manually adding extra methods to the ScaleInterface. 
+type EvictionExpansion interface { + Evict(ctx context.Context, eviction *policy.Eviction) error +} + +func (c *evictions) Evict(ctx context.Context, eviction *policy.Eviction) error { + return c.client.Post(). + AbsPath("/api/v1"). + Namespace(eviction.Namespace). + Resource("pods"). + Name(eviction.Name). + SubResource("eviction"). + Body(eviction). + Do(ctx). + Error() +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1/policy_client.go b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1/policy_client.go index bb05a686a67ea..9bfd98aa9f3d0 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1/policy_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1/policy_client.go @@ -19,6 +19,8 @@ limitations under the License. package v1 import ( + "net/http" + v1 "k8s.io/api/policy/v1" "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" @@ -26,6 +28,7 @@ import ( type PolicyV1Interface interface { RESTClient() rest.Interface + EvictionsGetter PodDisruptionBudgetsGetter } @@ -34,17 +37,37 @@ type PolicyV1Client struct { restClient rest.Interface } +func (c *PolicyV1Client) Evictions(namespace string) EvictionInterface { + return newEvictions(c, namespace) +} + func (c *PolicyV1Client) PodDisruptionBudgets(namespace string) PodDisruptionBudgetInterface { return newPodDisruptionBudgets(c, namespace) } // NewForConfig creates a new PolicyV1Client for the given config. +// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), +// where httpClient was generated with rest.HTTPClientFor(c). func NewForConfig(c *rest.Config) (*PolicyV1Client, error) { config := *c if err := setConfigDefaults(&config); err != nil { return nil, err } - client, err := rest.RESTClientFor(&config) + httpClient, err := rest.HTTPClientFor(&config) + if err != nil { + return nil, err + } + return NewForConfigAndClient(&config, httpClient) +} + +// NewForConfigAndClient creates a new PolicyV1Client for the given config and http client. +// Note the http client provided takes precedence over the configured transport values. +func NewForConfigAndClient(c *rest.Config, h *http.Client) (*PolicyV1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientForConfigAndClient(&config, h) if err != nil { return nil, err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/policy_client.go b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/policy_client.go index 8b8b22c6dedf7..5b65c9c0aa1f0 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/policy_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/policy_client.go @@ -19,6 +19,8 @@ limitations under the License. package v1beta1 import ( + "net/http" + v1beta1 "k8s.io/api/policy/v1beta1" "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" @@ -49,12 +51,28 @@ func (c *PolicyV1beta1Client) PodSecurityPolicies() PodSecurityPolicyInterface { } // NewForConfig creates a new PolicyV1beta1Client for the given config. +// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), +// where httpClient was generated with rest.HTTPClientFor(c). 
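// A caller-side sketch (not part of the vendored patch) of the policy/v1
// Evict helper above: it POSTs to the pod's "eviction" subresource, which
// honors any PodDisruptionBudget before the pod is deleted.
package evictexample

import (
	"context"

	policyv1 "k8s.io/api/policy/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

func evictPod(ctx context.Context, cs kubernetes.Interface, ns, pod string) error {
	eviction := &policyv1.Eviction{
		ObjectMeta: metav1.ObjectMeta{Name: pod, Namespace: ns},
	}
	// The API server rejects the eviction (HTTP 429) while deleting the pod
	// would violate a PodDisruptionBudget; callers typically retry.
	return cs.PolicyV1().Evictions(ns).Evict(ctx, eviction)
}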
func NewForConfig(c *rest.Config) (*PolicyV1beta1Client, error) { config := *c if err := setConfigDefaults(&config); err != nil { return nil, err } - client, err := rest.RESTClientFor(&config) + httpClient, err := rest.HTTPClientFor(&config) + if err != nil { + return nil, err + } + return NewForConfigAndClient(&config, httpClient) +} + +// NewForConfigAndClient creates a new PolicyV1beta1Client for the given config and http client. +// Note the http client provided takes precedence over the configured transport values. +func NewForConfigAndClient(c *rest.Config, h *http.Client) (*PolicyV1beta1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientForConfigAndClient(&config, h) if err != nil { return nil, err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/rbac_client.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/rbac_client.go index 1bc0179c62ef7..a02f0357d947d 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/rbac_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/rbac_client.go @@ -19,6 +19,8 @@ limitations under the License. package v1 import ( + "net/http" + v1 "k8s.io/api/rbac/v1" "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" @@ -54,12 +56,28 @@ func (c *RbacV1Client) RoleBindings(namespace string) RoleBindingInterface { } // NewForConfig creates a new RbacV1Client for the given config. +// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), +// where httpClient was generated with rest.HTTPClientFor(c). func NewForConfig(c *rest.Config) (*RbacV1Client, error) { config := *c if err := setConfigDefaults(&config); err != nil { return nil, err } - client, err := rest.RESTClientFor(&config) + httpClient, err := rest.HTTPClientFor(&config) + if err != nil { + return nil, err + } + return NewForConfigAndClient(&config, httpClient) +} + +// NewForConfigAndClient creates a new RbacV1Client for the given config and http client. +// Note the http client provided takes precedence over the configured transport values. +func NewForConfigAndClient(c *rest.Config, h *http.Client) (*RbacV1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientForConfigAndClient(&config, h) if err != nil { return nil, err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/rbac_client.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/rbac_client.go index efbbc68be938b..cc5b309e909d4 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/rbac_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/rbac_client.go @@ -19,6 +19,8 @@ limitations under the License. package v1alpha1 import ( + "net/http" + v1alpha1 "k8s.io/api/rbac/v1alpha1" "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" @@ -54,12 +56,28 @@ func (c *RbacV1alpha1Client) RoleBindings(namespace string) RoleBindingInterface } // NewForConfig creates a new RbacV1alpha1Client for the given config. +// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), +// where httpClient was generated with rest.HTTPClientFor(c). 
func NewForConfig(c *rest.Config) (*RbacV1alpha1Client, error) { config := *c if err := setConfigDefaults(&config); err != nil { return nil, err } - client, err := rest.RESTClientFor(&config) + httpClient, err := rest.HTTPClientFor(&config) + if err != nil { + return nil, err + } + return NewForConfigAndClient(&config, httpClient) +} + +// NewForConfigAndClient creates a new RbacV1alpha1Client for the given config and http client. +// Note the http client provided takes precedence over the configured transport values. +func NewForConfigAndClient(c *rest.Config, h *http.Client) (*RbacV1alpha1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientForConfigAndClient(&config, h) if err != nil { return nil, err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/rbac_client.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/rbac_client.go index 4db94cfad7329..8dac5c1d4bae9 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/rbac_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/rbac_client.go @@ -19,6 +19,8 @@ limitations under the License. package v1beta1 import ( + "net/http" + v1beta1 "k8s.io/api/rbac/v1beta1" "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" @@ -54,12 +56,28 @@ func (c *RbacV1beta1Client) RoleBindings(namespace string) RoleBindingInterface } // NewForConfig creates a new RbacV1beta1Client for the given config. +// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), +// where httpClient was generated with rest.HTTPClientFor(c). func NewForConfig(c *rest.Config) (*RbacV1beta1Client, error) { config := *c if err := setConfigDefaults(&config); err != nil { return nil, err } - client, err := rest.RESTClientFor(&config) + httpClient, err := rest.HTTPClientFor(&config) + if err != nil { + return nil, err + } + return NewForConfigAndClient(&config, httpClient) +} + +// NewForConfigAndClient creates a new RbacV1beta1Client for the given config and http client. +// Note the http client provided takes precedence over the configured transport values. +func NewForConfigAndClient(c *rest.Config, h *http.Client) (*RbacV1beta1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientForConfigAndClient(&config, h) if err != nil { return nil, err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1/scheduling_client.go b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1/scheduling_client.go index 5028bac892481..11fc4b9f39f49 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1/scheduling_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1/scheduling_client.go @@ -19,6 +19,8 @@ limitations under the License. package v1 import ( + "net/http" + v1 "k8s.io/api/scheduling/v1" "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" @@ -39,12 +41,28 @@ func (c *SchedulingV1Client) PriorityClasses() PriorityClassInterface { } // NewForConfig creates a new SchedulingV1Client for the given config. +// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), +// where httpClient was generated with rest.HTTPClientFor(c). 
func NewForConfig(c *rest.Config) (*SchedulingV1Client, error) { config := *c if err := setConfigDefaults(&config); err != nil { return nil, err } - client, err := rest.RESTClientFor(&config) + httpClient, err := rest.HTTPClientFor(&config) + if err != nil { + return nil, err + } + return NewForConfigAndClient(&config, httpClient) +} + +// NewForConfigAndClient creates a new SchedulingV1Client for the given config and http client. +// Note the http client provided takes precedence over the configured transport values. +func NewForConfigAndClient(c *rest.Config, h *http.Client) (*SchedulingV1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientForConfigAndClient(&config, h) if err != nil { return nil, err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/scheduling_client.go b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/scheduling_client.go index 83bc0b8a91d31..47fb774a37e38 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/scheduling_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/scheduling_client.go @@ -19,6 +19,8 @@ limitations under the License. package v1alpha1 import ( + "net/http" + v1alpha1 "k8s.io/api/scheduling/v1alpha1" "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" @@ -39,12 +41,28 @@ func (c *SchedulingV1alpha1Client) PriorityClasses() PriorityClassInterface { } // NewForConfig creates a new SchedulingV1alpha1Client for the given config. +// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), +// where httpClient was generated with rest.HTTPClientFor(c). func NewForConfig(c *rest.Config) (*SchedulingV1alpha1Client, error) { config := *c if err := setConfigDefaults(&config); err != nil { return nil, err } - client, err := rest.RESTClientFor(&config) + httpClient, err := rest.HTTPClientFor(&config) + if err != nil { + return nil, err + } + return NewForConfigAndClient(&config, httpClient) +} + +// NewForConfigAndClient creates a new SchedulingV1alpha1Client for the given config and http client. +// Note the http client provided takes precedence over the configured transport values. +func NewForConfigAndClient(c *rest.Config, h *http.Client) (*SchedulingV1alpha1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientForConfigAndClient(&config, h) if err != nil { return nil, err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/scheduling_client.go b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/scheduling_client.go index 373f5cca88654..dbaf69414166e 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/scheduling_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/scheduling_client.go @@ -19,6 +19,8 @@ limitations under the License. package v1beta1 import ( + "net/http" + v1beta1 "k8s.io/api/scheduling/v1beta1" "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" @@ -39,12 +41,28 @@ func (c *SchedulingV1beta1Client) PriorityClasses() PriorityClassInterface { } // NewForConfig creates a new SchedulingV1beta1Client for the given config. +// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), +// where httpClient was generated with rest.HTTPClientFor(c). 
func NewForConfig(c *rest.Config) (*SchedulingV1beta1Client, error) { config := *c if err := setConfigDefaults(&config); err != nil { return nil, err } - client, err := rest.RESTClientFor(&config) + httpClient, err := rest.HTTPClientFor(&config) + if err != nil { + return nil, err + } + return NewForConfigAndClient(&config, httpClient) +} + +// NewForConfigAndClient creates a new SchedulingV1beta1Client for the given config and http client. +// Note the http client provided takes precedence over the configured transport values. +func NewForConfigAndClient(c *rest.Config, h *http.Client) (*SchedulingV1beta1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientForConfigAndClient(&config, h) if err != nil { return nil, err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/storage_client.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/storage_client.go index f03beae855074..b31862f439a3f 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/storage_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/storage_client.go @@ -19,6 +19,8 @@ limitations under the License. package v1 import ( + "net/http" + v1 "k8s.io/api/storage/v1" "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" @@ -54,12 +56,28 @@ func (c *StorageV1Client) VolumeAttachments() VolumeAttachmentInterface { } // NewForConfig creates a new StorageV1Client for the given config. +// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), +// where httpClient was generated with rest.HTTPClientFor(c). func NewForConfig(c *rest.Config) (*StorageV1Client, error) { config := *c if err := setConfigDefaults(&config); err != nil { return nil, err } - client, err := rest.RESTClientFor(&config) + httpClient, err := rest.HTTPClientFor(&config) + if err != nil { + return nil, err + } + return NewForConfigAndClient(&config, httpClient) +} + +// NewForConfigAndClient creates a new StorageV1Client for the given config and http client. +// Note the http client provided takes precedence over the configured transport values. +func NewForConfigAndClient(c *rest.Config, h *http.Client) (*StorageV1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientForConfigAndClient(&config, h) if err != nil { return nil, err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/storage_client.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/storage_client.go index 9686923004ed0..c9bf11d766c37 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/storage_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/storage_client.go @@ -19,6 +19,8 @@ limitations under the License. package v1alpha1 import ( + "net/http" + v1alpha1 "k8s.io/api/storage/v1alpha1" "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" @@ -44,12 +46,28 @@ func (c *StorageV1alpha1Client) VolumeAttachments() VolumeAttachmentInterface { } // NewForConfig creates a new StorageV1alpha1Client for the given config. +// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), +// where httpClient was generated with rest.HTTPClientFor(c). 
func NewForConfig(c *rest.Config) (*StorageV1alpha1Client, error) { config := *c if err := setConfigDefaults(&config); err != nil { return nil, err } - client, err := rest.RESTClientFor(&config) + httpClient, err := rest.HTTPClientFor(&config) + if err != nil { + return nil, err + } + return NewForConfigAndClient(&config, httpClient) +} + +// NewForConfigAndClient creates a new StorageV1alpha1Client for the given config and http client. +// Note the http client provided takes precedence over the configured transport values. +func NewForConfigAndClient(c *rest.Config, h *http.Client) (*StorageV1alpha1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientForConfigAndClient(&config, h) if err != nil { return nil, err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/storage_client.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/storage_client.go index 19267b3625009..4c7604bd2960e 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/storage_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/storage_client.go @@ -19,6 +19,8 @@ limitations under the License. package v1beta1 import ( + "net/http" + v1beta1 "k8s.io/api/storage/v1beta1" "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" @@ -59,12 +61,28 @@ func (c *StorageV1beta1Client) VolumeAttachments() VolumeAttachmentInterface { } // NewForConfig creates a new StorageV1beta1Client for the given config. +// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), +// where httpClient was generated with rest.HTTPClientFor(c). func NewForConfig(c *rest.Config) (*StorageV1beta1Client, error) { config := *c if err := setConfigDefaults(&config); err != nil { return nil, err } - client, err := rest.RESTClientFor(&config) + httpClient, err := rest.HTTPClientFor(&config) + if err != nil { + return nil, err + } + return NewForConfigAndClient(&config, httpClient) +} + +// NewForConfigAndClient creates a new StorageV1beta1Client for the given config and http client. +// Note the http client provided takes precedence over the configured transport values. +func NewForConfigAndClient(c *rest.Config, h *http.Client) (*StorageV1beta1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientForConfigAndClient(&config, h) if err != nil { return nil, err } diff --git a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/install/install.go b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/install/install.go new file mode 100644 index 0000000000000..9040bb9a464af --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/install/install.go @@ -0,0 +1,36 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package install installs the experimental API group, making it available as +// an option to all of the API encoding/decoding machinery. 
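// A caller-side sketch (not part of the vendored patch) of the install package
// introduced just below: Install registers the internal clientauthentication
// types plus the v1, v1beta1, and v1alpha1 ExecCredential versions into one
// runtime.Scheme, so exec credential plugin machinery can decode any supported
// version. Wrapping the scheme in a CodecFactory is an assumed, typical use.
package installexample

import (
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/serializer"
	"k8s.io/client-go/pkg/apis/clientauthentication/install"
)

func newExecCredentialCodecs() serializer.CodecFactory {
	scheme := runtime.NewScheme()
	install.Install(scheme) // panics via utilruntime.Must if registration fails
	return serializer.NewCodecFactory(scheme)
}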
+package install + +import ( + "k8s.io/apimachinery/pkg/runtime" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/client-go/pkg/apis/clientauthentication" + "k8s.io/client-go/pkg/apis/clientauthentication/v1" + "k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1" + "k8s.io/client-go/pkg/apis/clientauthentication/v1beta1" +) + +// Install registers the API group and adds types to a scheme +func Install(scheme *runtime.Scheme) { + utilruntime.Must(clientauthentication.AddToScheme(scheme)) + utilruntime.Must(v1.AddToScheme(scheme)) + utilruntime.Must(v1beta1.AddToScheme(scheme)) + utilruntime.Must(v1alpha1.AddToScheme(scheme)) +} diff --git a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/types.go b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/types.go index c10899792037c..8daaa3f8f734a 100644 --- a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/types.go +++ b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/types.go @@ -47,7 +47,7 @@ type ExecCredentialSpec struct { Response *Response // Interactive is true when the transport detects the command is being called from an - // interactive prompt. + // interactive prompt, i.e., when stdin has been passed to this exec plugin. // +optional Interactive bool diff --git a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1/conversion.go b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1/conversion.go new file mode 100644 index 0000000000000..5c5f70d259c48 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1/conversion.go @@ -0,0 +1,28 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "k8s.io/apimachinery/pkg/conversion" + "k8s.io/client-go/pkg/apis/clientauthentication" +) + +func Convert_clientauthentication_ExecCredentialSpec_To_v1_ExecCredentialSpec(in *clientauthentication.ExecCredentialSpec, out *ExecCredentialSpec, s conversion.Scope) error { + // This conversion intentionally omits the Response field, which were only + // supported in v1alpha1. + return autoConvert_clientauthentication_ExecCredentialSpec_To_v1_ExecCredentialSpec(in, out, s) +} diff --git a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1/doc.go b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1/doc.go new file mode 100644 index 0000000000000..94ca35c2c9145 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1/doc.go @@ -0,0 +1,24 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// +k8s:deepcopy-gen=package +// +k8s:conversion-gen=k8s.io/client-go/pkg/apis/clientauthentication +// +k8s:openapi-gen=true +// +k8s:defaulter-gen=TypeMeta + +// +groupName=client.authentication.k8s.io + +package v1 // import "k8s.io/client-go/pkg/apis/clientauthentication/v1" diff --git a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1/register.go b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1/register.go new file mode 100644 index 0000000000000..bfc719a42aaa4 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1/register.go @@ -0,0 +1,55 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name use in this package +const GroupName = "client.authentication.k8s.io" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + SchemeBuilder runtime.SchemeBuilder + localSchemeBuilder = &SchemeBuilder + AddToScheme = localSchemeBuilder.AddToScheme +) + +func init() { + // We only register manually written functions here. The registration of the + // generated functions takes place in the generated files. The separation + // makes the code compile even when the generated files are missing. + localSchemeBuilder.Register(addKnownTypes) +} + +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &ExecCredential{}, + ) + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1/types.go b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1/types.go new file mode 100644 index 0000000000000..0c4754dd26962 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1/types.go @@ -0,0 +1,122 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ExecCredential is used by exec-based plugins to communicate credentials to +// HTTP transports. +type ExecCredential struct { + metav1.TypeMeta `json:",inline"` + + // Spec holds information passed to the plugin by the transport. + Spec ExecCredentialSpec `json:"spec,omitempty"` + + // Status is filled in by the plugin and holds the credentials that the transport + // should use to contact the API. + // +optional + Status *ExecCredentialStatus `json:"status,omitempty"` +} + +// ExecCredentialSpec holds request and runtime specific information provided by +// the transport. +type ExecCredentialSpec struct { + // Cluster contains information to allow an exec plugin to communicate with the + // kubernetes cluster being authenticated to. Note that Cluster is non-nil only + // when provideClusterInfo is set to true in the exec provider config (i.e., + // ExecConfig.ProvideClusterInfo). + // +optional + Cluster *Cluster `json:"cluster,omitempty"` + + // Interactive declares whether stdin has been passed to this exec plugin. + Interactive bool `json:"interactive"` +} + +// ExecCredentialStatus holds credentials for the transport to use. +// +// Token and ClientKeyData are sensitive fields. This data should only be +// transmitted in-memory between client and exec plugin process. Exec plugin +// itself should at least be protected via file permissions. +type ExecCredentialStatus struct { + // ExpirationTimestamp indicates a time when the provided credentials expire. + // +optional + ExpirationTimestamp *metav1.Time `json:"expirationTimestamp,omitempty"` + // Token is a bearer token used by the client for request authentication. + Token string `json:"token,omitempty" datapolicy:"token"` + // PEM-encoded client TLS certificates (including intermediates, if any). + ClientCertificateData string `json:"clientCertificateData,omitempty"` + // PEM-encoded private key for the above certificate. + ClientKeyData string `json:"clientKeyData,omitempty" datapolicy:"security-key"` +} + +// Cluster contains information to allow an exec plugin to communicate +// with the kubernetes cluster being authenticated to. +// +// To ensure that this struct contains everything someone would need to communicate +// with a kubernetes cluster (just like they would via a kubeconfig), the fields +// should shadow "k8s.io/client-go/tools/clientcmd/api/v1".Cluster, with the exception +// of CertificateAuthority, since CA data will always be passed to the plugin as bytes. +type Cluster struct { + // Server is the address of the kubernetes cluster (https://hostname:port). + Server string `json:"server"` + // TLSServerName is passed to the server for SNI and is used in the client to + // check server certificates against. If ServerName is empty, the hostname + // used to contact the server is used. + // +optional + TLSServerName string `json:"tls-server-name,omitempty"` + // InsecureSkipTLSVerify skips the validity check for the server's certificate. + // This will make your HTTPS connections insecure. + // +optional + InsecureSkipTLSVerify bool `json:"insecure-skip-tls-verify,omitempty"` + // CAData contains PEM-encoded certificate authority certificates. + // If empty, system roots should be used. 
+ // +listType=atomic + // +optional + CertificateAuthorityData []byte `json:"certificate-authority-data,omitempty"` + // ProxyURL is the URL to the proxy to be used for all requests to this + // cluster. + // +optional + ProxyURL string `json:"proxy-url,omitempty"` + // Config holds additional config data that is specific to the exec + // plugin with regards to the cluster being authenticated to. + // + // This data is sourced from the clientcmd Cluster object's + // extensions[client.authentication.k8s.io/exec] field: + // + // clusters: + // - name: my-cluster + // cluster: + // ... + // extensions: + // - name: client.authentication.k8s.io/exec # reserved extension name for per cluster exec config + // extension: + // audience: 06e3fbd18de8 # arbitrary config + // + // In some environments, the user config may be exactly the same across many clusters + // (i.e. call this exec plugin) minus some details that are specific to each cluster + // such as the audience. This field allows the per cluster config to be directly + // specified with the cluster info. Using this field to store secret data is not + // recommended as one of the prime benefits of exec plugins is that no secrets need + // to be stored directly in the kubeconfig. + // +optional + Config runtime.RawExtension `json:"config,omitempty"` +} diff --git a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1/zz_generated.conversion.go b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1/zz_generated.conversion.go new file mode 100644 index 0000000000000..277d9d93ee68e --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1/zz_generated.conversion.go @@ -0,0 +1,201 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by conversion-gen. DO NOT EDIT. + +package v1 + +import ( + unsafe "unsafe" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" + clientauthentication "k8s.io/client-go/pkg/apis/clientauthentication" +) + +func init() { + localSchemeBuilder.Register(RegisterConversions) +} + +// RegisterConversions adds conversion functions to the given scheme. +// Public to allow building arbitrary schemes. 
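The conversion registration below feeds the same runtime.Scheme that install.Install (added earlier in this patch) populates with the internal type and all three external versions. A hedged sketch of what that enables, mirroring the encoding step the exec authenticator performs; the object contents are illustrative:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/serializer"
	"k8s.io/client-go/pkg/apis/clientauthentication"
	"k8s.io/client-go/pkg/apis/clientauthentication/install"
	clientauthenticationv1 "k8s.io/client-go/pkg/apis/clientauthentication/v1"
)

func main() {
	scheme := runtime.NewScheme()
	install.Install(scheme) // registers the internal type plus v1, v1beta1 and v1alpha1
	codecs := serializer.NewCodecFactory(scheme)

	cred := &clientauthentication.ExecCredential{
		Spec: clientauthentication.ExecCredentialSpec{Interactive: true},
	}
	// Encode the internal object as client.authentication.k8s.io/v1 JSON.
	data, err := runtime.Encode(codecs.LegacyCodec(clientauthenticationv1.SchemeGroupVersion), cred)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(data))
}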
+func RegisterConversions(s *runtime.Scheme) error { + if err := s.AddGeneratedConversionFunc((*Cluster)(nil), (*clientauthentication.Cluster)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_Cluster_To_clientauthentication_Cluster(a.(*Cluster), b.(*clientauthentication.Cluster), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*clientauthentication.Cluster)(nil), (*Cluster)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_clientauthentication_Cluster_To_v1_Cluster(a.(*clientauthentication.Cluster), b.(*Cluster), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*ExecCredential)(nil), (*clientauthentication.ExecCredential)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_ExecCredential_To_clientauthentication_ExecCredential(a.(*ExecCredential), b.(*clientauthentication.ExecCredential), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*clientauthentication.ExecCredential)(nil), (*ExecCredential)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_clientauthentication_ExecCredential_To_v1_ExecCredential(a.(*clientauthentication.ExecCredential), b.(*ExecCredential), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*ExecCredentialSpec)(nil), (*clientauthentication.ExecCredentialSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_ExecCredentialSpec_To_clientauthentication_ExecCredentialSpec(a.(*ExecCredentialSpec), b.(*clientauthentication.ExecCredentialSpec), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*ExecCredentialStatus)(nil), (*clientauthentication.ExecCredentialStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_ExecCredentialStatus_To_clientauthentication_ExecCredentialStatus(a.(*ExecCredentialStatus), b.(*clientauthentication.ExecCredentialStatus), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*clientauthentication.ExecCredentialStatus)(nil), (*ExecCredentialStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_clientauthentication_ExecCredentialStatus_To_v1_ExecCredentialStatus(a.(*clientauthentication.ExecCredentialStatus), b.(*ExecCredentialStatus), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*clientauthentication.ExecCredentialSpec)(nil), (*ExecCredentialSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_clientauthentication_ExecCredentialSpec_To_v1_ExecCredentialSpec(a.(*clientauthentication.ExecCredentialSpec), b.(*ExecCredentialSpec), scope) + }); err != nil { + return err + } + return nil +} + +func autoConvert_v1_Cluster_To_clientauthentication_Cluster(in *Cluster, out *clientauthentication.Cluster, s conversion.Scope) error { + out.Server = in.Server + out.TLSServerName = in.TLSServerName + out.InsecureSkipTLSVerify = in.InsecureSkipTLSVerify + out.CertificateAuthorityData = *(*[]byte)(unsafe.Pointer(&in.CertificateAuthorityData)) + out.ProxyURL = in.ProxyURL + if err := runtime.Convert_runtime_RawExtension_To_runtime_Object(&in.Config, &out.Config, s); err != nil { + return err + } + return nil +} + +// Convert_v1_Cluster_To_clientauthentication_Cluster is an autogenerated conversion function. 
+func Convert_v1_Cluster_To_clientauthentication_Cluster(in *Cluster, out *clientauthentication.Cluster, s conversion.Scope) error { + return autoConvert_v1_Cluster_To_clientauthentication_Cluster(in, out, s) +} + +func autoConvert_clientauthentication_Cluster_To_v1_Cluster(in *clientauthentication.Cluster, out *Cluster, s conversion.Scope) error { + out.Server = in.Server + out.TLSServerName = in.TLSServerName + out.InsecureSkipTLSVerify = in.InsecureSkipTLSVerify + out.CertificateAuthorityData = *(*[]byte)(unsafe.Pointer(&in.CertificateAuthorityData)) + out.ProxyURL = in.ProxyURL + if err := runtime.Convert_runtime_Object_To_runtime_RawExtension(&in.Config, &out.Config, s); err != nil { + return err + } + return nil +} + +// Convert_clientauthentication_Cluster_To_v1_Cluster is an autogenerated conversion function. +func Convert_clientauthentication_Cluster_To_v1_Cluster(in *clientauthentication.Cluster, out *Cluster, s conversion.Scope) error { + return autoConvert_clientauthentication_Cluster_To_v1_Cluster(in, out, s) +} + +func autoConvert_v1_ExecCredential_To_clientauthentication_ExecCredential(in *ExecCredential, out *clientauthentication.ExecCredential, s conversion.Scope) error { + if err := Convert_v1_ExecCredentialSpec_To_clientauthentication_ExecCredentialSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + out.Status = (*clientauthentication.ExecCredentialStatus)(unsafe.Pointer(in.Status)) + return nil +} + +// Convert_v1_ExecCredential_To_clientauthentication_ExecCredential is an autogenerated conversion function. +func Convert_v1_ExecCredential_To_clientauthentication_ExecCredential(in *ExecCredential, out *clientauthentication.ExecCredential, s conversion.Scope) error { + return autoConvert_v1_ExecCredential_To_clientauthentication_ExecCredential(in, out, s) +} + +func autoConvert_clientauthentication_ExecCredential_To_v1_ExecCredential(in *clientauthentication.ExecCredential, out *ExecCredential, s conversion.Scope) error { + if err := Convert_clientauthentication_ExecCredentialSpec_To_v1_ExecCredentialSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + out.Status = (*ExecCredentialStatus)(unsafe.Pointer(in.Status)) + return nil +} + +// Convert_clientauthentication_ExecCredential_To_v1_ExecCredential is an autogenerated conversion function. +func Convert_clientauthentication_ExecCredential_To_v1_ExecCredential(in *clientauthentication.ExecCredential, out *ExecCredential, s conversion.Scope) error { + return autoConvert_clientauthentication_ExecCredential_To_v1_ExecCredential(in, out, s) +} + +func autoConvert_v1_ExecCredentialSpec_To_clientauthentication_ExecCredentialSpec(in *ExecCredentialSpec, out *clientauthentication.ExecCredentialSpec, s conversion.Scope) error { + if in.Cluster != nil { + in, out := &in.Cluster, &out.Cluster + *out = new(clientauthentication.Cluster) + if err := Convert_v1_Cluster_To_clientauthentication_Cluster(*in, *out, s); err != nil { + return err + } + } else { + out.Cluster = nil + } + out.Interactive = in.Interactive + return nil +} + +// Convert_v1_ExecCredentialSpec_To_clientauthentication_ExecCredentialSpec is an autogenerated conversion function. 
+func Convert_v1_ExecCredentialSpec_To_clientauthentication_ExecCredentialSpec(in *ExecCredentialSpec, out *clientauthentication.ExecCredentialSpec, s conversion.Scope) error { + return autoConvert_v1_ExecCredentialSpec_To_clientauthentication_ExecCredentialSpec(in, out, s) +} + +func autoConvert_clientauthentication_ExecCredentialSpec_To_v1_ExecCredentialSpec(in *clientauthentication.ExecCredentialSpec, out *ExecCredentialSpec, s conversion.Scope) error { + // WARNING: in.Response requires manual conversion: does not exist in peer-type + out.Interactive = in.Interactive + if in.Cluster != nil { + in, out := &in.Cluster, &out.Cluster + *out = new(Cluster) + if err := Convert_clientauthentication_Cluster_To_v1_Cluster(*in, *out, s); err != nil { + return err + } + } else { + out.Cluster = nil + } + return nil +} + +func autoConvert_v1_ExecCredentialStatus_To_clientauthentication_ExecCredentialStatus(in *ExecCredentialStatus, out *clientauthentication.ExecCredentialStatus, s conversion.Scope) error { + out.ExpirationTimestamp = (*metav1.Time)(unsafe.Pointer(in.ExpirationTimestamp)) + out.Token = in.Token + out.ClientCertificateData = in.ClientCertificateData + out.ClientKeyData = in.ClientKeyData + return nil +} + +// Convert_v1_ExecCredentialStatus_To_clientauthentication_ExecCredentialStatus is an autogenerated conversion function. +func Convert_v1_ExecCredentialStatus_To_clientauthentication_ExecCredentialStatus(in *ExecCredentialStatus, out *clientauthentication.ExecCredentialStatus, s conversion.Scope) error { + return autoConvert_v1_ExecCredentialStatus_To_clientauthentication_ExecCredentialStatus(in, out, s) +} + +func autoConvert_clientauthentication_ExecCredentialStatus_To_v1_ExecCredentialStatus(in *clientauthentication.ExecCredentialStatus, out *ExecCredentialStatus, s conversion.Scope) error { + out.ExpirationTimestamp = (*metav1.Time)(unsafe.Pointer(in.ExpirationTimestamp)) + out.Token = in.Token + out.ClientCertificateData = in.ClientCertificateData + out.ClientKeyData = in.ClientKeyData + return nil +} + +// Convert_clientauthentication_ExecCredentialStatus_To_v1_ExecCredentialStatus is an autogenerated conversion function. +func Convert_clientauthentication_ExecCredentialStatus_To_v1_ExecCredentialStatus(in *clientauthentication.ExecCredentialStatus, out *ExecCredentialStatus, s conversion.Scope) error { + return autoConvert_clientauthentication_ExecCredentialStatus_To_v1_ExecCredentialStatus(in, out, s) +} diff --git a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1/zz_generated.deepcopy.go b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1/zz_generated.deepcopy.go new file mode 100644 index 0000000000000..893933c45f120 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1/zz_generated.deepcopy.go @@ -0,0 +1,120 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. 
+ +package v1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Cluster) DeepCopyInto(out *Cluster) { + *out = *in + if in.CertificateAuthorityData != nil { + in, out := &in.CertificateAuthorityData, &out.CertificateAuthorityData + *out = make([]byte, len(*in)) + copy(*out, *in) + } + in.Config.DeepCopyInto(&out.Config) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Cluster. +func (in *Cluster) DeepCopy() *Cluster { + if in == nil { + return nil + } + out := new(Cluster) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExecCredential) DeepCopyInto(out *ExecCredential) { + *out = *in + out.TypeMeta = in.TypeMeta + in.Spec.DeepCopyInto(&out.Spec) + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(ExecCredentialStatus) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExecCredential. +func (in *ExecCredential) DeepCopy() *ExecCredential { + if in == nil { + return nil + } + out := new(ExecCredential) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ExecCredential) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExecCredentialSpec) DeepCopyInto(out *ExecCredentialSpec) { + *out = *in + if in.Cluster != nil { + in, out := &in.Cluster, &out.Cluster + *out = new(Cluster) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExecCredentialSpec. +func (in *ExecCredentialSpec) DeepCopy() *ExecCredentialSpec { + if in == nil { + return nil + } + out := new(ExecCredentialSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExecCredentialStatus) DeepCopyInto(out *ExecCredentialStatus) { + *out = *in + if in.ExpirationTimestamp != nil { + in, out := &in.ExpirationTimestamp, &out.ExpirationTimestamp + *out = (*in).DeepCopy() + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExecCredentialStatus. +func (in *ExecCredentialStatus) DeepCopy() *ExecCredentialStatus { + if in == nil { + return nil + } + out := new(ExecCredentialStatus) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1/zz_generated.defaults.go b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1/zz_generated.defaults.go new file mode 100644 index 0000000000000..dac177e93bd0d --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1/zz_generated.defaults.go @@ -0,0 +1,33 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by defaulter-gen. DO NOT EDIT. + +package v1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// RegisterDefaults adds defaulters functions to the given scheme. +// Public to allow building arbitrary schemes. +// All generated defaulters are covering - they call all nested defaulters. +func RegisterDefaults(scheme *runtime.Scheme) error { + return nil +} diff --git a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/zz_generated.conversion.go b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/zz_generated.conversion.go index b0e503af4933a..fc59decef5e7c 100644 --- a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/zz_generated.conversion.go +++ b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/zz_generated.conversion.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* diff --git a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/zz_generated.deepcopy.go index a73d31b3f14ba..ce614c0b8795c 100644 --- a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/zz_generated.deepcopy.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* diff --git a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/zz_generated.defaults.go b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/zz_generated.defaults.go index dd621a3acda82..5070cb91b90f1 100644 --- a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/zz_generated.defaults.go +++ b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/zz_generated.defaults.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* diff --git a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/conversion.go b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/conversion.go index 441b7c44bd938..6741114dd8d08 100644 --- a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/conversion.go +++ b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/conversion.go @@ -22,7 +22,7 @@ import ( ) func Convert_clientauthentication_ExecCredentialSpec_To_v1beta1_ExecCredentialSpec(in *clientauthentication.ExecCredentialSpec, out *ExecCredentialSpec, s conversion.Scope) error { - // This conversion intentionally omits the Response and Interactive fields, which were only + // This conversion intentionally omits the Response field, which were only // supported in v1alpha1. 
return autoConvert_clientauthentication_ExecCredentialSpec_To_v1beta1_ExecCredentialSpec(in, out, s) } diff --git a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/types.go b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/types.go index fabc6f65e6c60..714b0273ae932 100644 --- a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/types.go +++ b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/types.go @@ -46,6 +46,9 @@ type ExecCredentialSpec struct { // ExecConfig.ProvideClusterInfo). // +optional Cluster *Cluster `json:"cluster,omitempty"` + + // Interactive declares whether stdin has been passed to this exec plugin. + Interactive bool `json:"interactive"` } // ExecCredentialStatus holds credentials for the transport to use. diff --git a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/zz_generated.conversion.go b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/zz_generated.conversion.go index 90f7935fefe87..c82993897d046 100644 --- a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/zz_generated.conversion.go +++ b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/zz_generated.conversion.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* @@ -149,6 +150,7 @@ func autoConvert_v1beta1_ExecCredentialSpec_To_clientauthentication_ExecCredenti } else { out.Cluster = nil } + out.Interactive = in.Interactive return nil } @@ -159,7 +161,7 @@ func Convert_v1beta1_ExecCredentialSpec_To_clientauthentication_ExecCredentialSp func autoConvert_clientauthentication_ExecCredentialSpec_To_v1beta1_ExecCredentialSpec(in *clientauthentication.ExecCredentialSpec, out *ExecCredentialSpec, s conversion.Scope) error { // WARNING: in.Response requires manual conversion: does not exist in peer-type - // WARNING: in.Interactive requires manual conversion: does not exist in peer-type + out.Interactive = in.Interactive if in.Cluster != nil { in, out := &in.Cluster, &out.Cluster *out = new(Cluster) diff --git a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/zz_generated.deepcopy.go index 3a72ece0c6f34..d7a801c27d70f 100644 --- a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/zz_generated.deepcopy.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* diff --git a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/zz_generated.defaults.go b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/zz_generated.defaults.go index 73e63fc114d33..198b5be4af534 100644 --- a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/zz_generated.defaults.go +++ b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/zz_generated.defaults.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* diff --git a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/zz_generated.deepcopy.go b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/zz_generated.deepcopy.go index 045b07f5b054e..3103629f6173c 100644 --- a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/zz_generated.deepcopy.go +++ b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/zz_generated.deepcopy.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* diff --git 
a/vendor/k8s.io/client-go/pkg/version/.gitattributes b/vendor/k8s.io/client-go/pkg/version/.gitattributes deleted file mode 100644 index 7e349eff60bd6..0000000000000 --- a/vendor/k8s.io/client-go/pkg/version/.gitattributes +++ /dev/null @@ -1 +0,0 @@ -base.go export-subst diff --git a/vendor/k8s.io/client-go/plugin/pkg/client/auth/exec/exec.go b/vendor/k8s.io/client-go/plugin/pkg/client/auth/exec/exec.go index b23e57dde6366..e405e3dc1290c 100644 --- a/vendor/k8s.io/client-go/plugin/pkg/client/auth/exec/exec.go +++ b/vendor/k8s.io/client-go/plugin/pkg/client/auth/exec/exec.go @@ -35,20 +35,21 @@ import ( "github.com/davecgh/go-spew/spew" "golang.org/x/term" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/runtime/serializer" - "k8s.io/apimachinery/pkg/util/clock" - utilruntime "k8s.io/apimachinery/pkg/util/runtime" + utilnet "k8s.io/apimachinery/pkg/util/net" "k8s.io/client-go/pkg/apis/clientauthentication" - "k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1" - "k8s.io/client-go/pkg/apis/clientauthentication/v1beta1" + "k8s.io/client-go/pkg/apis/clientauthentication/install" + clientauthenticationv1 "k8s.io/client-go/pkg/apis/clientauthentication/v1" + clientauthenticationv1alpha1 "k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1" + clientauthenticationv1beta1 "k8s.io/client-go/pkg/apis/clientauthentication/v1beta1" "k8s.io/client-go/tools/clientcmd/api" "k8s.io/client-go/tools/metrics" "k8s.io/client-go/transport" "k8s.io/client-go/util/connrotation" "k8s.io/klog/v2" + "k8s.io/utils/clock" ) const execInfoEnv = "KUBERNETES_EXEC_INFO" @@ -63,10 +64,7 @@ var scheme = runtime.NewScheme() var codecs = serializer.NewCodecFactory(scheme) func init() { - v1.AddToGroupVersion(scheme, schema.GroupVersion{Version: "v1"}) - utilruntime.Must(v1alpha1.AddToScheme(scheme)) - utilruntime.Must(v1beta1.AddToScheme(scheme)) - utilruntime.Must(clientauthentication.AddToScheme(scheme)) + install.Install(scheme) } var ( @@ -75,8 +73,9 @@ var ( globalCache = newCache() // The list of API versions we accept. apiVersions = map[string]schema.GroupVersion{ - v1alpha1.SchemeGroupVersion.String(): v1alpha1.SchemeGroupVersion, - v1beta1.SchemeGroupVersion.String(): v1beta1.SchemeGroupVersion, + clientauthenticationv1alpha1.SchemeGroupVersion.String(): clientauthenticationv1alpha1.SchemeGroupVersion, + clientauthenticationv1beta1.SchemeGroupVersion.String(): clientauthenticationv1beta1.SchemeGroupVersion, + clientauthenticationv1.SchemeGroupVersion.String(): clientauthenticationv1.SchemeGroupVersion, } ) @@ -162,10 +161,10 @@ func (s *sometimes) Do(f func()) { // GetAuthenticator returns an exec-based plugin for providing client credentials. 
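With client.authentication.k8s.io/v1 added to the accepted apiVersions map above, exec plugins can be configured against the GA API, and interactivity is driven by the interactiveMode setting handled later in this hunk rather than an unconditional terminal check. An illustrative configuration, not from the patch; the plugin binary name is hypothetical:

package main

import (
	"fmt"

	"k8s.io/client-go/plugin/pkg/client/auth/exec"
	"k8s.io/client-go/tools/clientcmd/api"
)

func main() {
	cfg := &api.ExecConfig{
		Command:         "my-credential-plugin", // hypothetical binary
		APIVersion:      "client.authentication.k8s.io/v1",
		InteractiveMode: api.IfAvailableExecInteractiveMode, // pass stdin only when it is a terminal
	}
	authenticator, err := exec.GetAuthenticator(cfg, nil) // nil: no per-cluster info provided
	if err != nil {
		panic(err)
	}
	fmt.Println(authenticator != nil)
}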
func GetAuthenticator(config *api.ExecConfig, cluster *clientauthentication.Cluster) (*Authenticator, error) { - return newAuthenticator(globalCache, config, cluster) + return newAuthenticator(globalCache, term.IsTerminal, config, cluster) } -func newAuthenticator(c *cache, config *api.ExecConfig, cluster *clientauthentication.Cluster) (*Authenticator, error) { +func newAuthenticator(c *cache, isTerminalFunc func(int) bool, config *api.ExecConfig, cluster *clientauthentication.Cluster) (*Authenticator, error) { key := cacheKey(config, cluster) if a, ok := c.get(key); ok { return a, nil @@ -196,11 +195,11 @@ func newAuthenticator(c *cache, config *api.ExecConfig, cluster *clientauthentic clock: clock.RealClock{}, }, - stdin: os.Stdin, - stderr: os.Stderr, - interactive: term.IsTerminal(int(os.Stdin.Fd())), - now: time.Now, - environ: os.Environ, + stdin: os.Stdin, + stderr: os.Stderr, + interactiveFunc: func() (bool, error) { return isInteractive(isTerminalFunc, config) }, + now: time.Now, + environ: os.Environ, defaultDialer: defaultDialer, connTracker: connTracker, @@ -213,6 +212,33 @@ func newAuthenticator(c *cache, config *api.ExecConfig, cluster *clientauthentic return c.put(key, a), nil } +func isInteractive(isTerminalFunc func(int) bool, config *api.ExecConfig) (bool, error) { + var shouldBeInteractive bool + switch config.InteractiveMode { + case api.NeverExecInteractiveMode: + shouldBeInteractive = false + case api.IfAvailableExecInteractiveMode: + shouldBeInteractive = !config.StdinUnavailable && isTerminalFunc(int(os.Stdin.Fd())) + case api.AlwaysExecInteractiveMode: + if !isTerminalFunc(int(os.Stdin.Fd())) { + return false, errors.New("standard input is not a terminal") + } + if config.StdinUnavailable { + suffix := "" + if len(config.StdinUnavailableMessage) > 0 { + // only print extra ": " if the user actually specified a message + suffix = fmt.Sprintf(": %s", config.StdinUnavailableMessage) + } + return false, fmt.Errorf("standard input is unavailable%s", suffix) + } + shouldBeInteractive = true + default: + return false, fmt.Errorf("unknown interactiveMode: %q", config.InteractiveMode) + } + + return shouldBeInteractive, nil +} + // Authenticator is a client credential provider that rotates credentials by executing a plugin. // The plugin input and output are defined by the API group client.authentication.k8s.io. type Authenticator struct { @@ -231,11 +257,11 @@ type Authenticator struct { installHint string // Stubbable for testing - stdin io.Reader - stderr io.Writer - interactive bool - now func() time.Time - environ func() []string + stdin io.Reader + stderr io.Writer + interactiveFunc func() (bool, error) + now func() time.Time + environ func() []string // defaultDialer is used for clients which don't specify a custom dialer defaultDialer *connrotation.Dialer @@ -263,8 +289,9 @@ func (a *Authenticator) UpdateTransportConfig(c *transport.Config) error { // setting up the transport, as that triggers the exec action if the server is // also configured to allow client certificates for authentication. For requests // like "kubectl get --token (token) pods" we should assume the intention is to - // use the provided token for authentication. - if c.HasTokenAuth() { + // use the provided token for authentication. The same can be said for when the + // user specifies basic auth. 
+ if c.HasTokenAuth() || c.HasBasicAuth() { return nil } @@ -290,11 +317,17 @@ func (a *Authenticator) UpdateTransportConfig(c *transport.Config) error { return nil } +var _ utilnet.RoundTripperWrapper = &roundTripper{} + type roundTripper struct { a *Authenticator base http.RoundTripper } +func (r *roundTripper) WrappedRoundTripper() http.RoundTripper { + return r.base +} + func (r *roundTripper) RoundTrip(req *http.Request) (*http.Response, error) { // If a user has already set credentials, use that. This makes commands like // "kubectl get --token (token) pods" work. @@ -375,10 +408,15 @@ func (a *Authenticator) maybeRefreshCreds(creds *credentials, r *clientauthentic // refreshCredsLocked executes the plugin and reads the credentials from // stdout. It must be called while holding the Authenticator's mutex. func (a *Authenticator) refreshCredsLocked(r *clientauthentication.Response) error { + interactive, err := a.interactiveFunc() + if err != nil { + return fmt.Errorf("exec plugin cannot support interactive mode: %w", err) + } + cred := &clientauthentication.ExecCredential{ Spec: clientauthentication.ExecCredentialSpec{ Response: r, - Interactive: a.interactive, + Interactive: interactive, }, } if a.provideClusterInfo { @@ -397,7 +435,7 @@ func (a *Authenticator) refreshCredsLocked(r *clientauthentication.Response) err cmd.Env = env cmd.Stderr = a.stderr cmd.Stdout = stdout - if a.interactive { + if interactive { cmd.Stdin = a.stdin } @@ -461,7 +499,7 @@ func (a *Authenticator) refreshCredsLocked(r *clientauthentication.Response) err if oldCreds != nil && !reflect.DeepEqual(oldCreds.cert, a.cachedCreds.cert) { // Can be nil if the exec auth plugin only returned token auth. if oldCreds.cert != nil && oldCreds.cert.Leaf != nil { - metrics.ClientCertRotationAge.Observe(time.Now().Sub(oldCreds.cert.Leaf.NotBefore)) + metrics.ClientCertRotationAge.Observe(time.Since(oldCreds.cert.Leaf.NotBefore)) } a.connTracker.CloseAll() } diff --git a/vendor/k8s.io/client-go/plugin/pkg/client/auth/exec/metrics.go b/vendor/k8s.io/client-go/plugin/pkg/client/auth/exec/metrics.go index 3a2cc251a1f02..51210975a6668 100644 --- a/vendor/k8s.io/client-go/plugin/pkg/client/auth/exec/metrics.go +++ b/vendor/k8s.io/client-go/plugin/pkg/client/auth/exec/metrics.go @@ -18,6 +18,7 @@ package exec import ( "errors" + "io/fs" "os/exec" "reflect" "sync" @@ -92,6 +93,7 @@ func (c *certificateExpirationTracker) set(a *Authenticator, t time.Time) { func incrementCallsMetric(err error) { execExitError := &exec.ExitError{} execError := &exec.Error{} + pathError := &fs.PathError{} switch { case err == nil: // Binary execution succeeded. metrics.ExecPluginCalls.Increment(successExitCode, noError) @@ -99,7 +101,7 @@ func incrementCallsMetric(err error) { case errors.As(err, &execExitError): // Binary execution failed (see "os/exec".Cmd.Run()). metrics.ExecPluginCalls.Increment(execExitError.ExitCode(), pluginExecutionError) - case errors.As(err, &execError): // Binary does not exist (see exec.Error). + case errors.As(err, &execError), errors.As(err, &pathError): // Binary does not exist (see exec.Error, fs.PathError). metrics.ExecPluginCalls.Increment(failureExitCode, pluginNotFoundError) default: // We don't know about this error type. 
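The metrics change above broadens plugin-not-found detection: errors.As is now tried against *fs.PathError as well as *exec.Error, since a missing binary can surface as either. A self-contained sketch of the same classification pattern; the returned labels are illustrative, not the real metric names:

package main

import (
	"errors"
	"fmt"
	"io/fs"
	"os/exec"
)

// classify mirrors the switch in incrementCallsMetric, returning a label
// instead of incrementing a metric.
func classify(err error) string {
	execExitError := &exec.ExitError{}
	execError := &exec.Error{}
	pathError := &fs.PathError{}
	switch {
	case err == nil:
		return "success"
	case errors.As(err, &execExitError):
		return "plugin execution failed"
	case errors.As(err, &execError), errors.As(err, &pathError):
		return "plugin not found"
	default:
		return "internal error"
	}
}

func main() {
	err := exec.Command("/no/such/plugin").Run()
	fmt.Println(classify(err)) // typically "plugin not found"
}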
diff --git a/vendor/k8s.io/client-go/rest/OWNERS b/vendor/k8s.io/client-go/rest/OWNERS index 0d574e0568e97..597484b4a4df2 100644 --- a/vendor/k8s.io/client-go/rest/OWNERS +++ b/vendor/k8s.io/client-go/rest/OWNERS @@ -8,7 +8,6 @@ reviewers: - deads2k - brendandburns - liggitt -- erictune - sttts - luxas - dims diff --git a/vendor/k8s.io/client-go/rest/config.go b/vendor/k8s.io/client-go/rest/config.go index 3735750bbc53d..4909dc53abb39 100644 --- a/vendor/k8s.io/client-go/rest/config.go +++ b/vendor/k8s.io/client-go/rest/config.go @@ -202,6 +202,8 @@ func (c *Config) String() string { type ImpersonationConfig struct { // UserName is the username to impersonate on each request. UserName string + // UID is a unique value that identifies the user. + UID string // Groups are the groups to impersonate on each request. Groups []string // Extra is a free-form field which can be used to link some authentication information @@ -303,6 +305,8 @@ type ContentConfig struct { // object. Note that a RESTClient may require fields that are optional when initializing a Client. // A RESTClient created by this method is generic - it expects to operate on an API that follows // the Kubernetes conventions, but may not be the Kubernetes API. +// RESTClientFor is equivalent to calling RESTClientForConfigAndClient(config, httpClient), +// where httpClient was generated with HTTPClientFor(config). func RESTClientFor(config *Config) (*RESTClient, error) { if config.GroupVersion == nil { return nil, fmt.Errorf("GroupVersion is required when initializing a RESTClient") @@ -311,22 +315,38 @@ func RESTClientFor(config *Config) (*RESTClient, error) { return nil, fmt.Errorf("NegotiatedSerializer is required when initializing a RESTClient") } - baseURL, versionedAPIPath, err := defaultServerUrlFor(config) + // Validate config.Host before constructing the transport/client so we can fail fast. + // ServerURL will be obtained later in RESTClientForConfigAndClient() + _, _, err := defaultServerUrlFor(config) if err != nil { return nil, err } - transport, err := TransportFor(config) + httpClient, err := HTTPClientFor(config) if err != nil { return nil, err } - var httpClient *http.Client - if transport != http.DefaultTransport { - httpClient = &http.Client{Transport: transport} - if config.Timeout > 0 { - httpClient.Timeout = config.Timeout - } + return RESTClientForConfigAndClient(config, httpClient) +} + +// RESTClientForConfigAndClient returns a RESTClient that satisfies the requested attributes on a +// client Config object. +// Unlike RESTClientFor, RESTClientForConfigAndClient allows to pass an http.Client that is shared +// between all the API Groups and Versions. +// Note that the http client takes precedence over the transport values configured. +// The http client defaults to the `http.DefaultClient` if nil. 
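A short sketch of the entry point this comment documents, not part of the patch: it assumes a rest.Config that already carries the GroupVersion and NegotiatedSerializer the function validates.

package example

import (
	rest "k8s.io/client-go/rest"
)

// buildRESTClient reuses one *http.Client for a hand-rolled RESTClient.
// config.GroupVersion and config.NegotiatedSerializer must be set, exactly
// as with RESTClientFor.
func buildRESTClient(config *rest.Config) (*rest.RESTClient, error) {
	httpClient, err := rest.HTTPClientFor(config)
	if err != nil {
		return nil, err
	}
	return rest.RESTClientForConfigAndClient(config, httpClient)
}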
+func RESTClientForConfigAndClient(config *Config, httpClient *http.Client) (*RESTClient, error) { + if config.GroupVersion == nil { + return nil, fmt.Errorf("GroupVersion is required when initializing a RESTClient") + } + if config.NegotiatedSerializer == nil { + return nil, fmt.Errorf("NegotiatedSerializer is required when initializing a RESTClient") + } + + baseURL, versionedAPIPath, err := defaultServerUrlFor(config) + if err != nil { + return nil, err } rateLimiter := config.RateLimiter @@ -369,22 +389,31 @@ func UnversionedRESTClientFor(config *Config) (*RESTClient, error) { return nil, fmt.Errorf("NegotiatedSerializer is required when initializing a RESTClient") } - baseURL, versionedAPIPath, err := defaultServerUrlFor(config) + // Validate config.Host before constructing the transport/client so we can fail fast. + // ServerURL will be obtained later in UnversionedRESTClientForConfigAndClient() + _, _, err := defaultServerUrlFor(config) if err != nil { return nil, err } - transport, err := TransportFor(config) + httpClient, err := HTTPClientFor(config) if err != nil { return nil, err } - var httpClient *http.Client - if transport != http.DefaultTransport { - httpClient = &http.Client{Transport: transport} - if config.Timeout > 0 { - httpClient.Timeout = config.Timeout - } + return UnversionedRESTClientForConfigAndClient(config, httpClient) +} + +// UnversionedRESTClientForConfigAndClient is the same as RESTClientForConfigAndClient, +// except that it allows the config.Version to be empty. +func UnversionedRESTClientForConfigAndClient(config *Config, httpClient *http.Client) (*RESTClient, error) { + if config.NegotiatedSerializer == nil { + return nil, fmt.Errorf("NegotiatedSerializer is required when initializing a RESTClient") + } + + baseURL, versionedAPIPath, err := defaultServerUrlFor(config) + if err != nil { + return nil, err } rateLimiter := config.RateLimiter @@ -608,9 +637,10 @@ func CopyConfig(config *Config) *Config { BearerToken: config.BearerToken, BearerTokenFile: config.BearerTokenFile, Impersonate: ImpersonationConfig{ + UserName: config.Impersonate.UserName, + UID: config.Impersonate.UID, Groups: config.Impersonate.Groups, Extra: config.Impersonate.Extra, - UserName: config.Impersonate.UserName, }, AuthProvider: config.AuthProvider, AuthConfigPersister: config.AuthConfigPersister, diff --git a/vendor/k8s.io/client-go/rest/request.go b/vendor/k8s.io/client-go/rest/request.go index 0ac0e8eab854c..5cc9900b04a6e 100644 --- a/vendor/k8s.io/client-go/rest/request.go +++ b/vendor/k8s.io/client-go/rest/request.go @@ -39,13 +39,13 @@ import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/runtime/serializer/streaming" - utilclock "k8s.io/apimachinery/pkg/util/clock" "k8s.io/apimachinery/pkg/util/net" "k8s.io/apimachinery/pkg/watch" restclientwatch "k8s.io/client-go/rest/watch" "k8s.io/client-go/tools/metrics" "k8s.io/client-go/util/flowcontrol" "k8s.io/klog/v2" + "k8s.io/utils/clock" ) var ( @@ -93,7 +93,6 @@ type Request struct { rateLimiter flowcontrol.RateLimiter backoff BackoffManager timeout time.Duration - maxRetries int // generic components accessible via method setters verb string @@ -110,8 +109,9 @@ type Request struct { subresource string // output - err error - body io.Reader + err error + body io.Reader + retry WithRetry } // NewRequest creates a new request helper object for accessing runtime.Objects on a server. 
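The request.go hunk above swaps the Request's plain maxRetries counter for the WithRetry policy object; the hunks that follow route construction and MaxRetries through it. The caller-facing surface is unchanged, as this sketch suggests (restClient and the resource path are assumptions):

package example

import (
	"context"

	rest "k8s.io/client-go/rest"
)

// listPodsRaw shows that MaxRetries still just sets a budget, now stored in
// the WithRetry object; zero disables retries and fails immediately.
func listPodsRaw(ctx context.Context, restClient *rest.RESTClient) ([]byte, error) {
	return restClient.Get().
		Resource("pods").
		Namespace("default").
		MaxRetries(3).
		Do(ctx).
		Raw()
}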
@@ -142,7 +142,7 @@ func NewRequest(c *RESTClient) *Request { backoff: backoff, timeout: timeout, pathPrefix: pathPrefix, - maxRetries: 10, + retry: &withRetry{maxRetries: 10}, warningHandler: c.warningHandler, } @@ -408,10 +408,7 @@ func (r *Request) Timeout(d time.Duration) *Request { // function is specifically called with a different value. // A zero maxRetries prevent it from doing retires and return an error immediately. func (r *Request) MaxRetries(maxRetries int) *Request { - if maxRetries < 0 { - maxRetries = 0 - } - r.maxRetries = maxRetries + r.retry.SetMaxRetries(maxRetries) return r } @@ -602,7 +599,7 @@ func (r *Request) tryThrottleWithInfo(ctx context.Context, retryInfo string) err if latency > extraLongThrottleLatency { // If the rate limiter latency is very high, the log message should be printed at a higher log level, // but we use a throttled logger to prevent spamming. - globalThrottledLogger.Infof(message) + globalThrottledLogger.Infof("%s", message) } metrics.RateLimiterLatency.Observe(ctx, r.verb, r.finalURLTemplate(), latency) @@ -622,12 +619,12 @@ type throttleSettings struct { } type throttledLogger struct { - clock utilclock.PassiveClock + clock clock.PassiveClock settings []*throttleSettings } var globalThrottledLogger = &throttledLogger{ - clock: utilclock.RealClock{}, + clock: clock.RealClock{}, settings: []*throttleSettings{ { logLevel: 2, @@ -678,43 +675,88 @@ func (r *Request) Watch(ctx context.Context) (watch.Interface, error) { return nil, r.err } - url := r.URL().String() - req, err := http.NewRequest(r.verb, url, r.body) - if err != nil { - return nil, err - } - req = req.WithContext(ctx) - req.Header = r.headers client := r.c.Client if client == nil { client = http.DefaultClient } - r.backoff.Sleep(r.backoff.CalculateBackoff(r.URL())) - resp, err := client.Do(req) - updateURLMetrics(ctx, r, resp, err) - if r.c.base != nil { - if err != nil { - r.backoff.UpdateBackoff(r.c.base, err, 0) - } else { - r.backoff.UpdateBackoff(r.c.base, err, resp.StatusCode) - } - } - if err != nil { + + isErrRetryableFunc := func(request *http.Request, err error) bool { // The watch stream mechanism handles many common partial data errors, so closed // connections can be retried in many cases. if net.IsProbableEOF(err) || net.IsTimeout(err) { - return watch.NewEmptyWatch(), nil + return true } - return nil, err + return false } - if resp.StatusCode != http.StatusOK { - defer resp.Body.Close() - if result := r.transformResponse(resp, req); result.err != nil { - return nil, result.err + var retryAfter *RetryAfter + url := r.URL().String() + for { + req, err := r.newHTTPRequest(ctx) + if err != nil { + return nil, err + } + + r.backoff.Sleep(r.backoff.CalculateBackoff(r.URL())) + if retryAfter != nil { + // We are retrying the request that we already send to apiserver + // at least once before. + // This request should also be throttled with the client-internal rate limiter. 
+ if err := r.tryThrottleWithInfo(ctx, retryAfter.Reason); err != nil { + return nil, err + } + retryAfter = nil + } + + resp, err := client.Do(req) + updateURLMetrics(ctx, r, resp, err) + if r.c.base != nil { + if err != nil { + r.backoff.UpdateBackoff(r.c.base, err, 0) + } else { + r.backoff.UpdateBackoff(r.c.base, err, resp.StatusCode) + } + } + if err == nil && resp.StatusCode == http.StatusOK { + return r.newStreamWatcher(resp) + } + + done, transformErr := func() (bool, error) { + defer readAndCloseResponseBody(resp) + + var retry bool + retryAfter, retry = r.retry.NextRetry(req, resp, err, isErrRetryableFunc) + if retry { + err := r.retry.BeforeNextRetry(ctx, r.backoff, retryAfter, url, r.body) + if err == nil { + return false, nil + } + klog.V(4).Infof("Could not retry request - %v", err) + } + + if resp == nil { + // the server must have sent us an error in 'err' + return true, nil + } + if result := r.transformResponse(resp, req); result.err != nil { + return true, result.err + } + return true, fmt.Errorf("for request %s, got status: %v", url, resp.StatusCode) + }() + if done { + if isErrRetryableFunc(req, err) { + return watch.NewEmptyWatch(), nil + } + if err == nil { + // if the server sent us an HTTP Response object, + // we need to return the error object from that. + err = transformErr + } + return nil, err } - return nil, fmt.Errorf("for request %s, got status: %v", url, resp.StatusCode) } +} +func (r *Request) newStreamWatcher(resp *http.Response) (watch.Interface, error) { contentType := resp.Header.Get("Content-Type") mediaType, params, err := mime.ParseMediaType(contentType) if err != nil { @@ -769,49 +811,75 @@ func (r *Request) Stream(ctx context.Context) (io.ReadCloser, error) { return nil, err } - url := r.URL().String() - req, err := http.NewRequest(r.verb, url, nil) - if err != nil { - return nil, err - } - if r.body != nil { - req.Body = ioutil.NopCloser(r.body) - } - req = req.WithContext(ctx) - req.Header = r.headers client := r.c.Client if client == nil { client = http.DefaultClient } - r.backoff.Sleep(r.backoff.CalculateBackoff(r.URL())) - resp, err := client.Do(req) - updateURLMetrics(ctx, r, resp, err) - if r.c.base != nil { + + var retryAfter *RetryAfter + url := r.URL().String() + for { + req, err := r.newHTTPRequest(ctx) if err != nil { - r.backoff.UpdateBackoff(r.URL(), err, 0) - } else { - r.backoff.UpdateBackoff(r.URL(), err, resp.StatusCode) + return nil, err + } + if r.body != nil { + req.Body = ioutil.NopCloser(r.body) } - } - if err != nil { - return nil, err - } - switch { - case (resp.StatusCode >= 200) && (resp.StatusCode < 300): - handleWarnings(resp.Header, r.warningHandler) - return resp.Body, nil + r.backoff.Sleep(r.backoff.CalculateBackoff(r.URL())) + if retryAfter != nil { + // We are retrying the request that we already send to apiserver + // at least once before. + // This request should also be throttled with the client-internal rate limiter. 
+ if err := r.tryThrottleWithInfo(ctx, retryAfter.Reason); err != nil { + return nil, err + } + retryAfter = nil + } - default: - // ensure we close the body before returning the error - defer resp.Body.Close() + resp, err := client.Do(req) + updateURLMetrics(ctx, r, resp, err) + if r.c.base != nil { + if err != nil { + r.backoff.UpdateBackoff(r.URL(), err, 0) + } else { + r.backoff.UpdateBackoff(r.URL(), err, resp.StatusCode) + } + } + if err != nil { + // we only retry on an HTTP response with 'Retry-After' header + return nil, err + } - result := r.transformResponse(resp, req) - err := result.Error() - if err == nil { - err = fmt.Errorf("%d while accessing %v: %s", result.statusCode, url, string(result.body)) + switch { + case (resp.StatusCode >= 200) && (resp.StatusCode < 300): + handleWarnings(resp.Header, r.warningHandler) + return resp.Body, nil + + default: + done, transformErr := func() (bool, error) { + defer resp.Body.Close() + + var retry bool + retryAfter, retry = r.retry.NextRetry(req, resp, err, neverRetryError) + if retry { + err := r.retry.BeforeNextRetry(ctx, r.backoff, retryAfter, url, r.body) + if err == nil { + return false, nil + } + klog.V(4).Infof("Could not retry request - %v", err) + } + result := r.transformResponse(resp, req) + if err := result.Error(); err != nil { + return true, err + } + return true, fmt.Errorf("%d while accessing %v: %s", result.statusCode, url, string(result.body)) + }() + if done { + return nil, transformErr + } } - return nil, err } } @@ -842,6 +910,17 @@ func (r *Request) requestPreflightCheck() error { return nil } +func (r *Request) newHTTPRequest(ctx context.Context) (*http.Request, error) { + url := r.URL().String() + req, err := http.NewRequest(r.verb, url, r.body) + if err != nil { + return nil, err + } + req = req.WithContext(ctx) + req.Header = r.headers + return req, nil +} + // request connects to the server and invokes the provided function when a server response is // received. It handles retry behavior and up front validation of requests. It will invoke // fn at most once. It will return an error if a problem occurred prior to connecting to the @@ -881,27 +960,22 @@ func (r *Request) request(ctx context.Context, fn func(*http.Request, *http.Resp } // Right now we make about ten retry attempts if we get a Retry-After response. - retries := 0 - var retryInfo string + var retryAfter *RetryAfter for { - - url := r.URL().String() - req, err := http.NewRequest(r.verb, url, r.body) + req, err := r.newHTTPRequest(ctx) if err != nil { return err } - req = req.WithContext(ctx) - req.Header = r.headers r.backoff.Sleep(r.backoff.CalculateBackoff(r.URL())) - if retries > 0 { + if retryAfter != nil { // We are retrying the request that we already send to apiserver // at least once before. // This request should also be throttled with the client-internal rate limiter. - if err := r.tryThrottleWithInfo(ctx, retryInfo); err != nil { + if err := r.tryThrottleWithInfo(ctx, retryAfter.Reason); err != nil { return err } - retryInfo = "" + retryAfter = nil } resp, err := client.Do(req) updateURLMetrics(ctx, r, resp, err) @@ -910,61 +984,45 @@ func (r *Request) request(ctx context.Context, fn func(*http.Request, *http.Resp } else { r.backoff.UpdateBackoff(r.URL(), err, resp.StatusCode) } - if err != nil { - // "Connection reset by peer" or "apiserver is shutting down" are usually a transient errors. - // Thus in case of "GET" operations, we simply retry it. - // We are not automatically retrying "write" operations, as - // they are not idempotent. 
- if r.verb != "GET" {
- return err
- }
- // For connection errors and apiserver shutdown errors retry.
- if net.IsConnectionReset(err) || net.IsProbableEOF(err) {
- // For the purpose of retry, we set the artificial "retry-after" response.
- // TODO: Should we clean the original response if it exists?
- resp = &http.Response{
- StatusCode: http.StatusInternalServerError,
- Header: http.Header{"Retry-After": []string{"1"}},
- Body: ioutil.NopCloser(bytes.NewReader([]byte{})),
- }
- } else {
- return err
- }
- }
 done := func() bool {
- // Ensure the response body is fully read and closed
- // before we reconnect, so that we reuse the same TCP
- // connection.
- defer func() {
- const maxBodySlurpSize = 2 << 10
- if resp.ContentLength <= maxBodySlurpSize {
- io.Copy(ioutil.Discard, &io.LimitedReader{R: resp.Body, N: maxBodySlurpSize})
- }
- resp.Body.Close()
- }()
+ defer readAndCloseResponseBody(resp)
- retries++
- if seconds, wait := checkWait(resp); wait && retries <= r.maxRetries {
- retryInfo = getRetryReason(retries, seconds, resp, err)
- if seeker, ok := r.body.(io.Seeker); ok && r.body != nil {
- _, err := seeker.Seek(0, 0)
- if err != nil {
- klog.V(4).Infof("Could not retry request, can't Seek() back to beginning of body for %T", r.body)
- fn(req, resp)
- return true
- }
+ // if the server returns an error in err, the response will be nil.
+ f := func(req *http.Request, resp *http.Response) {
+ if resp == nil {
+ return
 }
+ fn(req, resp)
+ }
- klog.V(4).Infof("Got a Retry-After %ds response for attempt %d to %v", seconds, retries, url)
- r.backoff.Sleep(time.Duration(seconds) * time.Second)
+ var retry bool
+ retryAfter, retry = r.retry.NextRetry(req, resp, err, func(req *http.Request, err error) bool {
+ // "Connection reset by peer" or "apiserver is shutting down" are usually transient errors.
+ // Thus in case of "GET" operations, we simply retry it.
+ // We are not automatically retrying "write" operations, as they are not idempotent.
+ if r.verb != "GET" {
+ return false
+ }
+ // For connection errors and apiserver shutdown errors retry.
+ if net.IsConnectionReset(err) || net.IsProbableEOF(err) {
+ return true
+ }
 return false
+ })
+ if retry {
+ err := r.retry.BeforeNextRetry(ctx, r.backoff, retryAfter, req.URL.String(), r.body)
+ if err == nil {
+ return false
+ }
+ klog.V(4).Infof("Could not retry request - %v", err)
 }
- fn(req, resp)
+
+ f(req, resp)
 return true
 }()
 if done {
- return nil
+ return err
 }
 }
 }
@@ -1019,13 +1077,13 @@ func (r *Request) transformResponse(resp *http.Response, req *http.Request) Resu
 // 3. Apiserver closes connection.
 // 4. client-go should catch this and return an error.
 klog.V(2).Infof("Stream error %#v when reading response body, may be caused by closed connection.", err)
- streamErr := fmt.Errorf("stream error when reading response body, may be caused by closed connection. Please retry. Original error: %v", err)
+ streamErr := fmt.Errorf("stream error when reading response body, may be caused by closed connection. Please retry. Original error: %w", err)
 return Result{
 err: streamErr,
 }
 default:
 klog.Errorf("Unexpected error when reading response body: %v", err)
- unexpectedErr := fmt.Errorf("unexpected error when reading response body. Please retry. Original error: %v", err)
+ unexpectedErr := fmt.Errorf("unexpected error when reading response body. Please retry. 
Original error: %w", err) return Result{ err: unexpectedErr, } @@ -1196,19 +1254,6 @@ func isTextResponse(resp *http.Response) bool { return strings.HasPrefix(media, "text/") } -// checkWait returns true along with a number of seconds if the server instructed us to wait -// before retrying. -func checkWait(resp *http.Response) (int, bool) { - switch r := resp.StatusCode; { - // any 500 error code and 429 can trigger a wait - case r == http.StatusTooManyRequests, r >= 500: - default: - return 0, false - } - i, ok := retryAfterSeconds(resp) - return i, ok -} - // retryAfterSeconds returns the value of the Retry-After header and true, or 0 and false if // the header was missing or not a valid number. func retryAfterSeconds(resp *http.Response) (int, bool) { @@ -1220,26 +1265,6 @@ func retryAfterSeconds(resp *http.Response) (int, bool) { return 0, false } -func getRetryReason(retries, seconds int, resp *http.Response, err error) string { - // priority and fairness sets the UID of the FlowSchema associated with a request - // in the following response Header. - const responseHeaderMatchedFlowSchemaUID = "X-Kubernetes-PF-FlowSchema-UID" - - message := fmt.Sprintf("retries: %d, retry-after: %ds", retries, seconds) - - switch { - case resp.StatusCode == http.StatusTooManyRequests: - // it is server-side throttling from priority and fairness - flowSchemaUID := resp.Header.Get(responseHeaderMatchedFlowSchemaUID) - return fmt.Sprintf("%s - retry-reason: due to server-side throttling, FlowSchema UID: %q", message, flowSchemaUID) - case err != nil: - // it's a retriable error - return fmt.Sprintf("%s - retry-reason: due to retriable error, error: %v", message, err) - default: - return fmt.Sprintf("%s - retry-reason: %d", message, resp.StatusCode) - } -} - // Result contains the result of calling Request.Do(). type Result struct { body []byte diff --git a/vendor/k8s.io/client-go/rest/transport.go b/vendor/k8s.io/client-go/rest/transport.go index 87792750ad3c5..7c38c6d92c189 100644 --- a/vendor/k8s.io/client-go/rest/transport.go +++ b/vendor/k8s.io/client-go/rest/transport.go @@ -26,6 +26,27 @@ import ( "k8s.io/client-go/transport" ) +// HTTPClientFor returns an http.Client that will provide the authentication +// or transport level security defined by the provided Config. Will return the +// default http.DefaultClient if no special case behavior is needed. +func HTTPClientFor(config *Config) (*http.Client, error) { + transport, err := TransportFor(config) + if err != nil { + return nil, err + } + var httpClient *http.Client + if transport != http.DefaultTransport || config.Timeout > 0 { + httpClient = &http.Client{ + Transport: transport, + Timeout: config.Timeout, + } + } else { + httpClient = http.DefaultClient + } + + return httpClient, nil +} + // TLSConfigFor returns a tls.Config that will provide the transport level security defined // by the provided Config. Will return nil if no transport level security is requested. 
func TLSConfigFor(config *Config) (*tls.Config, error) {
@@ -83,6 +104,7 @@ func (c *Config) TransportConfig() (*transport.Config, error) {
 BearerTokenFile: c.BearerTokenFile,
 Impersonate: transport.ImpersonationConfig{
 UserName: c.Impersonate.UserName,
+ UID: c.Impersonate.UID,
 Groups: c.Impersonate.Groups,
 Extra: c.Impersonate.Extra,
 },
diff --git a/vendor/k8s.io/client-go/rest/with_retry.go b/vendor/k8s.io/client-go/rest/with_retry.go
new file mode 100644
index 0000000000000..1b7360b534905
--- /dev/null
+++ b/vendor/k8s.io/client-go/rest/with_retry.go
@@ -0,0 +1,232 @@
+/*
+Copyright 2021 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package rest
+
+import (
+ "context"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "time"
+
+ "k8s.io/klog/v2"
+)
+
+// IsRetryableErrorFunc allows the client to provide its own function
+// that determines whether the specified err from the server is retryable.
+//
+// request: the original request sent to the server
+// err: the server sent this error to us
+//
+// The function returns true if the error is retryable and the request
+// can be retried; otherwise it returns false.
+// We have four modes of communication - 'Stream', 'Watch', 'Do' and 'DoRaw'; this
+// function allows us to customize the retryability aspect of each.
+type IsRetryableErrorFunc func(request *http.Request, err error) bool
+
+func (r IsRetryableErrorFunc) IsErrorRetryable(request *http.Request, err error) bool {
+ return r(request, err)
+}
+
+var neverRetryError = IsRetryableErrorFunc(func(_ *http.Request, _ error) bool {
+ return false
+})
+
+// WithRetry allows the client to retry a request up to a certain number of times.
+// Note that WithRetry is not safe for concurrent use by multiple
+// goroutines without additional locking or coordination.
+type WithRetry interface {
+ // SetMaxRetries makes the request use the specified integer as a ceiling
+ // for retries upon receiving a 429 status code and the "Retry-After" header
+ // in the response.
+ // A zero maxRetries prevents any retry from being attempted, returning immediately.
+ SetMaxRetries(maxRetries int)
+
+ // NextRetry advances the retry counter appropriately and returns true if the
+ // request should be retried; otherwise it returns false if:
+ // - we have already reached the maximum retry threshold.
+ // - the error does not fall into the retryable category.
+ // - the server has not sent us a 429 or 5xx status code, or the
+ // 'Retry-After' response header is not set with a value.
+ //
+ // if retry is set to true, retryAfter will contain the information
+ // regarding the next retry.
+ //
+ // request: the original request sent to the server
+ // resp: the response sent from the server; it is set if err is nil
+ // err: the server sent this error to us; if err is set, then resp is nil
+ // f: an IsRetryableErrorFunc provided by the client that determines
+ // if the err sent by the server is retryable.
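+ //
+ // With invalid input (a nil request, or both resp and err being nil),
+ // NextRetry returns a nil *RetryAfter and false without advancing the
+ // attempt counter.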
+ NextRetry(req *http.Request, resp *http.Response, err error, f IsRetryableErrorFunc) (*RetryAfter, bool) + + // BeforeNextRetry is responsible for carrying out operations that need + // to be completed before the next retry is initiated: + // - if the request context is already canceled there is no need to + // retry, the function will return ctx.Err(). + // - we need to seek to the beginning of the request body before we + // initiate the next retry, the function should return an error if + // it fails to do so. + // - we should wait the number of seconds the server has asked us to + // in the 'Retry-After' response header. + // + // If BeforeNextRetry returns an error the client should abort the retry, + // otherwise it is safe to initiate the next retry. + BeforeNextRetry(ctx context.Context, backoff BackoffManager, retryAfter *RetryAfter, url string, body io.Reader) error +} + +// RetryAfter holds information associated with the next retry. +type RetryAfter struct { + // Wait is the duration the server has asked us to wait before + // the next retry is initiated. + // This is the value of the 'Retry-After' response header in seconds. + Wait time.Duration + + // Attempt is the Nth attempt after which we have received a retryable + // error or a 'Retry-After' response header from the server. + Attempt int + + // Reason describes why we are retrying the request + Reason string +} + +type withRetry struct { + maxRetries int + attempts int +} + +func (r *withRetry) SetMaxRetries(maxRetries int) { + if maxRetries < 0 { + maxRetries = 0 + } + r.maxRetries = maxRetries +} + +func (r *withRetry) NextRetry(req *http.Request, resp *http.Response, err error, f IsRetryableErrorFunc) (*RetryAfter, bool) { + if req == nil || (resp == nil && err == nil) { + // bad input, we do nothing. + return nil, false + } + + r.attempts++ + retryAfter := &RetryAfter{Attempt: r.attempts} + if r.attempts > r.maxRetries { + return retryAfter, false + } + + // if the server returned an error, it takes precedence over the http response. + var errIsRetryable bool + if f != nil && err != nil && f.IsErrorRetryable(req, err) { + errIsRetryable = true + // we have a retryable error, for which we will create an + // artificial "Retry-After" response. + resp = retryAfterResponse() + } + if err != nil && !errIsRetryable { + return retryAfter, false + } + + // if we are here, we have either a or b: + // a: we have a retryable error, for which we already + // have an artificial "Retry-After" response. + // b: we have a response from the server for which we + // need to check if it is retryable + seconds, wait := checkWait(resp) + if !wait { + return retryAfter, false + } + + retryAfter.Wait = time.Duration(seconds) * time.Second + retryAfter.Reason = getRetryReason(r.attempts, seconds, resp, err) + return retryAfter, true +} + +func (r *withRetry) BeforeNextRetry(ctx context.Context, backoff BackoffManager, retryAfter *RetryAfter, url string, body io.Reader) error { + // Ensure the response body is fully read and closed before + // we reconnect, so that we reuse the same TCP connection. 
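+ //
+ // The steps below run in order: give up immediately if the request
+ // context has already been canceled, rewind a seekable request body so
+ // the retry resends the same payload, and finally honor the
+ // server-provided 'Retry-After' wait via the backoff manager.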
+ if ctx.Err() != nil {
+ return ctx.Err()
+ }
+
+ if seeker, ok := body.(io.Seeker); ok && body != nil {
+ if _, err := seeker.Seek(0, 0); err != nil {
+ return fmt.Errorf("can't Seek() back to beginning of body for %T", body)
+ }
+ }
+
+ klog.V(4).Infof("Got a Retry-After %s response for attempt %d to %v", retryAfter.Wait, retryAfter.Attempt, url)
+ if backoff != nil {
+ backoff.Sleep(retryAfter.Wait)
+ }
+ return nil
+}
+
+// checkWait returns true along with a number of seconds if
+// the server instructed us to wait before retrying.
+func checkWait(resp *http.Response) (int, bool) {
+ switch r := resp.StatusCode; {
+ // any 500 error code and 429 can trigger a wait
+ case r == http.StatusTooManyRequests, r >= 500:
+ default:
+ return 0, false
+ }
+ i, ok := retryAfterSeconds(resp)
+ return i, ok
+}
+
+func getRetryReason(retries, seconds int, resp *http.Response, err error) string {
+ // priority and fairness sets the UID of the FlowSchema
+ // associated with a request in the following response Header.
+ const responseHeaderMatchedFlowSchemaUID = "X-Kubernetes-PF-FlowSchema-UID"
+
+ message := fmt.Sprintf("retries: %d, retry-after: %ds", retries, seconds)
+
+ switch {
+ case resp.StatusCode == http.StatusTooManyRequests:
+ // it is server-side throttling from priority and fairness
+ flowSchemaUID := resp.Header.Get(responseHeaderMatchedFlowSchemaUID)
+ return fmt.Sprintf("%s - retry-reason: due to server-side throttling, FlowSchema UID: %q", message, flowSchemaUID)
+ case err != nil:
+ // it's a retryable error
+ return fmt.Sprintf("%s - retry-reason: due to retryable error, error: %v", message, err)
+ default:
+ return fmt.Sprintf("%s - retry-reason: %d", message, resp.StatusCode)
+ }
+}
+
+func readAndCloseResponseBody(resp *http.Response) {
+ if resp == nil {
+ return
+ }
+
+ // Ensure the response body is fully read and closed
+ // before we reconnect, so that we reuse the same TCP
+ // connection.
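+ //
+ // Only bodies with a ContentLength of at most maxBodySlurpSize (2 KiB)
+ // are drained below; larger bodies are just closed, since fully reading
+ // them would likely cost more than establishing a new connection.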
+ const maxBodySlurpSize = 2 << 10
+ defer resp.Body.Close()
+
+ if resp.ContentLength <= maxBodySlurpSize {
+ io.Copy(ioutil.Discard, &io.LimitedReader{R: resp.Body, N: maxBodySlurpSize})
+ }
+}
+
+func retryAfterResponse() *http.Response {
+ return &http.Response{
+ StatusCode: http.StatusInternalServerError,
+ Header: http.Header{"Retry-After": []string{"1"}},
+ }
+}
diff --git a/vendor/k8s.io/client-go/rest/zz_generated.deepcopy.go b/vendor/k8s.io/client-go/rest/zz_generated.deepcopy.go
index da4a1624e81aa..86991be3b514e 100644
--- a/vendor/k8s.io/client-go/rest/zz_generated.deepcopy.go
+++ b/vendor/k8s.io/client-go/rest/zz_generated.deepcopy.go
@@ -1,3 +1,4 @@
+//go:build !ignore_autogenerated
 // +build !ignore_autogenerated
 /*
diff --git a/vendor/k8s.io/client-go/tools/cache/OWNERS b/vendor/k8s.io/client-go/tools/cache/OWNERS
index 6562ee5c7aa09..c4824d6bcefdc 100644
--- a/vendor/k8s.io/client-go/tools/cache/OWNERS
+++ b/vendor/k8s.io/client-go/tools/cache/OWNERS
@@ -20,7 +20,6 @@ reviewers:
 - caesarxuchao
 - mikedanese
 - liggitt
-- erictune
 - davidopp
 - pmorie
 - janetkuo
@@ -31,7 +30,6 @@ reviewers:
 - hongchaodeng
 - krousey
 - xiang90
-- mml
 - ingvagabund
 - resouer
 - jessfraz
diff --git a/vendor/k8s.io/client-go/tools/cache/controller.go b/vendor/k8s.io/client-go/tools/cache/controller.go
index 684d1a8d388c2..c85c29db35abd 100644
--- a/vendor/k8s.io/client-go/tools/cache/controller.go
+++ b/vendor/k8s.io/client-go/tools/cache/controller.go
@@ -21,9 +21,9 @@ import (
 "time"
 "k8s.io/apimachinery/pkg/runtime"
- "k8s.io/apimachinery/pkg/util/clock"
 utilruntime "k8s.io/apimachinery/pkg/util/runtime"
 "k8s.io/apimachinery/pkg/util/wait"
+ "k8s.io/utils/clock"
 )
 // This file implements a low-level controller that is used in
@@ -322,10 +322,10 @@ func NewInformer(
 // This will hold the client state, as we know it.
 clientState := NewStore(DeletionHandlingMetaNamespaceKeyFunc)
- return clientState, newInformer(lw, objType, resyncPeriod, h, clientState)
+ return clientState, newInformer(lw, objType, resyncPeriod, h, clientState, nil)
 }
-// NewIndexerInformer returns a Indexer and a controller for populating the index
+// NewIndexerInformer returns an Indexer and a Controller for populating the index
 // while also providing event notifications. You should only use the returned
 // Index for Get/List operations; Add/Modify/Deletes will cause the event
 // notifications to be faulty.
@@ -351,7 +351,59 @@ func NewIndexerInformer(
 // This will hold the client state, as we know it.
 clientState := NewIndexer(DeletionHandlingMetaNamespaceKeyFunc, indexers)
- return clientState, newInformer(lw, objType, resyncPeriod, h, clientState)
+ return clientState, newInformer(lw, objType, resyncPeriod, h, clientState, nil)
+}
+
+// TransformFunc allows for transforming an object before it will be processed
+// and put into the controller cache and before the corresponding handlers will
+// be called on it.
+// TransformFunc (similarly to ResourceEventHandler functions) should be able
+// to correctly handle the tombstone of type cache.DeletedFinalStateUnknown.
+//
+// The most common usage pattern is to clean up some parts of the object to
+// reduce component memory usage if a given component doesn't care about them.
+type TransformFunc func(interface{}) (interface{}, error)
+
+// NewTransformingInformer returns a Store and a controller for populating
+// the store while also providing event notifications. You should only use
+// the returned Store for Get/List operations; Add/Modify/Deletes will cause
+// the event notifications to be faulty.
+// The given transform function will be called on all objects before they are
+// put into the Store, and the corresponding Add/Modify/Delete handlers will
+// be invoked for them.
+func NewTransformingInformer(
+ lw ListerWatcher,
+ objType runtime.Object,
+ resyncPeriod time.Duration,
+ h ResourceEventHandler,
+ transformer TransformFunc,
+) (Store, Controller) {
+ // This will hold the client state, as we know it.
+ clientState := NewStore(DeletionHandlingMetaNamespaceKeyFunc)
+
+ return clientState, newInformer(lw, objType, resyncPeriod, h, clientState, transformer)
+}
+
+// NewTransformingIndexerInformer returns an Indexer and a controller for
+// populating the index while also providing event notifications. You should
+// only use the returned Index for Get/List operations; Add/Modify/Deletes
+// will cause the event notifications to be faulty.
+// The given transform function will be called on all objects before they are
+// put into the Index, and the corresponding Add/Modify/Delete handlers will
+// be invoked for them.
+func NewTransformingIndexerInformer(
+ lw ListerWatcher,
+ objType runtime.Object,
+ resyncPeriod time.Duration,
+ h ResourceEventHandler,
+ indexers Indexers,
+ transformer TransformFunc,
+) (Indexer, Controller) {
+ // This will hold the client state, as we know it.
+ clientState := NewIndexer(DeletionHandlingMetaNamespaceKeyFunc, indexers)
+
+ return clientState, newInformer(lw, objType, resyncPeriod, h, clientState, transformer)
 }
 // newInformer returns a controller for populating the store while also
@@ -374,6 +426,7 @@ func newInformer(
 resyncPeriod time.Duration,
 h ResourceEventHandler,
 clientState Store,
+ transformer TransformFunc,
 ) Controller {
 // This will hold incoming changes. Note how we pass clientState in as a
 // KeyLister, that way resync operations will result in the correct set
@@ -393,24 +446,33 @@ func newInformer(
 Process: func(obj interface{}) error {
 // from oldest to newest
 for _, d := range obj.(Deltas) {
+ obj := d.Object
+ if transformer != nil {
+ var err error
+ obj, err = transformer(obj)
+ if err != nil {
+ return err
+ }
+ }
+
 switch d.Type {
 case Sync, Replaced, Added, Updated:
- if old, exists, err := clientState.Get(d.Object); err == nil && exists {
- if err := clientState.Update(d.Object); err != nil {
+ if old, exists, err := clientState.Get(obj); err == nil && exists {
+ if err := clientState.Update(obj); err != nil {
 return err
 }
- h.OnUpdate(old, d.Object)
+ h.OnUpdate(old, obj)
 } else {
- if err := clientState.Add(d.Object); err != nil {
+ if err := clientState.Add(obj); err != nil {
 return err
 }
- h.OnAdd(d.Object)
+ h.OnAdd(obj)
 }
 case Deleted:
- if err := clientState.Delete(d.Object); err != nil {
+ if err := clientState.Delete(obj); err != nil {
 return err
 }
- h.OnDelete(d.Object)
+ h.OnDelete(obj)
 }
 }
 return nil
diff --git a/vendor/k8s.io/client-go/tools/cache/delta_fifo.go b/vendor/k8s.io/client-go/tools/cache/delta_fifo.go
index f648673e11180..2da2933ab7489 100644
--- a/vendor/k8s.io/client-go/tools/cache/delta_fifo.go
+++ b/vendor/k8s.io/client-go/tools/cache/delta_fifo.go
@@ -20,10 +20,12 @@ import (
 "errors"
 "fmt"
 "sync"
+ "time"
 "k8s.io/apimachinery/pkg/util/sets"
 "k8s.io/klog/v2"
+ utiltrace "k8s.io/utils/trace"
 )
 // DeltaFIFOOptions is the configuration parameters for DeltaFIFO. 
All are @@ -121,7 +123,7 @@ type DeltaFIFO struct { knownObjects KeyListerGetter // Used to indicate a queue is closed so a control loop can exit when a queue is empty. - // Currently, not used to gate any of CRED operations. + // Currently, not used to gate any of CRUD operations. closed bool // emitDeltaTypeReplaced is whether to emit the Replaced or Sync @@ -153,7 +155,7 @@ const ( // change happened, and the object's state after* that change. // // [*] Unless the change is a deletion, and then you'll get the final -// state of the object before it was deleted. +// state of the object before it was deleted. type Delta struct { Type DeltaType Object interface{} @@ -174,9 +176,10 @@ type Deltas []Delta // modifications. // // TODO: consider merging keyLister with this object, tracking a list of -// "known" keys when Pop() is called. Have to think about how that -// affects error retrying. -// NOTE: It is possible to misuse this and cause a race when using an +// "known" keys when Pop() is called. Have to think about how that +// affects error retrying. +// +// NOTE: It is possible to misuse this and cause a race when using an // external known object source. // Whether there is a potential race depends on how the consumer // modifies knownObjects. In Pop(), process function is called under @@ -185,8 +188,7 @@ type Deltas []Delta // // Example: // In case of sharedIndexInformer being a consumer -// (https://github.com/kubernetes/kubernetes/blob/0cdd940f/staging/ -// src/k8s.io/client-go/tools/cache/shared_informer.go#L192), +// (https://github.com/kubernetes/kubernetes/blob/0cdd940f/staging/src/k8s.io/client-go/tools/cache/shared_informer.go#L192), // there is no race as knownObjects (s.indexer) is modified safely // under DeltaFIFO's lock. The only exceptions are GetStore() and // GetIndexer() methods, which expose ways to modify the underlying @@ -340,7 +342,7 @@ func (f *DeltaFIFO) AddIfNotPresent(obj interface{}) error { if !ok { return fmt.Errorf("object must be of type deltas, but got: %#v", obj) } - id, err := f.KeyOf(deltas.Newest().Object) + id, err := f.KeyOf(deltas) if err != nil { return KeyError{obj, err} } @@ -373,13 +375,8 @@ func dedupDeltas(deltas Deltas) Deltas { a := &deltas[n-1] b := &deltas[n-2] if out := isDup(a, b); out != nil { - // `a` and `b` are duplicates. Only keep the one returned from isDup(). - // TODO: This extra array allocation and copy seems unnecessary if - // all we do to dedup is compare the new delta with the last element - // in `items`, which could be done by mutating `items` directly. - // Might be worth profiling and investigating if it is safe to optimize. - d := append(Deltas{}, deltas[:n-2]...) 
- return append(d, *out)
+ deltas[n-2] = *out
+ return deltas[:n-1]
 }
 return deltas
 }
@@ -461,8 +458,8 @@ func (f *DeltaFIFO) listLocked() []interface{} {
 func (f *DeltaFIFO) ListKeys() []string {
 f.lock.RLock()
 defer f.lock.RUnlock()
- list := make([]string, 0, len(f.items))
- for key := range f.items {
+ list := make([]string, 0, len(f.queue))
+ for _, key := range f.queue {
 list = append(list, key)
 }
 return list
@@ -531,6 +528,7 @@ func (f *DeltaFIFO) Pop(process PopProcessFunc) (interface{}, error) {
 }
 id := f.queue[0]
 f.queue = f.queue[1:]
+ depth := len(f.queue)
 if f.initialPopulationCount > 0 {
 f.initialPopulationCount--
 }
@@ -541,6 +539,18 @@ func (f *DeltaFIFO) Pop(process PopProcessFunc) (interface{}, error) {
 continue
 }
 delete(f.items, id)
+ // Only log traces if the queue depth is greater than 10 and it takes more than
+ // 100 milliseconds to process one item from the queue.
+ // Queue depth never goes high because processing an item locks the queue,
+ // and new items can't be added until processing finishes.
+ // https://github.com/kubernetes/kubernetes/issues/103789
+ if depth > 10 {
+ trace := utiltrace.New("DeltaFIFO Pop Process",
+ utiltrace.Field{Key: "ID", Value: id},
+ utiltrace.Field{Key: "Depth", Value: depth},
+ utiltrace.Field{Key: "Reason", Value: "slow event handlers blocking the queue"})
+ defer trace.LogIfLong(100 * time.Millisecond)
+ }
 err := process(item)
 if e, ok := err.(ErrRequeue); ok {
 f.addIfNotPresent(id, item)
@@ -562,7 +572,7 @@ func (f *DeltaFIFO) Pop(process PopProcessFunc) (interface{}, error) {
 // of the Deltas associated with K. Otherwise the pre-existing keys
 // are those listed by `f.knownObjects` and the current object of K is
 // what `f.knownObjects.GetByKey(K)` returns.
-func (f *DeltaFIFO) Replace(list []interface{}, resourceVersion string) error {
+func (f *DeltaFIFO) Replace(list []interface{}, _ string) error {
 f.lock.Lock()
 defer f.lock.Unlock()
 keys := make(sets.String, len(list))
diff --git a/vendor/k8s.io/client-go/tools/cache/expiration_cache.go b/vendor/k8s.io/client-go/tools/cache/expiration_cache.go
index dfa95619309c1..7abdae73742ad 100644
--- a/vendor/k8s.io/client-go/tools/cache/expiration_cache.go
+++ b/vendor/k8s.io/client-go/tools/cache/expiration_cache.go
@@ -20,8 +20,8 @@ import (
 "sync"
 "time"
- "k8s.io/apimachinery/pkg/util/clock"
 "k8s.io/klog/v2"
+ "k8s.io/utils/clock"
 )
 // ExpirationCache implements the store interface
diff --git a/vendor/k8s.io/client-go/tools/cache/expiration_cache_fakes.go b/vendor/k8s.io/client-go/tools/cache/expiration_cache_fakes.go
index 33afd32c867c7..a16f4735e3475 100644
--- a/vendor/k8s.io/client-go/tools/cache/expiration_cache_fakes.go
+++ b/vendor/k8s.io/client-go/tools/cache/expiration_cache_fakes.go
@@ -17,8 +17,8 @@ limitations under the License.
 package cache
 import (
- "k8s.io/apimachinery/pkg/util/clock"
 "k8s.io/apimachinery/pkg/util/sets"
+ "k8s.io/utils/clock"
 )
 type fakeThreadSafeMap struct {
diff --git a/vendor/k8s.io/client-go/tools/cache/fifo.go b/vendor/k8s.io/client-go/tools/cache/fifo.go
index edb2c8ed6c4e2..5c9255027a078 100644
--- a/vendor/k8s.io/client-go/tools/cache/fifo.go
+++ b/vendor/k8s.io/client-go/tools/cache/fifo.go
@@ -127,7 +127,7 @@ type FIFO struct {
 // Indication the queue is closed.
 // Used to indicate a queue is closed so a control loop can exit when a queue is empty.
- // Currently, not used to gate any of CRED operations.
+ // Currently, not used to gate any of CRUD operations. 
closed bool } @@ -263,10 +263,7 @@ func (f *FIFO) GetByKey(key string) (item interface{}, exists bool, err error) { func (f *FIFO) IsClosed() bool { f.lock.Lock() defer f.lock.Unlock() - if f.closed { - return true - } - return false + return f.closed } // Pop waits until an item is ready and processes it. If multiple items are diff --git a/vendor/k8s.io/client-go/tools/cache/heap.go b/vendor/k8s.io/client-go/tools/cache/heap.go index e503a45a467f4..819325e9e2e03 100644 --- a/vendor/k8s.io/client-go/tools/cache/heap.go +++ b/vendor/k8s.io/client-go/tools/cache/heap.go @@ -304,10 +304,7 @@ func (h *Heap) GetByKey(key string) (interface{}, bool, error) { func (h *Heap) IsClosed() bool { h.lock.RLock() defer h.lock.RUnlock() - if h.closed { - return true - } - return false + return h.closed } // NewHeap returns a Heap which can be used to queue up items to process. diff --git a/vendor/k8s.io/client-go/tools/cache/index.go b/vendor/k8s.io/client-go/tools/cache/index.go index fa29e6a704b6c..1d6aae560af66 100644 --- a/vendor/k8s.io/client-go/tools/cache/index.go +++ b/vendor/k8s.io/client-go/tools/cache/index.go @@ -78,7 +78,7 @@ func IndexFuncToKeyFuncAdapter(indexFunc IndexFunc) KeyFunc { } const ( - // NamespaceIndex is the lookup name for the most comment index function, which is to index by the namespace field. + // NamespaceIndex is the lookup name for the most common index function, which is to index by the namespace field. NamespaceIndex string = "namespace" ) @@ -94,7 +94,7 @@ func MetaNamespaceIndexFunc(obj interface{}) ([]string, error) { // Index maps the indexed value to a set of keys in the store that match on that value type Index map[string]sets.String -// Indexers maps a name to a IndexFunc +// Indexers maps a name to an IndexFunc type Indexers map[string]IndexFunc // Indices maps a name to an Index diff --git a/vendor/k8s.io/client-go/tools/cache/mutation_detector.go b/vendor/k8s.io/client-go/tools/cache/mutation_detector.go index 4611e80ff0a87..b37537cbd89d3 100644 --- a/vendor/k8s.io/client-go/tools/cache/mutation_detector.go +++ b/vendor/k8s.io/client-go/tools/cache/mutation_detector.go @@ -99,7 +99,7 @@ func (d *defaultCacheMutationDetector) Run(stopCh <-chan struct{}) { for { if d.lastRotated.IsZero() { d.lastRotated = time.Now() - } else if time.Now().Sub(d.lastRotated) > d.retainDuration { + } else if time.Since(d.lastRotated) > d.retainDuration { d.retainedCachedObjs = d.cachedObjs d.cachedObjs = nil d.lastRotated = time.Now() diff --git a/vendor/k8s.io/client-go/tools/cache/reflector.go b/vendor/k8s.io/client-go/tools/cache/reflector.go index 360d7304b7ff1..0708a6e8b5896 100644 --- a/vendor/k8s.io/client-go/tools/cache/reflector.go +++ b/vendor/k8s.io/client-go/tools/cache/reflector.go @@ -32,7 +32,6 @@ import ( "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/util/clock" "k8s.io/apimachinery/pkg/util/naming" utilnet "k8s.io/apimachinery/pkg/util/net" utilruntime "k8s.io/apimachinery/pkg/util/runtime" @@ -40,6 +39,7 @@ import ( "k8s.io/apimachinery/pkg/watch" "k8s.io/client-go/tools/pager" "k8s.io/klog/v2" + "k8s.io/utils/clock" "k8s.io/utils/trace" ) @@ -69,7 +69,7 @@ type Reflector struct { // backoff manages backoff of ListWatch backoffManager wait.BackoffManager - // initConnBackoffManager manages backoff the initial connection with the Watch calll of ListAndWatch. 
+ // initConnBackoffManager manages backoff of the initial connection with the Watch call of ListAndWatch.
 initConnBackoffManager wait.BackoffManager
 resyncPeriod time.Duration
@@ -216,13 +216,13 @@ var internalPackages = []string{"client-go/tools/cache/"}
 // objects and subsequent deltas.
 // Run will exit when stopCh is closed.
 func (r *Reflector) Run(stopCh <-chan struct{}) {
- klog.V(2).Infof("Starting reflector %s (%s) from %s", r.expectedTypeName, r.resyncPeriod, r.name)
+ klog.V(3).Infof("Starting reflector %s (%s) from %s", r.expectedTypeName, r.resyncPeriod, r.name)
 wait.BackoffUntil(func() {
 if err := r.ListAndWatch(stopCh); err != nil {
 r.watchErrorHandler(r, err)
 }
 }, r.backoffManager, true, stopCh)
- klog.V(2).Infof("Stopping reflector %s (%s) from %s", r.expectedTypeName, r.resyncPeriod, r.name)
+ klog.V(3).Infof("Stopping reflector %s (%s) from %s", r.expectedTypeName, r.resyncPeriod, r.name)
 }
 var (
@@ -319,7 +319,9 @@ func (r *Reflector) ListAndWatch(stopCh <-chan struct{}) error {
 panic(r)
 case <-listCh:
 }
+ initTrace.Step("Objects listed", trace.Field{"error", err})
 if err != nil {
+ klog.Warningf("%s: failed to list %v: %v", r.name, r.expectedTypeName, err)
 return fmt.Errorf("failed to list %v: %v", r.expectedTypeName, err)
 }
@@ -338,7 +340,6 @@ func (r *Reflector) ListAndWatch(stopCh <-chan struct{}) error {
 }
 r.setIsLastSyncResourceVersionUnavailable(false) // list was successful
- initTrace.Step("Objects listed")
 listMetaInterface, err := meta.ListAccessor(list)
 if err != nil {
 return fmt.Errorf("unable to understand list result %#v: %v", list, err)
@@ -417,7 +418,8 @@ func (r *Reflector) ListAndWatch(stopCh <-chan struct{}) error {
 // It doesn't make sense to re-list all objects because most likely we will be able to restart
 // watch where we ended.
 // If that's the case begin exponentially backing off and resend watch request.
- if utilnet.IsConnectionRefused(err) {
+ // Do the same for "429" errors.
+ if utilnet.IsConnectionRefused(err) || apierrors.IsTooManyRequests(err) {
 <-r.initConnBackoffManager.Backoff().C()
 continue
 }
@@ -432,6 +434,10 @@ func (r *Reflector) ListAndWatch(stopCh <-chan struct{}) error {
 // has a semantic that it returns data at least as fresh as provided RV.
 // So first try to LIST with setting RV to resource version of last observed object. 
klog.V(4).Infof("%s: watch of %v closed with: %v", r.name, r.expectedTypeName, err)
+ case apierrors.IsTooManyRequests(err):
+ klog.V(2).Infof("%s: watch of %v returned 429 - backing off", r.name, r.expectedTypeName)
+ <-r.initConnBackoffManager.Backoff().C()
+ continue
 default:
 klog.Warningf("%s: watch of %v ended with: %v", r.name, r.expectedTypeName, err)
 }
diff --git a/vendor/k8s.io/client-go/tools/cache/shared_informer.go b/vendor/k8s.io/client-go/tools/cache/shared_informer.go
index 3a3f538a155f0..4b7fc04e3ea6f 100644
--- a/vendor/k8s.io/client-go/tools/cache/shared_informer.go
+++ b/vendor/k8s.io/client-go/tools/cache/shared_informer.go
@@ -23,10 +23,10 @@ import (
 "k8s.io/apimachinery/pkg/api/meta"
 "k8s.io/apimachinery/pkg/runtime"
- "k8s.io/apimachinery/pkg/util/clock"
 utilruntime "k8s.io/apimachinery/pkg/util/runtime"
 "k8s.io/apimachinery/pkg/util/wait"
 "k8s.io/utils/buffer"
+ "k8s.io/utils/clock"
 "k8s.io/klog/v2"
 )
@@ -368,6 +368,10 @@ func (s *sharedIndexInformer) SetWatchErrorHandler(handler WatchErrorHandler) er
 func (s *sharedIndexInformer) Run(stopCh <-chan struct{}) {
 defer utilruntime.HandleCrash()
+ if s.HasStarted() {
+ klog.Warningf("The sharedIndexInformer has started; running it more than once is not allowed")
+ return
+ }
 fifo := NewDeltaFIFOWithOptions(DeltaFIFOOptions{
 KnownObjects: s.indexer,
 EmitDeltaTypeReplaced: true,
@@ -410,6 +414,12 @@ func (s *sharedIndexInformer) Run(stopCh <-chan struct{}) {
 s.controller.Run(stopCh)
 }
+func (s *sharedIndexInformer) HasStarted() bool {
+ s.startedLock.Lock()
+ defer s.startedLock.Unlock()
+ return s.started
+}
+
 func (s *sharedIndexInformer) HasSynced() bool {
 s.startedLock.Lock()
 defer s.startedLock.Unlock()
@@ -694,9 +704,9 @@ type processorListener struct {
 // full resync from the shared informer, but modified by two
 // adjustments. One is imposing a lower bound,
 // `minimumResyncPeriod`. The other is another lower bound, the
- // sharedProcessor's `resyncCheckPeriod`, that is imposed (a) only
+ // sharedIndexInformer's `resyncCheckPeriod`, that is imposed (a) only
 // in AddEventHandlerWithResyncPeriod invocations made after the
- // sharedProcessor starts and (b) only if the informer does
+ // sharedIndexInformer starts and (b) only if the informer does
 // resyncs at all.
 requestedResyncPeriod time.Duration
 // resyncPeriod is the threshold that will be used in the logic
diff --git a/vendor/k8s.io/client-go/tools/cache/store.go b/vendor/k8s.io/client-go/tools/cache/store.go
index 886e95d2deb65..24ffabab7b51b 100644
--- a/vendor/k8s.io/client-go/tools/cache/store.go
+++ b/vendor/k8s.io/client-go/tools/cache/store.go
@@ -85,6 +85,11 @@ func (k KeyError) Error() string {
 return fmt.Sprintf("couldn't create key for object %+v: %v", k.Obj, k.Err)
 }
+// Unwrap implements errors.Unwrap
+func (k KeyError) Unwrap() error {
+ return k.Err
+}
+
 // ExplicitKey can be passed to MetaNamespaceKeyFunc if you have the key for
 // the object but not the object itself. 
type ExplicitKey string
diff --git a/vendor/k8s.io/client-go/tools/cache/thread_safe_store.go b/vendor/k8s.io/client-go/tools/cache/thread_safe_store.go
index 56251179b5f91..71d909d49e501 100644
--- a/vendor/k8s.io/client-go/tools/cache/thread_safe_store.go
+++ b/vendor/k8s.io/client-go/tools/cache/thread_safe_store.go
@@ -90,7 +90,7 @@ func (c *threadSafeMap) Delete(key string) {
 c.lock.Lock()
 defer c.lock.Unlock()
 if obj, exists := c.items[key]; exists {
- c.deleteFromIndices(obj, key)
+ c.updateIndices(obj, nil, key)
 delete(c.items, key)
 }
 }
@@ -251,64 +251,76 @@ func (c *threadSafeMap) AddIndexers(newIndexers Indexers) error {
 return nil
 }
-// updateIndices modifies the objects location in the managed indexes, if this is an update, you must provide an oldObj
+// updateIndices modifies the object's location in the managed indexes:
+// - for create you must provide only the newObj
+// - for update you must provide both the oldObj and the newObj
+// - for delete you must provide only the oldObj
 // updateIndices must be called from a function that already has a lock on the cache
 func (c *threadSafeMap) updateIndices(oldObj interface{}, newObj interface{}, key string) {
- // if we got an old object, we need to remove it before we add it again
- if oldObj != nil {
- c.deleteFromIndices(oldObj, key)
- }
+ var oldIndexValues, indexValues []string
+ var err error
 for name, indexFunc := range c.indexers {
- indexValues, err := indexFunc(newObj)
+ if oldObj != nil {
+ oldIndexValues, err = indexFunc(oldObj)
+ } else {
+ oldIndexValues = oldIndexValues[:0]
+ }
 if err != nil {
 panic(fmt.Errorf("unable to calculate an index entry for key %q on index %q: %v", key, name, err))
 }
- index := c.indices[name]
- if index == nil {
- index = Index{}
- c.indices[name] = index
- }
- for _, indexValue := range indexValues {
- set := index[indexValue]
- if set == nil {
- set = sets.String{}
- index[indexValue] = set
- }
- set.Insert(key)
+ if newObj != nil {
+ indexValues, err = indexFunc(newObj)
+ } else {
+ indexValues = indexValues[:0]
 }
- }
-}
-
-// deleteFromIndices removes the object from each of the managed indexes
-// it is intended to be called from a function that already has a lock on the cache
-func (c *threadSafeMap) deleteFromIndices(obj interface{}, key string) {
- for name, indexFunc := range c.indexers {
- indexValues, err := indexFunc(obj)
 if err != nil {
 panic(fmt.Errorf("unable to calculate an index entry for key %q on index %q: %v", key, name, err))
 }
 index := c.indices[name]
 if index == nil {
+ index = Index{}
+ c.indices[name] = index
+ }
+
+ if len(indexValues) == 1 && len(oldIndexValues) == 1 && indexValues[0] == oldIndexValues[0] {
+ // We optimize for the most common case where indexFunc returns a single value which has not been changed
 continue
 }
- for _, indexValue := range indexValues {
- set := index[indexValue]
- if set != nil {
- set.Delete(key)
-
- // If we don't delete the set when zero, indices with high cardinality
- // short lived resources can cause memory to increase over time from
- // unused empty sets. See `kubernetes/kubernetes/issues/84959`. 
- if len(set) == 0 {
- delete(index, indexValue)
- }
- }
+
+ for _, value := range oldIndexValues {
+ c.deleteKeyFromIndex(key, value, index)
+ }
+ for _, value := range indexValues {
+ c.addKeyToIndex(key, value, index)
 }
 }
 }
+func (c *threadSafeMap) addKeyToIndex(key, indexValue string, index Index) {
+ set := index[indexValue]
+ if set == nil {
+ set = sets.String{}
+ index[indexValue] = set
+ }
+ set.Insert(key)
+}
+
+func (c *threadSafeMap) deleteKeyFromIndex(key, indexValue string, index Index) {
+ set := index[indexValue]
+ if set == nil {
+ return
+ }
+ set.Delete(key)
+ // If we don't delete the set when zero, indices of high-cardinality,
+ // short-lived resources can cause memory to increase over time from
+ // unused empty sets. See `kubernetes/kubernetes/issues/84959`.
+ if len(set) == 0 {
+ delete(index, indexValue)
+ }
+}
+
 func (c *threadSafeMap) Resync() error {
 // Nothing to do
 return nil
diff --git a/vendor/k8s.io/client-go/tools/clientcmd/api/types.go b/vendor/k8s.io/client-go/tools/clientcmd/api/types.go
index 24f469236edb3..44a2fb94b1585 100644
--- a/vendor/k8s.io/client-go/tools/clientcmd/api/types.go
+++ b/vendor/k8s.io/client-go/tools/clientcmd/api/types.go
@@ -124,7 +124,10 @@ type AuthInfo struct {
 // Impersonate is the username to act-as.
 // +optional
 Impersonate string `json:"act-as,omitempty"`
- // ImpersonateGroups is the groups to imperonate.
+ // ImpersonateUID is the uid to impersonate.
+ // +optional
+ ImpersonateUID string `json:"act-as-uid,omitempty"`
+ // ImpersonateGroups is the groups to impersonate.
 // +optional
 ImpersonateGroups []string `json:"act-as-groups,omitempty"`
 // ImpersonateUserExtra contains additional information for impersonated user.
@@ -245,6 +248,33 @@ type ExecConfig struct {
 // to be stored directly in the kubeconfig.
 // +k8s:conversion-gen=false
 Config runtime.Object
+
+ // InteractiveMode determines this plugin's relationship with standard input. Valid
+ // values are "Never" (this exec plugin never uses standard input), "IfAvailable" (this
+ // exec plugin wants to use standard input if it is available), or "Always" (this exec
+ // plugin requires standard input to function). See ExecInteractiveMode values for more
+ // details.
+ //
+ // If APIVersion is client.authentication.k8s.io/v1alpha1 or
+ // client.authentication.k8s.io/v1beta1, then this field is optional and defaults
+ // to "IfAvailable" when unset. Otherwise, this field is required.
+ // +optional
+ InteractiveMode ExecInteractiveMode
+
+ // StdinUnavailable indicates whether the exec authenticator can pass standard
+ // input through to this exec plugin. For example, a higher level entity might be using
+ // standard input for something else and therefore it would not be safe for the exec
+ // plugin to use standard input. This is kept here in order to keep all of the exec configuration
+ // together, but it is never serialized.
+ // +k8s:conversion-gen=false
+ StdinUnavailable bool
+
+ // StdinUnavailableMessage is an optional message to be displayed when the exec authenticator
+ // cannot successfully run this exec plugin because it needs to use standard input and
+ // StdinUnavailable is true. For example, a process that is already using standard input to
+ // read user instructions might set this to "used by my-program to read user instructions". 
+ // +k8s:conversion-gen=false + StdinUnavailableMessage string } var _ fmt.Stringer = new(ExecConfig) @@ -271,7 +301,7 @@ func (c ExecConfig) String() string { if c.Config != nil { config = "runtime.Object(--- REDACTED ---)" } - return fmt.Sprintf("api.ExecConfig{Command: %q, Args: %#v, Env: %s, APIVersion: %q, ProvideClusterInfo: %t, Config: %s}", c.Command, args, env, c.APIVersion, c.ProvideClusterInfo, config) + return fmt.Sprintf("api.ExecConfig{Command: %q, Args: %#v, Env: %s, APIVersion: %q, ProvideClusterInfo: %t, Config: %s, StdinUnavailable: %t}", c.Command, args, env, c.APIVersion, c.ProvideClusterInfo, config, c.StdinUnavailable) } // ExecEnvVar is used for setting environment variables when executing an exec-based @@ -281,6 +311,26 @@ type ExecEnvVar struct { Value string `json:"value"` } +// ExecInteractiveMode is a string that describes an exec plugin's relationship with standard input. +type ExecInteractiveMode string + +const ( + // NeverExecInteractiveMode declares that this exec plugin never needs to use standard + // input, and therefore the exec plugin will be run regardless of whether standard input is + // available for user input. + NeverExecInteractiveMode ExecInteractiveMode = "Never" + // IfAvailableExecInteractiveMode declares that this exec plugin would like to use standard input + // if it is available, but can still operate if standard input is not available. Therefore, the + // exec plugin will be run regardless of whether stdin is available for user input. If standard + // input is available for user input, then it will be provided to this exec plugin. + IfAvailableExecInteractiveMode ExecInteractiveMode = "IfAvailable" + // AlwaysExecInteractiveMode declares that this exec plugin requires standard input in order to + // run, and therefore the exec plugin will only be run if standard input is available for user + // input. If standard input is not available for user input, then the exec plugin will not be run + // and an error will be returned by the exec plugin runner. + AlwaysExecInteractiveMode ExecInteractiveMode = "Always" +) + // NewConfig is a convenience function that returns a new Config object with non-nil maps func NewConfig() *Config { return &Config{ diff --git a/vendor/k8s.io/client-go/tools/clientcmd/api/v1/conversion.go b/vendor/k8s.io/client-go/tools/clientcmd/api/v1/conversion.go index c38ebc076045b..6eee281bc66c4 100644 --- a/vendor/k8s.io/client-go/tools/clientcmd/api/v1/conversion.go +++ b/vendor/k8s.io/client-go/tools/clientcmd/api/v1/conversion.go @@ -165,7 +165,7 @@ func Convert_Map_string_To_runtime_Object_To_Slice_v1_NamedExtension(in *map[str newExtension := (*in)[key] oldExtension := runtime.RawExtension{} if err := runtime.Convert_runtime_Object_To_runtime_RawExtension(&newExtension, &oldExtension, s); err != nil { - return nil + return err } namedExtension := NamedExtension{key, oldExtension} *out = append(*out, namedExtension) diff --git a/vendor/k8s.io/client-go/tools/clientcmd/api/v1/defaults.go b/vendor/k8s.io/client-go/tools/clientcmd/api/v1/defaults.go new file mode 100644 index 0000000000000..bf513dc7c7b91 --- /dev/null +++ b/vendor/k8s.io/client-go/tools/clientcmd/api/v1/defaults.go @@ -0,0 +1,37 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "k8s.io/apimachinery/pkg/runtime" +) + +func addDefaultingFuncs(scheme *runtime.Scheme) error { + return RegisterDefaults(scheme) +} + +func SetDefaults_ExecConfig(exec *ExecConfig) { + if len(exec.InteractiveMode) == 0 { + switch exec.APIVersion { + case "client.authentication.k8s.io/v1beta1", "client.authentication.k8s.io/v1alpha1": + // default to IfAvailableExecInteractiveMode for backwards compatibility + exec.InteractiveMode = IfAvailableExecInteractiveMode + default: + // require other versions to explicitly declare whether they want stdin or not + } + } +} diff --git a/vendor/k8s.io/client-go/tools/clientcmd/api/v1/doc.go b/vendor/k8s.io/client-go/tools/clientcmd/api/v1/doc.go index ba5572ab07dd6..3ccdebc1c37ba 100644 --- a/vendor/k8s.io/client-go/tools/clientcmd/api/v1/doc.go +++ b/vendor/k8s.io/client-go/tools/clientcmd/api/v1/doc.go @@ -16,5 +16,6 @@ limitations under the License. // +k8s:conversion-gen=k8s.io/client-go/tools/clientcmd/api // +k8s:deepcopy-gen=package +// +k8s:defaulter-gen=Kind package v1 diff --git a/vendor/k8s.io/client-go/tools/clientcmd/api/v1/register.go b/vendor/k8s.io/client-go/tools/clientcmd/api/v1/register.go index 24f6284c574e6..4a4d4a55fb486 100644 --- a/vendor/k8s.io/client-go/tools/clientcmd/api/v1/register.go +++ b/vendor/k8s.io/client-go/tools/clientcmd/api/v1/register.go @@ -37,7 +37,7 @@ func init() { // We only register manually written functions here. The registration of the // generated functions takes place in the generated files. The separation // makes the code compile even when the generated files are missing. - localSchemeBuilder.Register(addKnownTypes) + localSchemeBuilder.Register(addKnownTypes, addDefaultingFuncs) } func addKnownTypes(scheme *runtime.Scheme) error { diff --git a/vendor/k8s.io/client-go/tools/clientcmd/api/v1/types.go b/vendor/k8s.io/client-go/tools/clientcmd/api/v1/types.go index 8c29b39c1b201..757ed817b2925 100644 --- a/vendor/k8s.io/client-go/tools/clientcmd/api/v1/types.go +++ b/vendor/k8s.io/client-go/tools/clientcmd/api/v1/types.go @@ -111,10 +111,13 @@ type AuthInfo struct { // TokenFile is a pointer to a file that contains a bearer token (as described above). If both Token and TokenFile are present, Token takes precedence. // +optional TokenFile string `json:"tokenFile,omitempty"` - // Impersonate is the username to imperonate. The name matches the flag. + // Impersonate is the username to impersonate. The name matches the flag. // +optional Impersonate string `json:"as,omitempty"` - // ImpersonateGroups is the groups to imperonate. + // ImpersonateUID is the uid to impersonate. + // +optional + ImpersonateUID string `json:"as-uid,omitempty"` + // ImpersonateGroups is the groups to impersonate. // +optional ImpersonateGroups []string `json:"as-groups,omitempty"` // ImpersonateUserExtra contains additional information for impersonated user. @@ -221,6 +224,18 @@ type ExecConfig struct { // to false. Package k8s.io/client-go/tools/auth/exec provides helper methods for // reading this environment variable. 
ProvideClusterInfo bool `json:"provideClusterInfo"` + + // InteractiveMode determines this plugin's relationship with standard input. Valid + // values are "Never" (this exec plugin never uses standard input), "IfAvailable" (this + // exec plugin wants to use standard input if it is available), or "Always" (this exec + // plugin requires standard input to function). See ExecInteractiveMode values for more + // details. + // + // If APIVersion is client.authentication.k8s.io/v1alpha1 or + // client.authentication.k8s.io/v1beta1, then this field is optional and defaults + // to "IfAvailable" when unset. Otherwise, this field is required. + //+optional + InteractiveMode ExecInteractiveMode `json:"interactiveMode,omitempty"` } // ExecEnvVar is used for setting environment variables when executing an exec-based @@ -229,3 +244,23 @@ type ExecEnvVar struct { Name string `json:"name"` Value string `json:"value"` } + +// ExecInteractiveMode is a string that describes an exec plugin's relationship with standard input. +type ExecInteractiveMode string + +const ( + // NeverExecInteractiveMode declares that this exec plugin never needs to use standard + // input, and therefore the exec plugin will be run regardless of whether standard input is + // available for user input. + NeverExecInteractiveMode ExecInteractiveMode = "Never" + // IfAvailableExecInteractiveMode declares that this exec plugin would like to use standard input + // if it is available, but can still operate if standard input is not available. Therefore, the + // exec plugin will be run regardless of whether stdin is available for user input. If standard + // input is available for user input, then it will be provided to this exec plugin. + IfAvailableExecInteractiveMode ExecInteractiveMode = "IfAvailable" + // AlwaysExecInteractiveMode declares that this exec plugin requires standard input in order to + // run, and therefore the exec plugin will only be run if standard input is available for user + // input. If standard input is not available for user input, then the exec plugin will not be run + // and an error will be returned by the exec plugin runner. 
+ AlwaysExecInteractiveMode ExecInteractiveMode = "Always" +) diff --git a/vendor/k8s.io/client-go/tools/clientcmd/api/v1/zz_generated.conversion.go b/vendor/k8s.io/client-go/tools/clientcmd/api/v1/zz_generated.conversion.go index 26e96529d38ed..a13bae64da3a7 100644 --- a/vendor/k8s.io/client-go/tools/clientcmd/api/v1/zz_generated.conversion.go +++ b/vendor/k8s.io/client-go/tools/clientcmd/api/v1/zz_generated.conversion.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* @@ -166,6 +167,7 @@ func autoConvert_v1_AuthInfo_To_api_AuthInfo(in *AuthInfo, out *api.AuthInfo, s out.Token = in.Token out.TokenFile = in.TokenFile out.Impersonate = in.Impersonate + out.ImpersonateUID = in.ImpersonateUID out.ImpersonateGroups = *(*[]string)(unsafe.Pointer(&in.ImpersonateGroups)) out.ImpersonateUserExtra = *(*map[string][]string)(unsafe.Pointer(&in.ImpersonateUserExtra)) out.Username = in.Username @@ -200,6 +202,7 @@ func autoConvert_api_AuthInfo_To_v1_AuthInfo(in *api.AuthInfo, out *AuthInfo, s out.Token = in.Token out.TokenFile = in.TokenFile out.Impersonate = in.Impersonate + out.ImpersonateUID = in.ImpersonateUID out.ImpersonateGroups = *(*[]string)(unsafe.Pointer(&in.ImpersonateGroups)) out.ImpersonateUserExtra = *(*map[string][]string)(unsafe.Pointer(&in.ImpersonateUserExtra)) out.Username = in.Username @@ -376,6 +379,7 @@ func autoConvert_v1_ExecConfig_To_api_ExecConfig(in *ExecConfig, out *api.ExecCo out.APIVersion = in.APIVersion out.InstallHint = in.InstallHint out.ProvideClusterInfo = in.ProvideClusterInfo + out.InteractiveMode = api.ExecInteractiveMode(in.InteractiveMode) return nil } @@ -392,6 +396,9 @@ func autoConvert_api_ExecConfig_To_v1_ExecConfig(in *api.ExecConfig, out *ExecCo out.InstallHint = in.InstallHint out.ProvideClusterInfo = in.ProvideClusterInfo // INFO: in.Config opted out of conversion generation + out.InteractiveMode = ExecInteractiveMode(in.InteractiveMode) + // INFO: in.StdinUnavailable opted out of conversion generation + // INFO: in.StdinUnavailableMessage opted out of conversion generation return nil } diff --git a/vendor/k8s.io/client-go/tools/clientcmd/api/v1/zz_generated.deepcopy.go b/vendor/k8s.io/client-go/tools/clientcmd/api/v1/zz_generated.deepcopy.go index da519dfa3bc31..78492598bc9a7 100644 --- a/vendor/k8s.io/client-go/tools/clientcmd/api/v1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/client-go/tools/clientcmd/api/v1/zz_generated.deepcopy.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* diff --git a/vendor/k8s.io/client-go/tools/clientcmd/api/v1/zz_generated.defaults.go b/vendor/k8s.io/client-go/tools/clientcmd/api/v1/zz_generated.defaults.go new file mode 100644 index 0000000000000..6a57decf62aa9 --- /dev/null +++ b/vendor/k8s.io/client-go/tools/clientcmd/api/v1/zz_generated.defaults.go @@ -0,0 +1,43 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by defaulter-gen. 
DO NOT EDIT. + +package v1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// RegisterDefaults adds defaulters functions to the given scheme. +// Public to allow building arbitrary schemes. +// All generated defaulters are covering - they call all nested defaulters. +func RegisterDefaults(scheme *runtime.Scheme) error { + scheme.AddTypeDefaultingFunc(&Config{}, func(obj interface{}) { SetObjectDefaults_Config(obj.(*Config)) }) + return nil +} + +func SetObjectDefaults_Config(in *Config) { + for i := range in.AuthInfos { + a := &in.AuthInfos[i] + if a.AuthInfo.Exec != nil { + SetDefaults_ExecConfig(a.AuthInfo.Exec) + } + } +} diff --git a/vendor/k8s.io/client-go/tools/clientcmd/api/zz_generated.deepcopy.go b/vendor/k8s.io/client-go/tools/clientcmd/api/zz_generated.deepcopy.go index a04de6260ba4d..86e4ddef1b13a 100644 --- a/vendor/k8s.io/client-go/tools/clientcmd/api/zz_generated.deepcopy.go +++ b/vendor/k8s.io/client-go/tools/clientcmd/api/zz_generated.deepcopy.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* diff --git a/vendor/k8s.io/client-go/tools/clientcmd/client_config.go b/vendor/k8s.io/client-go/tools/clientcmd/client_config.go index 0a905490c902e..cc37c9fbf6bde 100644 --- a/vendor/k8s.io/client-go/tools/clientcmd/client_config.go +++ b/vendor/k8s.io/client-go/tools/clientcmd/client_config.go @@ -181,6 +181,7 @@ func (config *DirectClientConfig) ClientConfig() (*restclient.Config, error) { if len(configAuthInfo.Impersonate) > 0 { clientConfig.Impersonate = restclient.ImpersonationConfig{ UserName: configAuthInfo.Impersonate, + UID: configAuthInfo.ImpersonateUID, Groups: configAuthInfo.ImpersonateGroups, Extra: configAuthInfo.ImpersonateUserExtra, } @@ -255,6 +256,7 @@ func (config *DirectClientConfig) getUserIdentificationPartialConfig(configAuthI if len(configAuthInfo.Impersonate) > 0 { mergedConfig.Impersonate = restclient.ImpersonationConfig{ UserName: configAuthInfo.Impersonate, + UID: configAuthInfo.ImpersonateUID, Groups: configAuthInfo.ImpersonateGroups, Extra: configAuthInfo.ImpersonateUserExtra, } diff --git a/vendor/k8s.io/client-go/tools/clientcmd/config.go b/vendor/k8s.io/client-go/tools/clientcmd/config.go index 12c8b84f37785..31f8963160e75 100644 --- a/vendor/k8s.io/client-go/tools/clientcmd/config.go +++ b/vendor/k8s.io/client-go/tools/clientcmd/config.go @@ -135,11 +135,7 @@ func (o *PathOptions) GetDefaultFilename() string { } func (o *PathOptions) IsExplicitFile() bool { - if len(o.LoadingRules.ExplicitPath) > 0 { - return true - } - - return false + return len(o.LoadingRules.ExplicitPath) > 0 } func (o *PathOptions) GetExplicitFile() string { diff --git a/vendor/k8s.io/client-go/tools/clientcmd/overrides.go b/vendor/k8s.io/client-go/tools/clientcmd/overrides.go index 95cba0fac27f5..ff643cc13dac1 100644 --- a/vendor/k8s.io/client-go/tools/clientcmd/overrides.go +++ b/vendor/k8s.io/client-go/tools/clientcmd/overrides.go @@ -53,6 +53,7 @@ type AuthOverrideFlags struct { ClientKey FlagInfo Token FlagInfo Impersonate FlagInfo + ImpersonateUID FlagInfo ImpersonateGroups FlagInfo Username FlagInfo Password FlagInfo @@ -154,6 +155,7 @@ const ( FlagEmbedCerts = "embed-certs" FlagBearerToken = "token" FlagImpersonate = "as" + FlagImpersonateUID = "as-uid" FlagImpersonateGroup = "as-group" FlagUsername = "username" FlagPassword = "password" @@ -179,6 +181,7 @@ func RecommendedAuthOverrideFlags(prefix string) AuthOverrideFlags { ClientKey: FlagInfo{prefix + FlagKeyFile, "", "", "Path to a client key file for TLS"}, Token: 
FlagInfo{prefix + FlagBearerToken, "", "", "Bearer token for authentication to the API server"}, Impersonate: FlagInfo{prefix + FlagImpersonate, "", "", "Username to impersonate for the operation"}, + ImpersonateUID: FlagInfo{prefix + FlagImpersonateUID, "", "", "UID to impersonate for the operation"}, ImpersonateGroups: FlagInfo{prefix + FlagImpersonateGroup, "", "", "Group to impersonate for the operation, this flag can be repeated to specify multiple groups."}, Username: FlagInfo{prefix + FlagUsername, "", "", "Username for basic authentication to the API server"}, Password: FlagInfo{prefix + FlagPassword, "", "", "Password for basic authentication to the API server"}, @@ -219,6 +222,7 @@ func BindAuthInfoFlags(authInfo *clientcmdapi.AuthInfo, flags *pflag.FlagSet, fl flagNames.ClientKey.BindStringFlag(flags, &authInfo.ClientKey).AddSecretAnnotation(flags) flagNames.Token.BindStringFlag(flags, &authInfo.Token).AddSecretAnnotation(flags) flagNames.Impersonate.BindStringFlag(flags, &authInfo.Impersonate).AddSecretAnnotation(flags) + flagNames.ImpersonateUID.BindStringFlag(flags, &authInfo.ImpersonateUID).AddSecretAnnotation(flags) flagNames.ImpersonateGroups.BindStringArrayFlag(flags, &authInfo.ImpersonateGroups).AddSecretAnnotation(flags) flagNames.Username.BindStringFlag(flags, &authInfo.Username).AddSecretAnnotation(flags) flagNames.Password.BindStringFlag(flags, &authInfo.Password).AddSecretAnnotation(flags) diff --git a/vendor/k8s.io/client-go/tools/clientcmd/validation.go b/vendor/k8s.io/client-go/tools/clientcmd/validation.go index f77ef04fe54d6..2ae1eb706afca 100644 --- a/vendor/k8s.io/client-go/tools/clientcmd/validation.go +++ b/vendor/k8s.io/client-go/tools/clientcmd/validation.go @@ -229,7 +229,7 @@ func validateClusterInfo(clusterName string, clusterInfo clientcmdapi.Cluster) [ } if proxyURL := clusterInfo.ProxyURL; proxyURL != "" { if _, err := parseProxyURL(proxyURL); err != nil { - validationErrors = append(validationErrors, fmt.Errorf("invalid 'proxy-url' %q for cluster %q: %v", proxyURL, clusterName, err)) + validationErrors = append(validationErrors, fmt.Errorf("invalid 'proxy-url' %q for cluster %q: %w", proxyURL, clusterName, err)) } } // Make sure CA data and CA file aren't both specified @@ -239,7 +239,7 @@ func validateClusterInfo(clusterName string, clusterInfo clientcmdapi.Cluster) [ if len(clusterInfo.CertificateAuthority) != 0 { clientCertCA, err := os.Open(clusterInfo.CertificateAuthority) if err != nil { - validationErrors = append(validationErrors, fmt.Errorf("unable to read certificate-authority %v for %v due to %v", clusterInfo.CertificateAuthority, clusterName, err)) + validationErrors = append(validationErrors, fmt.Errorf("unable to read certificate-authority %v for %v due to %w", clusterInfo.CertificateAuthority, clusterName, err)) } else { defer clientCertCA.Close() } @@ -278,7 +278,7 @@ func validateAuthInfo(authInfoName string, authInfo clientcmdapi.AuthInfo) []err if len(authInfo.ClientCertificate) != 0 { clientCertFile, err := os.Open(authInfo.ClientCertificate) if err != nil { - validationErrors = append(validationErrors, fmt.Errorf("unable to read client-cert %v for %v due to %v", authInfo.ClientCertificate, authInfoName, err)) + validationErrors = append(validationErrors, fmt.Errorf("unable to read client-cert %v for %v due to %w", authInfo.ClientCertificate, authInfoName, err)) } else { defer clientCertFile.Close() } @@ -286,7 +286,7 @@ func validateAuthInfo(authInfoName string, authInfo clientcmdapi.AuthInfo) []err if len(authInfo.ClientKey) 
!= 0 { clientKeyFile, err := os.Open(authInfo.ClientKey) if err != nil { - validationErrors = append(validationErrors, fmt.Errorf("unable to read client-key %v for %v due to %v", authInfo.ClientKey, authInfoName, err)) + validationErrors = append(validationErrors, fmt.Errorf("unable to read client-key %v for %v due to %w", authInfo.ClientKey, authInfoName, err)) } else { defer clientKeyFile.Close() } @@ -308,6 +308,14 @@ func validateAuthInfo(authInfoName string, authInfo clientcmdapi.AuthInfo) []err validationErrors = append(validationErrors, fmt.Errorf("env variable name must be specified for %v to use exec authentication plugin", authInfoName)) } } + switch authInfo.Exec.InteractiveMode { + case "": + validationErrors = append(validationErrors, fmt.Errorf("interactiveMode must be specified for %v to use exec authentication plugin", authInfoName)) + case clientcmdapi.NeverExecInteractiveMode, clientcmdapi.IfAvailableExecInteractiveMode, clientcmdapi.AlwaysExecInteractiveMode: + // These are valid + default: + validationErrors = append(validationErrors, fmt.Errorf("invalid interactiveMode for %v: %q", authInfoName, authInfo.Exec.InteractiveMode)) + } } // authPath also provides information for the client to identify the server, so allow multiple auth methods in that case @@ -315,9 +323,9 @@ func validateAuthInfo(authInfoName string, authInfo clientcmdapi.AuthInfo) []err validationErrors = append(validationErrors, fmt.Errorf("more than one authentication method found for %v; found %v, only one is allowed", authInfoName, methods)) } - // ImpersonateGroups or ImpersonateUserExtra should be requested with a user - if (len(authInfo.ImpersonateGroups) > 0 || len(authInfo.ImpersonateUserExtra) > 0) && (len(authInfo.Impersonate) == 0) { - validationErrors = append(validationErrors, fmt.Errorf("requesting groups or user-extra for %v without impersonating a user", authInfoName)) + // ImpersonateUID, ImpersonateGroups or ImpersonateUserExtra should be requested with a user + if (len(authInfo.ImpersonateUID) > 0 || len(authInfo.ImpersonateGroups) > 0 || len(authInfo.ImpersonateUserExtra) > 0) && (len(authInfo.Impersonate) == 0) { + validationErrors = append(validationErrors, fmt.Errorf("requesting uid, groups or user-extra for %v without impersonating a user", authInfoName)) } return validationErrors } diff --git a/vendor/k8s.io/client-go/tools/pager/pager.go b/vendor/k8s.io/client-go/tools/pager/pager.go index f6c6a01298dc1..805859e092bc0 100644 --- a/vendor/k8s.io/client-go/tools/pager/pager.go +++ b/vendor/k8s.io/client-go/tools/pager/pager.go @@ -78,6 +78,7 @@ func (p *ListPager) List(ctx context.Context, options metav1.ListOptions) (runti options.Limit = p.PageSize } requestedResourceVersion := options.ResourceVersion + requestedResourceVersionMatch := options.ResourceVersionMatch var list *metainternalversion.List paginatedResult := false @@ -102,6 +103,7 @@ func (p *ListPager) List(ctx context.Context, options metav1.ListOptions) (runti options.Limit = 0 options.Continue = "" options.ResourceVersion = requestedResourceVersion + options.ResourceVersionMatch = requestedResourceVersionMatch result, err := p.PageFn(ctx, options) return result, paginatedResult, err } @@ -135,10 +137,11 @@ func (p *ListPager) List(ctx context.Context, options metav1.ListOptions) (runti // set the next loop up options.Continue = m.GetContinue() - // Clear the ResourceVersion on the subsequent List calls to avoid the + // Clear the ResourceVersion(Match) on the subsequent List calls to avoid the // `specifying 
resource version is not allowed when using continue` error. // See https://github.com/kubernetes/kubernetes/issues/85221#issuecomment-553748143. options.ResourceVersion = "" + options.ResourceVersionMatch = "" // At this point, result is already paginated. paginatedResult = true } diff --git a/vendor/k8s.io/client-go/transport/config.go b/vendor/k8s.io/client-go/transport/config.go index 070474831756b..89de798f605e3 100644 --- a/vendor/k8s.io/client-go/transport/config.go +++ b/vendor/k8s.io/client-go/transport/config.go @@ -82,6 +82,8 @@ type Config struct { type ImpersonationConfig struct { // UserName matches user.Info.GetName() UserName string + // UID matches user.Info.GetUID() + UID string // Groups matches user.Info.GetGroups() Groups []string // Extra matches user.Info.GetExtra() diff --git a/vendor/k8s.io/client-go/transport/round_trippers.go b/vendor/k8s.io/client-go/transport/round_trippers.go index cd0a4455f194d..4c74606a98e0f 100644 --- a/vendor/k8s.io/client-go/transport/round_trippers.go +++ b/vendor/k8s.io/client-go/transport/round_trippers.go @@ -17,9 +17,12 @@ limitations under the License. package transport import ( + "crypto/tls" "fmt" "net/http" + "net/http/httptrace" "strings" + "sync" "time" "golang.org/x/oauth2" @@ -57,6 +60,7 @@ func HTTPWrappersForConfig(config *Config, rt http.RoundTripper) (http.RoundTrip rt = NewUserAgentRoundTripper(config.UserAgent, rt) } if len(config.Impersonate.UserName) > 0 || + len(config.Impersonate.UID) > 0 || len(config.Impersonate.Groups) > 0 || len(config.Impersonate.Extra) > 0 { rt = NewImpersonatingRoundTripper(config.Impersonate, rt) @@ -68,7 +72,7 @@ func HTTPWrappersForConfig(config *Config, rt http.RoundTripper) (http.RoundTrip func DebugWrappers(rt http.RoundTripper) http.RoundTripper { switch { case bool(klog.V(9).Enabled()): - rt = NewDebuggingRoundTripper(rt, DebugCurlCommand, DebugURLTiming, DebugResponseHeaders) + rt = NewDebuggingRoundTripper(rt, DebugCurlCommand, DebugDetailedTiming, DebugResponseHeaders) case bool(klog.V(8).Enabled()): rt = NewDebuggingRoundTripper(rt, DebugJustURL, DebugRequestHeaders, DebugResponseStatus, DebugResponseHeaders) case bool(klog.V(7).Enabled()): @@ -88,6 +92,8 @@ type authProxyRoundTripper struct { rt http.RoundTripper } +var _ utilnet.RoundTripperWrapper = &authProxyRoundTripper{} + // NewAuthProxyRoundTripper provides a roundtripper which will add auth proxy fields to requests for // authentication terminating proxy cases // assuming you pull the user from the context: @@ -146,6 +152,8 @@ type userAgentRoundTripper struct { rt http.RoundTripper } +var _ utilnet.RoundTripperWrapper = &userAgentRoundTripper{} + // NewUserAgentRoundTripper will add User-Agent header to a request unless it has already been set. func NewUserAgentRoundTripper(agent string, rt http.RoundTripper) http.RoundTripper { return &userAgentRoundTripper{agent, rt} @@ -172,6 +180,8 @@ type basicAuthRoundTripper struct { rt http.RoundTripper } +var _ utilnet.RoundTripperWrapper = &basicAuthRoundTripper{} + // NewBasicAuthRoundTripper will apply a BASIC auth authorization header to a // request unless it has already been set. 
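// A minimal usage sketch (illustrative only; any http.RoundTripper can be wrapped):
//
//	rt := NewBasicAuthRoundTripper("user", "secret", http.DefaultTransport)
//	client := &http.Client{Transport: rt}
//	resp, err := client.Get("https://example.invalid/api") // sends an Authorization: Basic header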
func NewBasicAuthRoundTripper(username, password string, rt http.RoundTripper) http.RoundTripper { @@ -199,6 +209,9 @@ const ( // ImpersonateUserHeader is used to impersonate a particular user during an API server request ImpersonateUserHeader = "Impersonate-User" + // ImpersonateUIDHeader is used to impersonate a particular UID during an API server request + ImpersonateUIDHeader = "Impersonate-Uid" + // ImpersonateGroupHeader is used to impersonate a particular group during an API server request. // It can be repeated multiplied times for multiple groups. ImpersonateGroupHeader = "Impersonate-Group" @@ -218,6 +231,8 @@ type impersonatingRoundTripper struct { delegate http.RoundTripper } +var _ utilnet.RoundTripperWrapper = &impersonatingRoundTripper{} + // NewImpersonatingRoundTripper will add an Act-As header to a request unless it has already been set. func NewImpersonatingRoundTripper(impersonate ImpersonationConfig, delegate http.RoundTripper) http.RoundTripper { return &impersonatingRoundTripper{impersonate, delegate} @@ -230,7 +245,9 @@ func (rt *impersonatingRoundTripper) RoundTrip(req *http.Request) (*http.Respons } req = utilnet.CloneRequest(req) req.Header.Set(ImpersonateUserHeader, rt.impersonate.UserName) - + if rt.impersonate.UID != "" { + req.Header.Set(ImpersonateUIDHeader, rt.impersonate.UID) + } for _, group := range rt.impersonate.Groups { req.Header.Add(ImpersonateGroupHeader, group) } @@ -255,6 +272,8 @@ type bearerAuthRoundTripper struct { rt http.RoundTripper } +var _ utilnet.RoundTripperWrapper = &bearerAuthRoundTripper{} + // NewBearerAuthRoundTripper adds the provided bearer token to a request // unless the authorization header has already been set. func NewBearerAuthRoundTripper(bearer string, rt http.RoundTripper) http.RoundTripper { @@ -314,6 +333,14 @@ type requestInfo struct { ResponseHeaders http.Header ResponseErr error + muTrace sync.Mutex // Protect trace fields + DNSLookup time.Duration + Dialing time.Duration + GetConnection time.Duration + TLSHandshake time.Duration + ServerProcessing time.Duration + ConnectionReused bool + Duration time.Duration } @@ -346,7 +373,7 @@ func (r *requestInfo) toCurl() string { } } - return fmt.Sprintf("curl -k -v -X%s %s '%s'", r.RequestVerb, headers, r.RequestURL) + return fmt.Sprintf("curl -v -X%s %s '%s'", r.RequestVerb, headers, r.RequestURL) } // debuggingRoundTripper will display information about the requests passing @@ -356,6 +383,8 @@ type debuggingRoundTripper struct { levels map[DebugLevel]bool } +var _ utilnet.RoundTripperWrapper = &debuggingRoundTripper{} + // DebugLevel is used to enable debugging of certain // HTTP requests and responses fields via the debuggingRoundTripper. type DebugLevel int @@ -374,6 +403,8 @@ const ( DebugResponseStatus // DebugResponseHeaders will add to the debug output the HTTP response headers. DebugResponseHeaders + // DebugDetailedTiming will add to the debug output the duration of the HTTP requests events. 
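// A hedged sketch of enabling it directly; NewDebuggingRoundTripper (below) is
// variadic over levels, and DebugWrappers normally selects the levels from the
// klog verbosity instead:
//
//	rt := NewDebuggingRoundTripper(http.DefaultTransport, DebugDetailedTiming)
//	client := &http.Client{Transport: rt} // logs DNS, dial, TLS handshake and server-processing durations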
+ DebugDetailedTiming ) // NewDebuggingRoundTripper allows to display in the logs output debug information @@ -445,6 +476,74 @@ func (rt *debuggingRoundTripper) RoundTrip(req *http.Request) (*http.Response, e } startTime := time.Now() + + if rt.levels[DebugDetailedTiming] { + var getConn, dnsStart, dialStart, tlsStart, serverStart time.Time + var host string + trace := &httptrace.ClientTrace{ + // DNS + DNSStart: func(info httptrace.DNSStartInfo) { + reqInfo.muTrace.Lock() + defer reqInfo.muTrace.Unlock() + dnsStart = time.Now() + host = info.Host + }, + DNSDone: func(info httptrace.DNSDoneInfo) { + reqInfo.muTrace.Lock() + defer reqInfo.muTrace.Unlock() + reqInfo.DNSLookup = time.Now().Sub(dnsStart) + klog.Infof("HTTP Trace: DNS Lookup for %s resolved to %v", host, info.Addrs) + }, + // Dial + ConnectStart: func(network, addr string) { + reqInfo.muTrace.Lock() + defer reqInfo.muTrace.Unlock() + dialStart = time.Now() + }, + ConnectDone: func(network, addr string, err error) { + reqInfo.muTrace.Lock() + defer reqInfo.muTrace.Unlock() + reqInfo.Dialing = time.Now().Sub(dialStart) + if err != nil { + klog.Infof("HTTP Trace: Dial to %s:%s failed: %v", network, addr, err) + } else { + klog.Infof("HTTP Trace: Dial to %s:%s succeed", network, addr) + } + }, + // TLS + TLSHandshakeStart: func() { + tlsStart = time.Now() + }, + TLSHandshakeDone: func(_ tls.ConnectionState, _ error) { + reqInfo.muTrace.Lock() + defer reqInfo.muTrace.Unlock() + reqInfo.TLSHandshake = time.Now().Sub(tlsStart) + }, + // Connection (it can be DNS + Dial or just the time to get one from the connection pool) + GetConn: func(hostPort string) { + getConn = time.Now() + }, + GotConn: func(info httptrace.GotConnInfo) { + reqInfo.muTrace.Lock() + defer reqInfo.muTrace.Unlock() + reqInfo.GetConnection = time.Now().Sub(getConn) + reqInfo.ConnectionReused = info.Reused + }, + // Server Processing (time since we wrote the request until first byte is received) + WroteRequest: func(info httptrace.WroteRequestInfo) { + reqInfo.muTrace.Lock() + defer reqInfo.muTrace.Unlock() + serverStart = time.Now() + }, + GotFirstResponseByte: func() { + reqInfo.muTrace.Lock() + defer reqInfo.muTrace.Unlock() + reqInfo.ServerProcessing = time.Now().Sub(serverStart) + }, + } + req = req.WithContext(httptrace.WithClientTrace(req.Context(), trace)) + } + response, err := rt.delegatedRoundTripper.RoundTrip(req) reqInfo.Duration = time.Since(startTime) @@ -453,6 +552,24 @@ func (rt *debuggingRoundTripper) RoundTrip(req *http.Request) (*http.Response, e if rt.levels[DebugURLTiming] { klog.Infof("%s %s %s in %d milliseconds", reqInfo.RequestVerb, reqInfo.RequestURL, reqInfo.ResponseStatus, reqInfo.Duration.Nanoseconds()/int64(time.Millisecond)) } + if rt.levels[DebugDetailedTiming] { + stats := "" + if !reqInfo.ConnectionReused { + stats += fmt.Sprintf(`DNSLookup %d ms Dial %d ms TLSHandshake %d ms`, + reqInfo.DNSLookup.Nanoseconds()/int64(time.Millisecond), + reqInfo.Dialing.Nanoseconds()/int64(time.Millisecond), + reqInfo.TLSHandshake.Nanoseconds()/int64(time.Millisecond), + ) + } else { + stats += fmt.Sprintf(`GetConnection %d ms`, reqInfo.GetConnection.Nanoseconds()/int64(time.Millisecond)) + } + if reqInfo.ServerProcessing != 0 { + stats += fmt.Sprintf(` ServerProcessing %d ms`, reqInfo.ServerProcessing.Nanoseconds()/int64(time.Millisecond)) + } + stats += fmt.Sprintf(` Duration %d ms`, reqInfo.Duration.Nanoseconds()/int64(time.Millisecond)) + klog.Infof("HTTP Statistics: %s", stats) + } + if rt.levels[DebugResponseStatus] { klog.Infof("Response 
Status: %s in %d milliseconds", reqInfo.ResponseStatus, reqInfo.Duration.Nanoseconds()/int64(time.Millisecond)) } diff --git a/vendor/k8s.io/client-go/transport/token_source.go b/vendor/k8s.io/client-go/transport/token_source.go index fea02e6111546..68a0a704fe458 100644 --- a/vendor/k8s.io/client-go/transport/token_source.go +++ b/vendor/k8s.io/client-go/transport/token_source.go @@ -26,6 +26,7 @@ import ( "golang.org/x/oauth2" + utilnet "k8s.io/apimachinery/pkg/util/net" "k8s.io/klog/v2" ) @@ -95,6 +96,8 @@ type tokenSourceTransport struct { src ResettableTokenSource } +var _ utilnet.RoundTripperWrapper = &tokenSourceTransport{} + func (tst *tokenSourceTransport) RoundTrip(req *http.Request) (*http.Response, error) { // This is to allow --token to override other bearer token providers. if req.Header.Get("Authorization") != "" { @@ -119,6 +122,8 @@ func (tst *tokenSourceTransport) CancelRequest(req *http.Request) { tryCancelRequest(tst.ort, req) } +func (tst *tokenSourceTransport) WrappedRoundTripper() http.RoundTripper { return tst.base } + type fileTokenSource struct { path string period time.Duration diff --git a/vendor/k8s.io/client-go/transport/transport.go b/vendor/k8s.io/client-go/transport/transport.go index 88d89494d728c..b4a7bfa67cda1 100644 --- a/vendor/k8s.io/client-go/transport/transport.go +++ b/vendor/k8s.io/client-go/transport/transport.go @@ -20,6 +20,7 @@ import ( "context" "crypto/tls" "crypto/x509" + "encoding/pem" "fmt" "io/ioutil" "net/http" @@ -79,7 +80,11 @@ func TLSConfigFor(c *Config) (*tls.Config, error) { } if c.HasCA() { - tlsConfig.RootCAs = rootCertPool(c.TLS.CAData) + rootCAs, err := rootCertPool(c.TLS.CAData) + if err != nil { + return nil, fmt.Errorf("unable to load root certificates: %w", err) + } + tlsConfig.RootCAs = rootCAs } var staticCert *tls.Certificate @@ -176,18 +181,41 @@ func dataFromSliceOrFile(data []byte, file string) ([]byte, error) { // rootCertPool returns nil if caData is empty. When passed along, this will mean "use system CAs". // When caData is not empty, it will be the ONLY information used in the CertPool. -func rootCertPool(caData []byte) *x509.CertPool { +func rootCertPool(caData []byte) (*x509.CertPool, error) { // What we really want is a copy of x509.systemRootsPool, but that isn't exposed. It's difficult to build (see the go // code for a look at the platform specific insanity), so we'll use the fact that RootCAs == nil gives us the system values // It doesn't allow trusting either/or, but hopefully that won't be an issue if len(caData) == 0 { - return nil + return nil, nil } // if we have caData, use it certPool := x509.NewCertPool() - certPool.AppendCertsFromPEM(caData) - return certPool + if ok := certPool.AppendCertsFromPEM(caData); !ok { + return nil, createErrorParsingCAData(caData) + } + return certPool, nil +} + +// createErrorParsingCAData ALWAYS returns an error. 
We call it because know we failed to AppendCertsFromPEM +// but we don't know the specific error because that API is just true/false +func createErrorParsingCAData(pemCerts []byte) error { + for len(pemCerts) > 0 { + var block *pem.Block + block, pemCerts = pem.Decode(pemCerts) + if block == nil { + return fmt.Errorf("unable to parse bytes as PEM block") + } + + if block.Type != "CERTIFICATE" || len(block.Headers) != 0 { + continue + } + + if _, err := x509.ParseCertificate(block.Bytes); err != nil { + return fmt.Errorf("failed to parse certificate: %w", err) + } + } + return fmt.Errorf("no valid certificate authority data seen") } // WrapperFunc wraps an http.RoundTripper when a new transport @@ -269,7 +297,7 @@ type certificateCacheEntry struct { // isStale returns true when this cache entry is too old to be usable func (c *certificateCacheEntry) isStale() bool { - return time.Now().Sub(c.birth) > time.Second + return time.Since(c.birth) > time.Second } func newCertificateCacheEntry(certFile, keyFile string) certificateCacheEntry { diff --git a/vendor/k8s.io/client-go/util/cert/cert.go b/vendor/k8s.io/client-go/util/cert/cert.go index 3da144163683a..75143ec071769 100644 --- a/vendor/k8s.io/client-go/util/cert/cert.go +++ b/vendor/k8s.io/client-go/util/cert/cert.go @@ -33,6 +33,7 @@ import ( "time" "k8s.io/client-go/util/keyutil" + netutils "k8s.io/utils/net" ) const duration365d = time.Hour * 24 * 365 @@ -62,6 +63,7 @@ func NewSelfSignedCACert(cfg Config, key crypto.Signer) (*x509.Certificate, erro CommonName: cfg.CommonName, Organization: cfg.Organization, }, + DNSNames: []string{cfg.CommonName}, NotBefore: now.UTC(), NotAfter: now.Add(duration365d * 10).UTC(), KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign, @@ -156,7 +158,7 @@ func GenerateSelfSignedCertKeyWithFixtures(host string, alternateIPs []net.IP, a BasicConstraintsValid: true, } - if ip := net.ParseIP(host); ip != nil { + if ip := netutils.ParseIPSloppy(host); ip != nil { template.IPAddresses = append(template.IPAddresses, ip) } else { template.DNSNames = append(template.DNSNames, host) diff --git a/vendor/k8s.io/client-go/util/flowcontrol/backoff.go b/vendor/k8s.io/client-go/util/flowcontrol/backoff.go index c48ba03e8cd1d..3ef88dbdb894e 100644 --- a/vendor/k8s.io/client-go/util/flowcontrol/backoff.go +++ b/vendor/k8s.io/client-go/util/flowcontrol/backoff.go @@ -17,10 +17,12 @@ limitations under the License. package flowcontrol import ( + "math/rand" "sync" "time" - "k8s.io/apimachinery/pkg/util/clock" + "k8s.io/utils/clock" + testingclock "k8s.io/utils/clock/testing" "k8s.io/utils/integer" ) @@ -35,23 +37,43 @@ type Backoff struct { defaultDuration time.Duration maxDuration time.Duration perItemBackoff map[string]*backoffEntry + rand *rand.Rand + + // maxJitterFactor adds jitter to the exponentially backed off delay. + // if maxJitterFactor is zero, no jitter is added to the delay in + // order to maintain current behavior. 
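	// For example, with maxJitterFactor = 0.5 an entry backing off from 2s moves to
	// 2s*2 + rand.Float64()*0.5*2s on the next failure, i.e. somewhere in [4s, 5s).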
+ maxJitterFactor float64 } -func NewFakeBackOff(initial, max time.Duration, tc *clock.FakeClock) *Backoff { - return &Backoff{ - perItemBackoff: map[string]*backoffEntry{}, - Clock: tc, - defaultDuration: initial, - maxDuration: max, - } +func NewFakeBackOff(initial, max time.Duration, tc *testingclock.FakeClock) *Backoff { + return newBackoff(tc, initial, max, 0.0) } func NewBackOff(initial, max time.Duration) *Backoff { + return NewBackOffWithJitter(initial, max, 0.0) +} + +func NewFakeBackOffWithJitter(initial, max time.Duration, tc *testingclock.FakeClock, maxJitterFactor float64) *Backoff { + return newBackoff(tc, initial, max, maxJitterFactor) +} + +func NewBackOffWithJitter(initial, max time.Duration, maxJitterFactor float64) *Backoff { + clock := clock.RealClock{} + return newBackoff(clock, initial, max, maxJitterFactor) +} + +func newBackoff(clock clock.Clock, initial, max time.Duration, maxJitterFactor float64) *Backoff { + var random *rand.Rand + if maxJitterFactor > 0 { + random = rand.New(rand.NewSource(clock.Now().UnixNano())) + } return &Backoff{ perItemBackoff: map[string]*backoffEntry{}, - Clock: clock.RealClock{}, + Clock: clock, defaultDuration: initial, maxDuration: max, + maxJitterFactor: maxJitterFactor, + rand: random, } } @@ -74,8 +96,10 @@ func (p *Backoff) Next(id string, eventTime time.Time) { entry, ok := p.perItemBackoff[id] if !ok || hasExpired(eventTime, entry.lastUpdate, p.maxDuration) { entry = p.initEntryUnsafe(id) + entry.backoff += p.jitter(entry.backoff) } else { - delay := entry.backoff * 2 // exponential + delay := entry.backoff * 2 // exponential + delay += p.jitter(entry.backoff) // add some jitter to the delay entry.backoff = time.Duration(integer.Int64Min(int64(delay), int64(p.maxDuration))) } entry.lastUpdate = p.Clock.Now() @@ -143,6 +167,14 @@ func (p *Backoff) initEntryUnsafe(id string) *backoffEntry { return entry } +func (p *Backoff) jitter(delay time.Duration) time.Duration { + if p.rand == nil { + return 0 + } + + return time.Duration(p.rand.Float64() * p.maxJitterFactor * float64(delay)) +} + // After 2*maxDuration we restart the backoff factor to the beginning func hasExpired(eventTime time.Time, lastUpdate time.Time, maxDuration time.Duration) bool { return eventTime.Sub(lastUpdate) > maxDuration*2 // consider stable if it's ok for twice the maxDuration diff --git a/vendor/k8s.io/client-go/util/flowcontrol/throttle.go b/vendor/k8s.io/client-go/util/flowcontrol/throttle.go index ffd912c56022e..af7abd898fa64 100644 --- a/vendor/k8s.io/client-go/util/flowcontrol/throttle.go +++ b/vendor/k8s.io/client-go/util/flowcontrol/throttle.go @@ -23,26 +23,36 @@ import ( "time" "golang.org/x/time/rate" + "k8s.io/utils/clock" ) -type RateLimiter interface { +type PassiveRateLimiter interface { // TryAccept returns true if a token is taken immediately. Otherwise, // it returns false. TryAccept() bool - // Accept returns once a token becomes available. - Accept() // Stop stops the rate limiter, subsequent calls to CanAccept will return false Stop() // QPS returns QPS of this rate limiter QPS() float32 +} + +type RateLimiter interface { + PassiveRateLimiter + // Accept returns once a token becomes available. + Accept() // Wait returns nil if a token is taken before the Context is done. 
Wait(ctx context.Context) error } -type tokenBucketRateLimiter struct { +type tokenBucketPassiveRateLimiter struct { limiter *rate.Limiter - clock Clock qps float32 + clock clock.PassiveClock +} + +type tokenBucketRateLimiter struct { + tokenBucketPassiveRateLimiter + clock Clock } // NewTokenBucketRateLimiter creates a rate limiter which implements a token bucket approach. @@ -52,58 +62,73 @@ type tokenBucketRateLimiter struct { // The maximum number of tokens in the bucket is capped at 'burst'. func NewTokenBucketRateLimiter(qps float32, burst int) RateLimiter { limiter := rate.NewLimiter(rate.Limit(qps), burst) - return newTokenBucketRateLimiter(limiter, realClock{}, qps) + return newTokenBucketRateLimiterWithClock(limiter, clock.RealClock{}, qps) +} + +// NewTokenBucketPassiveRateLimiter is similar to NewTokenBucketRateLimiter except that it returns +// a PassiveRateLimiter which does not have Accept() and Wait() methods. +func NewTokenBucketPassiveRateLimiter(qps float32, burst int) PassiveRateLimiter { + limiter := rate.NewLimiter(rate.Limit(qps), burst) + return newTokenBucketRateLimiterWithPassiveClock(limiter, clock.RealClock{}, qps) } // An injectable, mockable clock interface. type Clock interface { - Now() time.Time + clock.PassiveClock Sleep(time.Duration) } -type realClock struct{} - -func (realClock) Now() time.Time { - return time.Now() -} -func (realClock) Sleep(d time.Duration) { - time.Sleep(d) -} +var _ Clock = (*clock.RealClock)(nil) // NewTokenBucketRateLimiterWithClock is identical to NewTokenBucketRateLimiter // but allows an injectable clock, for testing. func NewTokenBucketRateLimiterWithClock(qps float32, burst int, c Clock) RateLimiter { limiter := rate.NewLimiter(rate.Limit(qps), burst) - return newTokenBucketRateLimiter(limiter, c, qps) + return newTokenBucketRateLimiterWithClock(limiter, c, qps) +} + +// NewTokenBucketPassiveRateLimiterWithClock is similar to NewTokenBucketRateLimiterWithClock +// except that it returns a PassiveRateLimiter which does not have Accept() and Wait() methods +// and uses a PassiveClock. 
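// A sketch of testing-oriented use, assuming the FakePassiveClock from
// k8s.io/utils/clock/testing:
//
//	fc := testingclock.NewFakePassiveClock(time.Now())
//	rl := NewTokenBucketPassiveRateLimiterWithClock(10, 5, fc)
//	if rl.TryAccept() {
//		// a token was available without blocking
//	}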
+func NewTokenBucketPassiveRateLimiterWithClock(qps float32, burst int, c clock.PassiveClock) PassiveRateLimiter { + limiter := rate.NewLimiter(rate.Limit(qps), burst) + return newTokenBucketRateLimiterWithPassiveClock(limiter, c, qps) } -func newTokenBucketRateLimiter(limiter *rate.Limiter, c Clock, qps float32) RateLimiter { +func newTokenBucketRateLimiterWithClock(limiter *rate.Limiter, c Clock, qps float32) *tokenBucketRateLimiter { return &tokenBucketRateLimiter{ + tokenBucketPassiveRateLimiter: *newTokenBucketRateLimiterWithPassiveClock(limiter, c, qps), + clock: c, + } +} + +func newTokenBucketRateLimiterWithPassiveClock(limiter *rate.Limiter, c clock.PassiveClock, qps float32) *tokenBucketPassiveRateLimiter { + return &tokenBucketPassiveRateLimiter{ limiter: limiter, - clock: c, qps: qps, + clock: c, } } -func (t *tokenBucketRateLimiter) TryAccept() bool { - return t.limiter.AllowN(t.clock.Now(), 1) +func (tbprl *tokenBucketPassiveRateLimiter) Stop() { } -// Accept will block until a token becomes available -func (t *tokenBucketRateLimiter) Accept() { - now := t.clock.Now() - t.clock.Sleep(t.limiter.ReserveN(now, 1).DelayFrom(now)) +func (tbprl *tokenBucketPassiveRateLimiter) QPS() float32 { + return tbprl.qps } -func (t *tokenBucketRateLimiter) Stop() { +func (tbprl *tokenBucketPassiveRateLimiter) TryAccept() bool { + return tbprl.limiter.AllowN(tbprl.clock.Now(), 1) } -func (t *tokenBucketRateLimiter) QPS() float32 { - return t.qps +// Accept will block until a token becomes available +func (tbrl *tokenBucketRateLimiter) Accept() { + now := tbrl.clock.Now() + tbrl.clock.Sleep(tbrl.limiter.ReserveN(now, 1).DelayFrom(now)) } -func (t *tokenBucketRateLimiter) Wait(ctx context.Context) error { - return t.limiter.Wait(ctx) +func (tbrl *tokenBucketRateLimiter) Wait(ctx context.Context) error { + return tbrl.limiter.Wait(ctx) } type fakeAlwaysRateLimiter struct{} @@ -157,3 +182,11 @@ func (t *fakeNeverRateLimiter) QPS() float32 { func (t *fakeNeverRateLimiter) Wait(ctx context.Context) error { return errors.New("can not be accept") } + +var ( + _ RateLimiter = (*tokenBucketRateLimiter)(nil) + _ RateLimiter = (*fakeAlwaysRateLimiter)(nil) + _ RateLimiter = (*fakeNeverRateLimiter)(nil) +) + +var _ PassiveRateLimiter = (*tokenBucketPassiveRateLimiter)(nil) diff --git a/vendor/k8s.io/client-go/util/workqueue/default_rate_limiters.go b/vendor/k8s.io/client-go/util/workqueue/default_rate_limiters.go index 71bb6322e075b..efda7c197fc8d 100644 --- a/vendor/k8s.io/client-go/util/workqueue/default_rate_limiters.go +++ b/vendor/k8s.io/client-go/util/workqueue/default_rate_limiters.go @@ -27,7 +27,7 @@ import ( type RateLimiter interface { // When gets an item and gets to decide how long that item should wait When(item interface{}) time.Duration - // Forget indicates that an item is finished being retried. Doesn't matter whether its for perm failing + // Forget indicates that an item is finished being retried. 
Doesn't matter whether it's for failing // or for success, we'll stop tracking it Forget(item interface{}) // NumRequeues returns back how many failures the item has had @@ -209,3 +209,30 @@ func (r *MaxOfRateLimiter) Forget(item interface{}) { limiter.Forget(item) } } + +// WithMaxWaitRateLimiter have maxDelay which avoids waiting too long +type WithMaxWaitRateLimiter struct { + limiter RateLimiter + maxDelay time.Duration +} + +func NewWithMaxWaitRateLimiter(limiter RateLimiter, maxDelay time.Duration) RateLimiter { + return &WithMaxWaitRateLimiter{limiter: limiter, maxDelay: maxDelay} +} + +func (w WithMaxWaitRateLimiter) When(item interface{}) time.Duration { + delay := w.limiter.When(item) + if delay > w.maxDelay { + return w.maxDelay + } + + return delay +} + +func (w WithMaxWaitRateLimiter) Forget(item interface{}) { + w.limiter.Forget(item) +} + +func (w WithMaxWaitRateLimiter) NumRequeues(item interface{}) int { + return w.limiter.NumRequeues(item) +} diff --git a/vendor/k8s.io/client-go/util/workqueue/delaying_queue.go b/vendor/k8s.io/client-go/util/workqueue/delaying_queue.go index 31d9182dea6f0..61c4da530c0bf 100644 --- a/vendor/k8s.io/client-go/util/workqueue/delaying_queue.go +++ b/vendor/k8s.io/client-go/util/workqueue/delaying_queue.go @@ -21,8 +21,8 @@ import ( "sync" "time" - "k8s.io/apimachinery/pkg/util/clock" utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/utils/clock" ) // DelayingInterface is an Interface that can Add an item at a later time. This makes it easier to @@ -51,11 +51,11 @@ func NewNamedDelayingQueue(name string) DelayingInterface { // NewDelayingQueueWithCustomClock constructs a new named workqueue // with ability to inject real or fake clock for testing purposes -func NewDelayingQueueWithCustomClock(clock clock.Clock, name string) DelayingInterface { +func NewDelayingQueueWithCustomClock(clock clock.WithTicker, name string) DelayingInterface { return newDelayingQueue(clock, NewNamed(name), name) } -func newDelayingQueue(clock clock.Clock, q Interface, name string) *delayingType { +func newDelayingQueue(clock clock.WithTicker, q Interface, name string) *delayingType { ret := &delayingType{ Interface: q, clock: clock, diff --git a/vendor/k8s.io/client-go/util/workqueue/metrics.go b/vendor/k8s.io/client-go/util/workqueue/metrics.go index 556e6432eb100..4b0a69616d3f0 100644 --- a/vendor/k8s.io/client-go/util/workqueue/metrics.go +++ b/vendor/k8s.io/client-go/util/workqueue/metrics.go @@ -20,7 +20,7 @@ import ( "sync" "time" - "k8s.io/apimachinery/pkg/util/clock" + "k8s.io/utils/clock" ) // This file provides abstractions for setting the provider (e.g., prometheus) diff --git a/vendor/k8s.io/client-go/util/workqueue/queue.go b/vendor/k8s.io/client-go/util/workqueue/queue.go index f7c14ddcdb535..6f7063269fb54 100644 --- a/vendor/k8s.io/client-go/util/workqueue/queue.go +++ b/vendor/k8s.io/client-go/util/workqueue/queue.go @@ -20,7 +20,7 @@ import ( "sync" "time" - "k8s.io/apimachinery/pkg/util/clock" + "k8s.io/utils/clock" ) type Interface interface { @@ -29,6 +29,7 @@ type Interface interface { Get() (item interface{}, shutdown bool) Done(item interface{}) ShutDown() + ShutDownWithDrain() ShuttingDown() bool } @@ -46,7 +47,7 @@ func NewNamed(name string) *Type { ) } -func newQueue(c clock.Clock, metrics queueMetrics, updatePeriod time.Duration) *Type { +func newQueue(c clock.WithTicker, metrics queueMetrics, updatePeriod time.Duration) *Type { t := &Type{ clock: c, dirty: set{}, @@ -86,11 +87,12 @@ type Type struct { cond *sync.Cond shuttingDown 
bool + drain bool metrics queueMetrics unfinishedWorkUpdatePeriod time.Duration - clock clock.Clock + clock clock.WithTicker } type empty struct{} @@ -110,6 +112,10 @@ func (s set) delete(item t) { delete(s, item) } +func (s set) len() int { + return len(s) +} + // Add marks item as needing processing. func (q *Type) Add(item interface{}) { q.cond.L.Lock() @@ -155,7 +161,10 @@ func (q *Type) Get() (item interface{}, shutdown bool) { return nil, true } - item, q.queue = q.queue[0], q.queue[1:] + item = q.queue[0] + // The underlying array still exists and reference this object, so the object will not be garbage collected. + q.queue[0] = nil + q.queue = q.queue[1:] q.metrics.get(item) @@ -178,13 +187,71 @@ func (q *Type) Done(item interface{}) { if q.dirty.has(item) { q.queue = append(q.queue, item) q.cond.Signal() + } else if q.processing.len() == 0 { + q.cond.Signal() } } -// ShutDown will cause q to ignore all new items added to it. As soon as the -// worker goroutines have drained the existing items in the queue, they will be -// instructed to exit. +// ShutDown will cause q to ignore all new items added to it and +// immediately instruct the worker goroutines to exit. func (q *Type) ShutDown() { + q.setDrain(false) + q.shutdown() +} + +// ShutDownWithDrain will cause q to ignore all new items added to it. As soon +// as the worker goroutines have "drained", i.e: finished processing and called +// Done on all existing items in the queue; they will be instructed to exit and +// ShutDownWithDrain will return. Hence: a strict requirement for using this is; +// your workers must ensure that Done is called on all items in the queue once +// the shut down has been initiated, if that is not the case: this will block +// indefinitely. It is, however, safe to call ShutDown after having called +// ShutDownWithDrain, as to force the queue shut down to terminate immediately +// without waiting for the drainage. +func (q *Type) ShutDownWithDrain() { + q.setDrain(true) + q.shutdown() + for q.isProcessing() && q.shouldDrain() { + q.waitForProcessing() + } +} + +// isProcessing indicates if there are still items on the work queue being +// processed. It's used to drain the work queue on an eventual shutdown. +func (q *Type) isProcessing() bool { + q.cond.L.Lock() + defer q.cond.L.Unlock() + return q.processing.len() != 0 +} + +// waitForProcessing waits for the worker goroutines to finish processing items +// and call Done on them. +func (q *Type) waitForProcessing() { + q.cond.L.Lock() + defer q.cond.L.Unlock() + // Ensure that we do not wait on a queue which is already empty, as that + // could result in waiting for Done to be called on items in an empty queue + // which has already been shut down, which will result in waiting + // indefinitely. + if q.processing.len() == 0 { + return + } + q.cond.Wait() +} + +func (q *Type) setDrain(shouldDrain bool) { + q.cond.L.Lock() + defer q.cond.L.Unlock() + q.drain = shouldDrain +} + +func (q *Type) shouldDrain() bool { + q.cond.L.Lock() + defer q.cond.L.Unlock() + return q.drain +} + +func (q *Type) shutdown() { q.cond.L.Lock() defer q.cond.L.Unlock() q.shuttingDown = true diff --git a/vendor/k8s.io/kube-openapi/LICENSE b/vendor/k8s.io/kube-openapi/LICENSE new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/vendor/k8s.io/kube-openapi/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. 
+ + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
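The drain-aware shutdown added to client-go's workqueue above is only safe when every worker reliably calls Done on each item it takes; a minimal sketch of the intended pattern, using only the exported workqueue API (illustrative, not part of the patch):

	q := workqueue.New()
	q.Add("item")
	done := make(chan struct{})
	go func() {
		defer close(done)
		for {
			item, shutdown := q.Get()
			if shutdown {
				return
			}
			// ... process item ...
			q.Done(item) // required; otherwise ShutDownWithDrain blocks forever
		}
	}()
	q.ShutDownWithDrain() // returns once all in-flight items are Done
	<-done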
diff --git a/vendor/k8s.io/kube-openapi/pkg/schemaconv/smd.go b/vendor/k8s.io/kube-openapi/pkg/schemaconv/smd.go new file mode 100644 index 0000000000000..35241fde647c7 --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/schemaconv/smd.go @@ -0,0 +1,457 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package schemaconv + +import ( + "errors" + "fmt" + "path" + "sort" + "strings" + + "k8s.io/kube-openapi/pkg/util/proto" + "sigs.k8s.io/structured-merge-diff/v4/schema" +) + +const ( + quantityResource = "io.k8s.apimachinery.pkg.api.resource.Quantity" +) + +// ToSchema converts openapi definitions into a schema suitable for structured +// merge (i.e. kubectl apply v2). +func ToSchema(models proto.Models) (*schema.Schema, error) { + return ToSchemaWithPreserveUnknownFields(models, false) +} + +// ToSchemaWithPreserveUnknownFields converts openapi definitions into a schema suitable for structured +// merge (i.e. kubectl apply v2), it will preserve unknown fields if specified. +func ToSchemaWithPreserveUnknownFields(models proto.Models, preserveUnknownFields bool) (*schema.Schema, error) { + c := convert{ + input: models, + preserveUnknownFields: preserveUnknownFields, + output: &schema.Schema{}, + } + if err := c.convertAll(); err != nil { + return nil, err + } + c.addCommonTypes() + return c.output, nil +} + +type convert struct { + input proto.Models + preserveUnknownFields bool + output *schema.Schema + + currentName string + current *schema.Atom + errorMessages []string +} + +func (c *convert) push(name string, a *schema.Atom) *convert { + return &convert{ + input: c.input, + preserveUnknownFields: c.preserveUnknownFields, + output: c.output, + currentName: name, + current: a, + } +} + +func (c *convert) top() *schema.Atom { return c.current } + +func (c *convert) pop(c2 *convert) { + c.errorMessages = append(c.errorMessages, c2.errorMessages...) +} + +func (c *convert) convertAll() error { + for _, name := range c.input.ListModels() { + model := c.input.LookupModel(name) + c.insertTypeDef(name, model) + } + if len(c.errorMessages) > 0 { + return errors.New(strings.Join(c.errorMessages, "\n")) + } + return nil +} + +func (c *convert) reportError(format string, args ...interface{}) { + c.errorMessages = append(c.errorMessages, + c.currentName+": "+fmt.Sprintf(format, args...), + ) +} + +func (c *convert) insertTypeDef(name string, model proto.Schema) { + def := schema.TypeDef{ + Name: name, + } + c2 := c.push(name, &def.Atom) + model.Accept(c2) + c.pop(c2) + if def.Atom == (schema.Atom{}) { + // This could happen if there were a top-level reference. 
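+		// (A top-level proto.Ref is handled by VisitReference, which sets nothing,
+		// so the Atom stays zero-valued and no TypeDef is emitted for it.)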
+ return + } + c.output.Types = append(c.output.Types, def) +} + +func (c *convert) addCommonTypes() { + c.output.Types = append(c.output.Types, untypedDef) + c.output.Types = append(c.output.Types, deducedDef) +} + +var untypedName string = "__untyped_atomic_" + +var untypedDef schema.TypeDef = schema.TypeDef{ + Name: untypedName, + Atom: schema.Atom{ + Scalar: ptr(schema.Scalar("untyped")), + List: &schema.List{ + ElementType: schema.TypeRef{ + NamedType: &untypedName, + }, + ElementRelationship: schema.Atomic, + }, + Map: &schema.Map{ + ElementType: schema.TypeRef{ + NamedType: &untypedName, + }, + ElementRelationship: schema.Atomic, + }, + }, +} + +var deducedName string = "__untyped_deduced_" + +var deducedDef schema.TypeDef = schema.TypeDef{ + Name: deducedName, + Atom: schema.Atom{ + Scalar: ptr(schema.Scalar("untyped")), + List: &schema.List{ + ElementType: schema.TypeRef{ + NamedType: &untypedName, + }, + ElementRelationship: schema.Atomic, + }, + Map: &schema.Map{ + ElementType: schema.TypeRef{ + NamedType: &deducedName, + }, + ElementRelationship: schema.Separable, + }, + }, +} + +func (c *convert) makeRef(model proto.Schema, preserveUnknownFields bool) schema.TypeRef { + var tr schema.TypeRef + if r, ok := model.(*proto.Ref); ok { + if r.Reference() == "io.k8s.apimachinery.pkg.runtime.RawExtension" { + return schema.TypeRef{ + NamedType: &untypedName, + } + } + // reference a named type + _, n := path.Split(r.Reference()) + tr.NamedType = &n + } else { + // compute the type inline + c2 := c.push("inlined in "+c.currentName, &tr.Inlined) + c2.preserveUnknownFields = preserveUnknownFields + model.Accept(c2) + c.pop(c2) + + if tr == (schema.TypeRef{}) { + // emit warning? + tr.NamedType = &untypedName + } + } + return tr +} + +func makeUnions(extensions map[string]interface{}) ([]schema.Union, error) { + schemaUnions := []schema.Union{} + if iunions, ok := extensions["x-kubernetes-unions"]; ok { + unions, ok := iunions.([]interface{}) + if !ok { + return nil, fmt.Errorf(`"x-kubernetes-unions" should be a list, got %#v`, unions) + } + for _, iunion := range unions { + union, ok := iunion.(map[interface{}]interface{}) + if !ok { + return nil, fmt.Errorf(`"x-kubernetes-unions" items should be a map of string to unions, got %#v`, iunion) + } + unionMap := map[string]interface{}{} + for k, v := range union { + key, ok := k.(string) + if !ok { + return nil, fmt.Errorf(`"x-kubernetes-unions" has non-string key: %#v`, k) + } + unionMap[key] = v + } + schemaUnion, err := makeUnion(unionMap) + if err != nil { + return nil, err + } + schemaUnions = append(schemaUnions, schemaUnion) + } + } + + // Make sure we have no overlap between unions + fs := map[string]struct{}{} + for _, u := range schemaUnions { + if u.Discriminator != nil { + if _, ok := fs[*u.Discriminator]; ok { + return nil, fmt.Errorf("%v field appears multiple times in unions", *u.Discriminator) + } + fs[*u.Discriminator] = struct{}{} + } + for _, f := range u.Fields { + if _, ok := fs[f.FieldName]; ok { + return nil, fmt.Errorf("%v field appears multiple times in unions", f.FieldName) + } + fs[f.FieldName] = struct{}{} + } + } + + return schemaUnions, nil +} + +func makeUnion(extensions map[string]interface{}) (schema.Union, error) { + union := schema.Union{ + Fields: []schema.UnionField{}, + } + + if idiscriminator, ok := extensions["discriminator"]; ok { + discriminator, ok := idiscriminator.(string) + if !ok { + return schema.Union{}, fmt.Errorf(`"discriminator" must be a string, got: %#v`, idiscriminator) + } + 
union.Discriminator = &discriminator + } + + if ifields, ok := extensions["fields-to-discriminateBy"]; ok { + fields, ok := ifields.(map[interface{}]interface{}) + if !ok { + return schema.Union{}, fmt.Errorf(`"fields-to-discriminateBy" must be a map[string]string, got: %#v`, ifields) + } + // Needs sorted keys by field. + keys := []string{} + for ifield := range fields { + field, ok := ifield.(string) + if !ok { + return schema.Union{}, fmt.Errorf(`"fields-to-discriminateBy": field must be a string, got: %#v`, ifield) + } + keys = append(keys, field) + + } + sort.Strings(keys) + reverseMap := map[string]struct{}{} + for _, field := range keys { + value := fields[field] + discriminated, ok := value.(string) + if !ok { + return schema.Union{}, fmt.Errorf(`"fields-to-discriminateBy"/%v: value must be a string, got: %#v`, field, value) + } + union.Fields = append(union.Fields, schema.UnionField{ + FieldName: field, + DiscriminatorValue: discriminated, + }) + + // Check that we don't have the same discriminateBy multiple times. + if _, ok := reverseMap[discriminated]; ok { + return schema.Union{}, fmt.Errorf("Multiple fields have the same discriminated name: %v", discriminated) + } + reverseMap[discriminated] = struct{}{} + } + } + + if union.Discriminator != nil && len(union.Fields) == 0 { + return schema.Union{}, fmt.Errorf("discriminator set to %v, but no fields in union", *union.Discriminator) + } + return union, nil +} + +func (c *convert) VisitKind(k *proto.Kind) { + preserveUnknownFields := c.preserveUnknownFields + if p, ok := k.GetExtensions()["x-kubernetes-preserve-unknown-fields"]; ok && p == true { + preserveUnknownFields = true + } + + a := c.top() + a.Map = &schema.Map{} + for _, name := range k.FieldOrder { + member := k.Fields[name] + tr := c.makeRef(member, preserveUnknownFields) + a.Map.Fields = append(a.Map.Fields, schema.StructField{ + Name: name, + Type: tr, + Default: member.GetDefault(), + }) + } + + unions, err := makeUnions(k.GetExtensions()) + if err != nil { + c.reportError(err.Error()) + return + } + // TODO: We should check that the fields and discriminator + // specified in the union are actual fields in the struct. 
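+	// (makeUnions returns an empty slice when the type carries no
+	// x-kubernetes-unions extension, so this assignment is usually a no-op.)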
+ a.Map.Unions = unions + + if preserveUnknownFields { + a.Map.ElementType = schema.TypeRef{ + NamedType: &deducedName, + } + } + + ext := k.GetExtensions() + if val, ok := ext["x-kubernetes-map-type"]; ok { + switch val { + case "atomic": + a.Map.ElementRelationship = schema.Atomic + case "granular": + a.Map.ElementRelationship = schema.Separable + default: + c.reportError("unknown map type %v", val) + } + } +} + +func toStringSlice(o interface{}) (out []string, ok bool) { + switch t := o.(type) { + case []interface{}: + for _, v := range t { + switch vt := v.(type) { + case string: + out = append(out, vt) + } + } + return out, true + } + return nil, false +} + +func (c *convert) VisitArray(a *proto.Array) { + atom := c.top() + atom.List = &schema.List{ + ElementRelationship: schema.Atomic, + } + l := atom.List + l.ElementType = c.makeRef(a.SubType, c.preserveUnknownFields) + + ext := a.GetExtensions() + + if val, ok := ext["x-kubernetes-list-type"]; ok { + if val == "atomic" { + l.ElementRelationship = schema.Atomic + } else if val == "set" { + l.ElementRelationship = schema.Associative + } else if val == "map" { + l.ElementRelationship = schema.Associative + if keys, ok := ext["x-kubernetes-list-map-keys"]; ok { + if keyNames, ok := toStringSlice(keys); ok { + l.Keys = keyNames + } else { + c.reportError("uninterpreted map keys: %#v", keys) + } + } else { + c.reportError("missing map keys") + } + } else { + c.reportError("unknown list type %v", val) + l.ElementRelationship = schema.Atomic + } + } else if val, ok := ext["x-kubernetes-patch-strategy"]; ok { + if val == "merge" || val == "merge,retainKeys" { + l.ElementRelationship = schema.Associative + if key, ok := ext["x-kubernetes-patch-merge-key"]; ok { + if keyName, ok := key.(string); ok { + l.Keys = []string{keyName} + } else { + c.reportError("uninterpreted merge key: %#v", key) + } + } else { + // It's not an error for this to be absent, it + // means it's a set. + } + } else if val == "retainKeys" { + } else { + c.reportError("unknown patch strategy %v", val) + l.ElementRelationship = schema.Atomic + } + } +} + +func (c *convert) VisitMap(m *proto.Map) { + a := c.top() + a.Map = &schema.Map{} + a.Map.ElementType = c.makeRef(m.SubType, c.preserveUnknownFields) + + ext := m.GetExtensions() + if val, ok := ext["x-kubernetes-map-type"]; ok { + switch val { + case "atomic": + a.Map.ElementRelationship = schema.Atomic + case "granular": + a.Map.ElementRelationship = schema.Separable + default: + c.reportError("unknown map type %v", val) + } + } +} + +func ptr(s schema.Scalar) *schema.Scalar { return &s } + +func (c *convert) VisitPrimitive(p *proto.Primitive) { + a := c.top() + if c.currentName == quantityResource { + a.Scalar = ptr(schema.Scalar("untyped")) + } else { + switch p.Type { + case proto.Integer: + a.Scalar = ptr(schema.Numeric) + case proto.Number: + a.Scalar = ptr(schema.Numeric) + case proto.String: + switch p.Format { + case "": + a.Scalar = ptr(schema.String) + case "byte": + // byte really means []byte and is encoded as a string. 
+ a.Scalar = ptr(schema.String) + case "int-or-string": + a.Scalar = ptr(schema.Scalar("untyped")) + case "date-time": + a.Scalar = ptr(schema.Scalar("untyped")) + default: + a.Scalar = ptr(schema.Scalar("untyped")) + } + case proto.Boolean: + a.Scalar = ptr(schema.Boolean) + default: + a.Scalar = ptr(schema.Scalar("untyped")) + } + } +} + +func (c *convert) VisitArbitrary(a *proto.Arbitrary) { + *c.top() = deducedDef.Atom +} + +func (c *convert) VisitReference(proto.Reference) { + // Do nothing, we handle references specially +} diff --git a/vendor/k8s.io/kube-openapi/pkg/util/proto/OWNERS b/vendor/k8s.io/kube-openapi/pkg/util/proto/OWNERS new file mode 100644 index 0000000000000..9621a6a3a4acf --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/util/proto/OWNERS @@ -0,0 +1,2 @@ +approvers: +- apelisse diff --git a/vendor/k8s.io/kube-openapi/pkg/util/proto/doc.go b/vendor/k8s.io/kube-openapi/pkg/util/proto/doc.go new file mode 100644 index 0000000000000..11ed8a6b7ccf9 --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/util/proto/doc.go @@ -0,0 +1,19 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package proto is a collection of libraries for parsing and indexing the type definitions. +// The openapi spec contains the object model definitions and extensions metadata. +package proto diff --git a/vendor/k8s.io/kube-openapi/pkg/util/proto/document.go b/vendor/k8s.io/kube-openapi/pkg/util/proto/document.go new file mode 100644 index 0000000000000..4abcf9b82423e --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/util/proto/document.go @@ -0,0 +1,362 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package proto + +import ( + "fmt" + "sort" + "strings" + + openapi_v2 "github.com/googleapis/gnostic/openapiv2" + "gopkg.in/yaml.v2" +) + +func newSchemaError(path *Path, format string, a ...interface{}) error { + err := fmt.Sprintf(format, a...) + if path.Len() == 0 { + return fmt.Errorf("SchemaError: %v", err) + } + return fmt.Errorf("SchemaError(%v): %v", path, err) +} + +// VendorExtensionToMap converts openapi VendorExtension to a map. 
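+// For example (an illustrative sketch, not a case taken from this package):
+// an extension named "x-kubernetes-group-version-kind" carrying the YAML
+//
+//  group: apps
+//  kind: Deployment
+//  version: v1
+//
+// would come back in the map under that name, holding whatever value
+// yaml.Unmarshal produced (a map[interface{}]interface{} with gopkg.in/yaml.v2).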
+func VendorExtensionToMap(e []*openapi_v2.NamedAny) map[string]interface{} {
+ values := map[string]interface{}{}
+
+ for _, na := range e {
+ if na.GetName() == "" || na.GetValue() == nil {
+ continue
+ }
+ if na.GetValue().GetYaml() == "" {
+ continue
+ }
+ var value interface{}
+ err := yaml.Unmarshal([]byte(na.GetValue().GetYaml()), &value)
+ if err != nil {
+ continue
+ }
+
+ values[na.GetName()] = value
+ }
+
+ return values
+}
+
+// Definitions is an implementation of `Models`. It looks for
+// models in an openapi Schema.
+type Definitions struct {
+ models map[string]Schema
+}
+
+var _ Models = &Definitions{}
+
+// NewOpenAPIData creates a new `Models` out of the openapi document.
+func NewOpenAPIData(doc *openapi_v2.Document) (Models, error) {
+ definitions := Definitions{
+ models: map[string]Schema{},
+ }
+
+ // Save the list of all models first. This will allow us to
+ // validate that we don't have any dangling references.
+ for _, namedSchema := range doc.GetDefinitions().GetAdditionalProperties() {
+ definitions.models[namedSchema.GetName()] = nil
+ }
+
+ // Now, parse each model. We can validate that references exist.
+ for _, namedSchema := range doc.GetDefinitions().GetAdditionalProperties() {
+ path := NewPath(namedSchema.GetName())
+ schema, err := definitions.ParseSchema(namedSchema.GetValue(), &path)
+ if err != nil {
+ return nil, err
+ }
+ definitions.models[namedSchema.GetName()] = schema
+ }
+
+ return &definitions, nil
+}
+
+// We believe the schema is a reference; verify that and return a new
+// Schema.
+func (d *Definitions) parseReference(s *openapi_v2.Schema, path *Path) (Schema, error) {
+ // TODO(wrong): a schema with a $ref can have properties. We can ignore them (would be incomplete), but we cannot return an error.
+ if len(s.GetProperties().GetAdditionalProperties()) > 0 {
+ return nil, newSchemaError(path, "unallowed embedded type definition")
+ }
+ // TODO(wrong): a schema with a $ref can have a type. We can ignore it (would be incomplete), but we cannot return an error.
+ if len(s.GetType().GetValue()) > 0 {
+ return nil, newSchemaError(path, "definition reference can't have a type")
+ }
+
+ // TODO(wrong): $refs outside of the definitions are completely valid. We can ignore them (would be incomplete), but we cannot return an error.
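+ // e.g. a conforming value: s.GetXRef() == "#/definitions/io.k8s.api.core.v1.Pod",
+ // which resolves to the model named "io.k8s.api.core.v1.Pod" looked up below.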
+ if !strings.HasPrefix(s.GetXRef(), "#/definitions/") {
+ return nil, newSchemaError(path, "unallowed reference to non-definition %q", s.GetXRef())
+ }
+ reference := strings.TrimPrefix(s.GetXRef(), "#/definitions/")
+ if _, ok := d.models[reference]; !ok {
+ return nil, newSchemaError(path, "unknown model in reference: %q", reference)
+ }
+ base, err := d.parseBaseSchema(s, path)
+ if err != nil {
+ return nil, err
+ }
+ return &Ref{
+ BaseSchema: base,
+ reference: reference,
+ definitions: d,
+ }, nil
+}
+
+func parseDefault(def *openapi_v2.Any) (interface{}, error) {
+ if def == nil {
+ return nil, nil
+ }
+ var i interface{}
+ if err := yaml.Unmarshal([]byte(def.Yaml), &i); err != nil {
+ return nil, err
+ }
+ return i, nil
+}
+
+func (d *Definitions) parseBaseSchema(s *openapi_v2.Schema, path *Path) (BaseSchema, error) {
+ def, err := parseDefault(s.GetDefault())
+ if err != nil {
+ return BaseSchema{}, err
+ }
+ return BaseSchema{
+ Description: s.GetDescription(),
+ Default: def,
+ Extensions: VendorExtensionToMap(s.GetVendorExtension()),
+ Path: *path,
+ }, nil
+}
+
+// We believe the schema is a map; verify that and return a new schema.
+func (d *Definitions) parseMap(s *openapi_v2.Schema, path *Path) (Schema, error) {
+ if len(s.GetType().GetValue()) != 0 && s.GetType().GetValue()[0] != object {
+ return nil, newSchemaError(path, "invalid object type")
+ }
+ var sub Schema
+ // TODO(incomplete): this misses the boolean case as AdditionalProperties is a bool+schema sum type.
+ if s.GetAdditionalProperties().GetSchema() == nil {
+ base, err := d.parseBaseSchema(s, path)
+ if err != nil {
+ return nil, err
+ }
+ sub = &Arbitrary{
+ BaseSchema: base,
+ }
+ } else {
+ var err error
+ sub, err = d.ParseSchema(s.GetAdditionalProperties().GetSchema(), path)
+ if err != nil {
+ return nil, err
+ }
+ }
+ base, err := d.parseBaseSchema(s, path)
+ if err != nil {
+ return nil, err
+ }
+ return &Map{
+ BaseSchema: base,
+ SubType: sub,
+ }, nil
+}
+
+func (d *Definitions) parsePrimitive(s *openapi_v2.Schema, path *Path) (Schema, error) {
+ var t string
+ if len(s.GetType().GetValue()) > 1 {
+ return nil, newSchemaError(path, "primitive can't have more than 1 type")
+ }
+ if len(s.GetType().GetValue()) == 1 {
+ t = s.GetType().GetValue()[0]
+ }
+ switch t {
+ case String: // do nothing
+ case Number: // do nothing
+ case Integer: // do nothing
+ case Boolean: // do nothing
+ // TODO(wrong): this misses "null". We would skip the null case (would be incomplete), but we cannot return an error.
+ default:
+ return nil, newSchemaError(path, "unknown primitive type: %q", t)
+ }
+ base, err := d.parseBaseSchema(s, path)
+ if err != nil {
+ return nil, err
+ }
+ return &Primitive{
+ BaseSchema: base,
+ Type: t,
+ Format: s.GetFormat(),
+ }, nil
+}
+
+func (d *Definitions) parseArray(s *openapi_v2.Schema, path *Path) (Schema, error) {
+ if len(s.GetType().GetValue()) != 1 {
+ return nil, newSchemaError(path, "array should have exactly one type")
+ }
+ if s.GetType().GetValue()[0] != array {
+ return nil, newSchemaError(path, `array should have type "array"`)
+ }
+ if len(s.GetItems().GetSchema()) != 1 {
+ // TODO(wrong): Items can have multiple elements. We can ignore Items then (would be incomplete), but we cannot return an error.
+ // TODO(wrong): "type: array" without any items at all is completely valid.
+ return nil, newSchemaError(path, "array should have exactly one sub-item") + } + sub, err := d.ParseSchema(s.GetItems().GetSchema()[0], path) + if err != nil { + return nil, err + } + base, err := d.parseBaseSchema(s, path) + if err != nil { + return nil, err + } + return &Array{ + BaseSchema: base, + SubType: sub, + }, nil +} + +func (d *Definitions) parseKind(s *openapi_v2.Schema, path *Path) (Schema, error) { + if len(s.GetType().GetValue()) != 0 && s.GetType().GetValue()[0] != object { + return nil, newSchemaError(path, "invalid object type") + } + if s.GetProperties() == nil { + return nil, newSchemaError(path, "object doesn't have properties") + } + + fields := map[string]Schema{} + fieldOrder := []string{} + + for _, namedSchema := range s.GetProperties().GetAdditionalProperties() { + var err error + name := namedSchema.GetName() + path := path.FieldPath(name) + fields[name], err = d.ParseSchema(namedSchema.GetValue(), &path) + if err != nil { + return nil, err + } + fieldOrder = append(fieldOrder, name) + } + + base, err := d.parseBaseSchema(s, path) + if err != nil { + return nil, err + } + return &Kind{ + BaseSchema: base, + RequiredFields: s.GetRequired(), + Fields: fields, + FieldOrder: fieldOrder, + }, nil +} + +func (d *Definitions) parseArbitrary(s *openapi_v2.Schema, path *Path) (Schema, error) { + base, err := d.parseBaseSchema(s, path) + if err != nil { + return nil, err + } + return &Arbitrary{ + BaseSchema: base, + }, nil +} + +// ParseSchema creates a walkable Schema from an openapi schema. While +// this function is public, it doesn't leak through the interface. +func (d *Definitions) ParseSchema(s *openapi_v2.Schema, path *Path) (Schema, error) { + if s.GetXRef() != "" { + // TODO(incomplete): ignoring the rest of s is wrong. As long as there are no conflict, everything from s must be considered + // Reference: https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#path-item-object + return d.parseReference(s, path) + } + objectTypes := s.GetType().GetValue() + switch len(objectTypes) { + case 0: + // in the OpenAPI schema served by older k8s versions, object definitions created from structs did not include + // the type:object property (they only included the "properties" property), so we need to handle this case + // TODO: validate that we ever published empty, non-nil properties. JSON roundtripping nils them. + if s.GetProperties() != nil { + // TODO(wrong): when verifying a non-object later against this, it will be rejected as invalid type. + // TODO(CRD validation schema publishing): we have to filter properties (empty or not) if type=object is not given + return d.parseKind(s, path) + } else { + // Definition has no type and no properties. Treat it as an arbitrary value + // TODO(incomplete): what if it has additionalProperties=false or patternProperties? + // ANSWER: parseArbitrary is less strict than it has to be with patternProperties (which is ignored). So this is correct (of course not complete). 
+ return d.parseArbitrary(s, path)
+ }
+ case 1:
+ t := objectTypes[0]
+ switch t {
+ case object:
+ if s.GetProperties() != nil {
+ return d.parseKind(s, path)
+ } else {
+ return d.parseMap(s, path)
+ }
+ case array:
+ return d.parseArray(s, path)
+ }
+ return d.parsePrimitive(s, path)
+ default:
+ // the OpenAPI generator never generates (nor did it ever in the past) OpenAPI type definitions with multiple types
+ // TODO(wrong): this is rejecting a completely valid OpenAPI spec
+ // TODO(CRD validation schema publishing): filter these out
+ return nil, newSchemaError(path, "definitions with multiple types aren't supported")
+ }
+}
+
+// LookupModel is public through the interface of Models. It
+// returns a visitable schema from the given model name.
+func (d *Definitions) LookupModel(model string) Schema {
+ return d.models[model]
+}
+
+func (d *Definitions) ListModels() []string {
+ models := []string{}
+
+ for model := range d.models {
+ models = append(models, model)
+ }
+
+ sort.Strings(models)
+ return models
+}
+
+type Ref struct {
+ BaseSchema
+
+ reference string
+ definitions *Definitions
+}
+
+var _ Reference = &Ref{}
+
+func (r *Ref) Reference() string {
+ return r.reference
+}
+
+func (r *Ref) SubSchema() Schema {
+ return r.definitions.models[r.reference]
+}
+
+func (r *Ref) Accept(v SchemaVisitor) {
+ v.VisitReference(r)
+}
+
+func (r *Ref) GetName() string {
+ return fmt.Sprintf("Reference to %q", r.reference)
+}
diff --git a/vendor/k8s.io/kube-openapi/pkg/util/proto/openapi.go b/vendor/k8s.io/kube-openapi/pkg/util/proto/openapi.go
new file mode 100644
index 0000000000000..f31a2de2c452f
--- /dev/null
+++ b/vendor/k8s.io/kube-openapi/pkg/util/proto/openapi.go
@@ -0,0 +1,285 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package proto
+
+import (
+ "fmt"
+ "sort"
+ "strings"
+)
+
+// Defines openapi types.
+const (
+ Integer = "integer"
+ Number = "number"
+ String = "string"
+ Boolean = "boolean"
+
+ // These types are private as they should never leak, and are
+ // represented by actual structs.
+ array = "array"
+ object = "object"
+)
+
+// Models interface describes a model provider. It can give you the
+// schema for a specific model.
+type Models interface {
+ LookupModel(string) Schema
+ ListModels() []string
+}
+
+// SchemaVisitor is an interface that you need to implement if you want
+// to "visit" an openapi schema. A dispatch on the Schema type will call
+// the appropriate function based on its actual type:
+// - Array is a list of one and only one given subtype
+// - Map is a map of string to one and only one given subtype
+// - Primitive can be string, integer, number and boolean.
+// - Kind is an object with specific fields mapping to specific types.
+// - Reference is a link to another definition.
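+//
+// A minimal visitor sketch (hypothetical, for illustration only; note that
+// chasing references this way does not terminate on recursive schemas):
+//
+//  type primitiveCounter struct{ n int }
+//
+//  func (c *primitiveCounter) VisitPrimitive(*Primitive) { c.n++ }
+//  func (c *primitiveCounter) VisitArray(a *Array)       { a.SubType.Accept(c) }
+//  func (c *primitiveCounter) VisitMap(m *Map)           { m.SubType.Accept(c) }
+//  func (c *primitiveCounter) VisitKind(k *Kind) {
+//      for _, f := range k.Fields {
+//          f.Accept(c)
+//      }
+//  }
+//  func (c *primitiveCounter) VisitReference(r Reference) { r.SubSchema().Accept(c) }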
+type SchemaVisitor interface {
+ VisitArray(*Array)
+ VisitMap(*Map)
+ VisitPrimitive(*Primitive)
+ VisitKind(*Kind)
+ VisitReference(Reference)
+}
+
+// SchemaVisitorArbitrary is an additional visitor interface which handles
+// arbitrary types. For backwards compatibility, it's a separate interface
+// which is checked for at runtime.
+type SchemaVisitorArbitrary interface {
+ SchemaVisitor
+ VisitArbitrary(*Arbitrary)
+}
+
+// Schema is the base definition of an openapi type.
+type Schema interface {
+ // Giving a visitor here will let you visit the actual type.
+ Accept(SchemaVisitor)
+
+ // Pretty print the name of the type.
+ GetName() string
+ // Describes how to access this field.
+ GetPath() *Path
+ // Describes the field.
+ GetDescription() string
+ // Default for that schema.
+ GetDefault() interface{}
+ // Returns type extensions.
+ GetExtensions() map[string]interface{}
+}
+
+// Path helps us keep track of type paths
+type Path struct {
+ parent *Path
+ key string
+}
+
+func NewPath(key string) Path {
+ return Path{key: key}
+}
+
+func (p *Path) Get() []string {
+ if p == nil {
+ return []string{}
+ }
+ if p.key == "" {
+ return p.parent.Get()
+ }
+ return append(p.parent.Get(), p.key)
+}
+
+func (p *Path) Len() int {
+ return len(p.Get())
+}
+
+func (p *Path) String() string {
+ return strings.Join(p.Get(), "")
+}
+
+// ArrayPath appends an array index and creates a new path
+func (p *Path) ArrayPath(i int) Path {
+ return Path{
+ parent: p,
+ key: fmt.Sprintf("[%d]", i),
+ }
+}
+
+// FieldPath appends a field name and creates a new path
+func (p *Path) FieldPath(field string) Path {
+ return Path{
+ parent: p,
+ key: fmt.Sprintf(".%s", field),
+ }
+}
+
+// BaseSchema holds data used by each type of schema.
+type BaseSchema struct {
+ Description string
+ Extensions map[string]interface{}
+ Default interface{}
+
+ Path Path
+}
+
+func (b *BaseSchema) GetDescription() string {
+ return b.Description
+}
+
+func (b *BaseSchema) GetExtensions() map[string]interface{} {
+ return b.Extensions
+}
+
+func (b *BaseSchema) GetDefault() interface{} {
+ return b.Default
+}
+
+func (b *BaseSchema) GetPath() *Path {
+ return &b.Path
+}
+
+// Array must have all its elements of the same `SubType`.
+type Array struct {
+ BaseSchema
+
+ SubType Schema
+}
+
+var _ Schema = &Array{}
+
+func (a *Array) Accept(v SchemaVisitor) {
+ v.VisitArray(a)
+}
+
+func (a *Array) GetName() string {
+ return fmt.Sprintf("Array of %s", a.SubType.GetName())
+}
+
+// Kind is a complex object. It can have multiple different
+// subtypes for each field, as defined in the `Fields` field. Mandatory
+// fields are listed in `RequiredFields`. The key of the object is
+// always of type `string`.
+type Kind struct {
+ BaseSchema
+
+ // Lists names of required fields.
+ RequiredFields []string
+ // Maps field names to types.
+ Fields map[string]Schema
+ // FieldOrder reports the canonical order for the fields.
+ FieldOrder []string
+}
+
+var _ Schema = &Kind{}
+
+func (k *Kind) Accept(v SchemaVisitor) {
+ v.VisitKind(k)
+}
+
+func (k *Kind) GetName() string {
+ properties := []string{}
+ for key := range k.Fields {
+ properties = append(properties, key)
+ }
+ return fmt.Sprintf("Kind(%v)", properties)
+}
+
+// IsRequired returns true if `field` is a required field for this type.
+func (k *Kind) IsRequired(field string) bool {
+ for _, f := range k.RequiredFields {
+ if f == field {
+ return true
+ }
+ }
+ return false
+}
+
+// Keys returns an alphabetically sorted list of keys.
+func (k *Kind) Keys() []string {
+ keys := make([]string, 0)
+ for key := range k.Fields {
+ keys = append(keys, key)
+ }
+ sort.Strings(keys)
+ return keys
+}
+
+// Map is an object whose values must all be of the same `SubType`.
+// The key of the object is always of type `string`.
+type Map struct {
+ BaseSchema
+
+ SubType Schema
+}
+
+var _ Schema = &Map{}
+
+func (m *Map) Accept(v SchemaVisitor) {
+ v.VisitMap(m)
+}
+
+func (m *Map) GetName() string {
+ return fmt.Sprintf("Map of %s", m.SubType.GetName())
+}
+
+// Primitive is a literal. There can be multiple types of primitives,
+// and the concrete type is recorded in the `Type` field.
+type Primitive struct {
+ BaseSchema
+
+ // Type of a primitive must be one of: integer, number, string, boolean.
+ Type string
+ Format string
+}
+
+var _ Schema = &Primitive{}
+
+func (p *Primitive) Accept(v SchemaVisitor) {
+ v.VisitPrimitive(p)
+}
+
+func (p *Primitive) GetName() string {
+ if p.Format == "" {
+ return p.Type
+ }
+ return fmt.Sprintf("%s (%s)", p.Type, p.Format)
+}
+
+// Arbitrary is a value of any type (primitive, object or array)
+type Arbitrary struct {
+ BaseSchema
+}
+
+var _ Schema = &Arbitrary{}
+
+func (a *Arbitrary) Accept(v SchemaVisitor) {
+ if visitor, ok := v.(SchemaVisitorArbitrary); ok {
+ visitor.VisitArbitrary(a)
+ }
+}
+
+func (a *Arbitrary) GetName() string {
+ return "Arbitrary value (primitive, object or array)"
+}
+
+// Reference implementation depends on the type of document.
+type Reference interface {
+ Schema
+
+ Reference() string
+ SubSchema() Schema
+}
diff --git a/vendor/k8s.io/utils/clock/README.md b/vendor/k8s.io/utils/clock/README.md
new file mode 100644
index 0000000000000..ad2a8868aaec1
--- /dev/null
+++ b/vendor/k8s.io/utils/clock/README.md
@@ -0,0 +1,4 @@
+# Clock
+
+This package provides an interface for time-based operations. It allows
+mocking time for testing.
diff --git a/vendor/k8s.io/utils/clock/clock.go b/vendor/k8s.io/utils/clock/clock.go
new file mode 100644
index 0000000000000..dd181ce8d8b00
--- /dev/null
+++ b/vendor/k8s.io/utils/clock/clock.go
@@ -0,0 +1,168 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package clock
+
+import "time"
+
+// PassiveClock allows for injecting fake or real clocks into code
+// that needs to read the current time but does not support scheduling
+// activity in the future.
+type PassiveClock interface {
+ Now() time.Time
+ Since(time.Time) time.Duration
+}
+
+// Clock allows for injecting fake or real clocks into code that
+// needs to do arbitrary things based on time.
+type Clock interface {
+ PassiveClock
+ // After returns the channel of a new Timer.
+ // This method does not allow to free/GC the backing timer before it fires. Use
+ // NewTimer instead.
+ After(d time.Duration) <-chan time.Time
+ // NewTimer returns a new Timer.
+ NewTimer(d time.Duration) Timer
+ // Sleep sleeps for the provided duration d.
+ // Consider making the sleep interruptible by using 'select' on a context channel and a timer channel.
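+ // A sketch of that pattern (assuming c is a Clock and ctx a context.Context):
+ //
+ //  t := c.NewTimer(d)
+ //  defer t.Stop()
+ //  select {
+ //  case <-ctx.Done():
+ //  case <-t.C():
+ //  }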
+ Sleep(d time.Duration) + // Tick returns the channel of a new Ticker. + // This method does not allow to free/GC the backing ticker. Use + // NewTicker from WithTicker instead. + Tick(d time.Duration) <-chan time.Time +} + +// WithTicker allows for injecting fake or real clocks into code that +// needs to do arbitrary things based on time. +type WithTicker interface { + Clock + // NewTicker returns a new Ticker. + NewTicker(time.Duration) Ticker +} + +// WithDelayedExecution allows for injecting fake or real clocks into +// code that needs to make use of AfterFunc functionality. +type WithDelayedExecution interface { + Clock + // AfterFunc executes f in its own goroutine after waiting + // for d duration and returns a Timer whose channel can be + // closed by calling Stop() on the Timer. + AfterFunc(d time.Duration, f func()) Timer +} + +// Ticker defines the Ticker interface. +type Ticker interface { + C() <-chan time.Time + Stop() +} + +var _ = WithTicker(RealClock{}) + +// RealClock really calls time.Now() +type RealClock struct{} + +// Now returns the current time. +func (RealClock) Now() time.Time { + return time.Now() +} + +// Since returns time since the specified timestamp. +func (RealClock) Since(ts time.Time) time.Duration { + return time.Since(ts) +} + +// After is the same as time.After(d). +// This method does not allow to free/GC the backing timer before it fires. Use +// NewTimer instead. +func (RealClock) After(d time.Duration) <-chan time.Time { + return time.After(d) +} + +// NewTimer is the same as time.NewTimer(d) +func (RealClock) NewTimer(d time.Duration) Timer { + return &realTimer{ + timer: time.NewTimer(d), + } +} + +// AfterFunc is the same as time.AfterFunc(d, f). +func (RealClock) AfterFunc(d time.Duration, f func()) Timer { + return &realTimer{ + timer: time.AfterFunc(d, f), + } +} + +// Tick is the same as time.Tick(d) +// This method does not allow to free/GC the backing ticker. Use +// NewTicker instead. +func (RealClock) Tick(d time.Duration) <-chan time.Time { + return time.Tick(d) +} + +// NewTicker returns a new Ticker. +func (RealClock) NewTicker(d time.Duration) Ticker { + return &realTicker{ + ticker: time.NewTicker(d), + } +} + +// Sleep is the same as time.Sleep(d) +// Consider making the sleep interruptible by using 'select' on a context channel and a timer channel. +func (RealClock) Sleep(d time.Duration) { + time.Sleep(d) +} + +// Timer allows for injecting fake or real timers into code that +// needs to do arbitrary things based on time. +type Timer interface { + C() <-chan time.Time + Stop() bool + Reset(d time.Duration) bool +} + +var _ = Timer(&realTimer{}) + +// realTimer is backed by an actual time.Timer. +type realTimer struct { + timer *time.Timer +} + +// C returns the underlying timer's channel. +func (r *realTimer) C() <-chan time.Time { + return r.timer.C +} + +// Stop calls Stop() on the underlying timer. +func (r *realTimer) Stop() bool { + return r.timer.Stop() +} + +// Reset calls Reset() on the underlying timer. 
+func (r *realTimer) Reset(d time.Duration) bool { + return r.timer.Reset(d) +} + +type realTicker struct { + ticker *time.Ticker +} + +func (r *realTicker) C() <-chan time.Time { + return r.ticker.C +} + +func (r *realTicker) Stop() { + r.ticker.Stop() +} diff --git a/vendor/k8s.io/utils/clock/testing/fake_clock.go b/vendor/k8s.io/utils/clock/testing/fake_clock.go new file mode 100644 index 0000000000000..fb493c4babaaa --- /dev/null +++ b/vendor/k8s.io/utils/clock/testing/fake_clock.go @@ -0,0 +1,360 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package testing + +import ( + "sync" + "time" + + "k8s.io/utils/clock" +) + +var ( + _ = clock.PassiveClock(&FakePassiveClock{}) + _ = clock.WithTicker(&FakeClock{}) + _ = clock.Clock(&IntervalClock{}) +) + +// FakePassiveClock implements PassiveClock, but returns an arbitrary time. +type FakePassiveClock struct { + lock sync.RWMutex + time time.Time +} + +// FakeClock implements clock.Clock, but returns an arbitrary time. +type FakeClock struct { + FakePassiveClock + + // waiters are waiting for the fake time to pass their specified time + waiters []*fakeClockWaiter +} + +type fakeClockWaiter struct { + targetTime time.Time + stepInterval time.Duration + skipIfBlocked bool + destChan chan time.Time + fired bool + afterFunc func() +} + +// NewFakePassiveClock returns a new FakePassiveClock. +func NewFakePassiveClock(t time.Time) *FakePassiveClock { + return &FakePassiveClock{ + time: t, + } +} + +// NewFakeClock constructs a fake clock set to the provided time. +func NewFakeClock(t time.Time) *FakeClock { + return &FakeClock{ + FakePassiveClock: *NewFakePassiveClock(t), + } +} + +// Now returns f's time. +func (f *FakePassiveClock) Now() time.Time { + f.lock.RLock() + defer f.lock.RUnlock() + return f.time +} + +// Since returns time since the time in f. +func (f *FakePassiveClock) Since(ts time.Time) time.Duration { + f.lock.RLock() + defer f.lock.RUnlock() + return f.time.Sub(ts) +} + +// SetTime sets the time on the FakePassiveClock. +func (f *FakePassiveClock) SetTime(t time.Time) { + f.lock.Lock() + defer f.lock.Unlock() + f.time = t +} + +// After is the fake version of time.After(d). +func (f *FakeClock) After(d time.Duration) <-chan time.Time { + f.lock.Lock() + defer f.lock.Unlock() + stopTime := f.time.Add(d) + ch := make(chan time.Time, 1) // Don't block! + f.waiters = append(f.waiters, &fakeClockWaiter{ + targetTime: stopTime, + destChan: ch, + }) + return ch +} + +// NewTimer constructs a fake timer, akin to time.NewTimer(d). +func (f *FakeClock) NewTimer(d time.Duration) clock.Timer { + f.lock.Lock() + defer f.lock.Unlock() + stopTime := f.time.Add(d) + ch := make(chan time.Time, 1) // Don't block! + timer := &fakeTimer{ + fakeClock: f, + waiter: fakeClockWaiter{ + targetTime: stopTime, + destChan: ch, + }, + } + f.waiters = append(f.waiters, &timer.waiter) + return timer +} + +// AfterFunc is the Fake version of time.AfterFunc(d, cb). 
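+// A deterministic test sketch (illustrative, using the fake clock defined in
+// this file):
+//
+//  fc := NewFakeClock(time.Now())
+//  fired := make(chan struct{})
+//  fc.AfterFunc(time.Minute, func() { close(fired) })
+//  fc.Step(time.Minute) // advances fake time to the deadline and runs cb
+//  <-fired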
+func (f *FakeClock) AfterFunc(d time.Duration, cb func()) clock.Timer { + f.lock.Lock() + defer f.lock.Unlock() + stopTime := f.time.Add(d) + ch := make(chan time.Time, 1) // Don't block! + + timer := &fakeTimer{ + fakeClock: f, + waiter: fakeClockWaiter{ + targetTime: stopTime, + destChan: ch, + afterFunc: cb, + }, + } + f.waiters = append(f.waiters, &timer.waiter) + return timer +} + +// Tick constructs a fake ticker, akin to time.Tick +func (f *FakeClock) Tick(d time.Duration) <-chan time.Time { + if d <= 0 { + return nil + } + f.lock.Lock() + defer f.lock.Unlock() + tickTime := f.time.Add(d) + ch := make(chan time.Time, 1) // hold one tick + f.waiters = append(f.waiters, &fakeClockWaiter{ + targetTime: tickTime, + stepInterval: d, + skipIfBlocked: true, + destChan: ch, + }) + + return ch +} + +// NewTicker returns a new Ticker. +func (f *FakeClock) NewTicker(d time.Duration) clock.Ticker { + f.lock.Lock() + defer f.lock.Unlock() + tickTime := f.time.Add(d) + ch := make(chan time.Time, 1) // hold one tick + f.waiters = append(f.waiters, &fakeClockWaiter{ + targetTime: tickTime, + stepInterval: d, + skipIfBlocked: true, + destChan: ch, + }) + + return &fakeTicker{ + c: ch, + } +} + +// Step moves the clock by Duration and notifies anyone that's called After, +// Tick, or NewTimer. +func (f *FakeClock) Step(d time.Duration) { + f.lock.Lock() + defer f.lock.Unlock() + f.setTimeLocked(f.time.Add(d)) +} + +// SetTime sets the time. +func (f *FakeClock) SetTime(t time.Time) { + f.lock.Lock() + defer f.lock.Unlock() + f.setTimeLocked(t) +} + +// Actually changes the time and checks any waiters. f must be write-locked. +func (f *FakeClock) setTimeLocked(t time.Time) { + f.time = t + newWaiters := make([]*fakeClockWaiter, 0, len(f.waiters)) + for i := range f.waiters { + w := f.waiters[i] + if !w.targetTime.After(t) { + if w.skipIfBlocked { + select { + case w.destChan <- t: + w.fired = true + default: + } + } else { + w.destChan <- t + w.fired = true + } + + if w.afterFunc != nil { + w.afterFunc() + } + + if w.stepInterval > 0 { + for !w.targetTime.After(t) { + w.targetTime = w.targetTime.Add(w.stepInterval) + } + newWaiters = append(newWaiters, w) + } + + } else { + newWaiters = append(newWaiters, f.waiters[i]) + } + } + f.waiters = newWaiters +} + +// HasWaiters returns true if After or AfterFunc has been called on f but not yet satisfied (so you can +// write race-free tests). +func (f *FakeClock) HasWaiters() bool { + f.lock.RLock() + defer f.lock.RUnlock() + return len(f.waiters) > 0 +} + +// Sleep is akin to time.Sleep +func (f *FakeClock) Sleep(d time.Duration) { + f.Step(d) +} + +// IntervalClock implements clock.PassiveClock, but each invocation of Now steps the clock forward the specified duration. +// IntervalClock technically implements the other methods of clock.Clock, but each implementation is just a panic. +// See SimpleIntervalClock for an alternative that only has the methods of PassiveClock. +type IntervalClock struct { + Time time.Time + Duration time.Duration +} + +// Now returns i's time. +func (i *IntervalClock) Now() time.Time { + i.Time = i.Time.Add(i.Duration) + return i.Time +} + +// Since returns time since the time in i. +func (i *IntervalClock) Since(ts time.Time) time.Duration { + return i.Time.Sub(ts) +} + +// After is unimplemented, will panic. +// TODO: make interval clock use FakeClock so this can be implemented. 
+func (*IntervalClock) After(d time.Duration) <-chan time.Time { + panic("IntervalClock doesn't implement After") +} + +// NewTimer is unimplemented, will panic. +// TODO: make interval clock use FakeClock so this can be implemented. +func (*IntervalClock) NewTimer(d time.Duration) clock.Timer { + panic("IntervalClock doesn't implement NewTimer") +} + +// AfterFunc is unimplemented, will panic. +// TODO: make interval clock use FakeClock so this can be implemented. +func (*IntervalClock) AfterFunc(d time.Duration, f func()) clock.Timer { + panic("IntervalClock doesn't implement AfterFunc") +} + +// Tick is unimplemented, will panic. +// TODO: make interval clock use FakeClock so this can be implemented. +func (*IntervalClock) Tick(d time.Duration) <-chan time.Time { + panic("IntervalClock doesn't implement Tick") +} + +// NewTicker has no implementation yet and is omitted. +// TODO: make interval clock use FakeClock so this can be implemented. +//func (*IntervalClock) NewTicker(d time.Duration) clock.Ticker { +// panic("IntervalClock doesn't implement NewTicker") +//} + +// Sleep is unimplemented, will panic. +func (*IntervalClock) Sleep(d time.Duration) { + panic("IntervalClock doesn't implement Sleep") +} + +var _ = clock.Timer(&fakeTimer{}) + +// fakeTimer implements clock.Timer based on a FakeClock. +type fakeTimer struct { + fakeClock *FakeClock + waiter fakeClockWaiter +} + +// C returns the channel that notifies when this timer has fired. +func (f *fakeTimer) C() <-chan time.Time { + return f.waiter.destChan +} + +// Stop stops the timer and returns true if the timer has not yet fired, or false otherwise. +func (f *fakeTimer) Stop() bool { + f.fakeClock.lock.Lock() + defer f.fakeClock.lock.Unlock() + + newWaiters := make([]*fakeClockWaiter, 0, len(f.fakeClock.waiters)) + for i := range f.fakeClock.waiters { + w := f.fakeClock.waiters[i] + if w != &f.waiter { + newWaiters = append(newWaiters, w) + } + } + + f.fakeClock.waiters = newWaiters + + return !f.waiter.fired +} + +// Reset resets the timer to the fake clock's "now" + d. It returns true if the timer has not yet +// fired, or false otherwise. +func (f *fakeTimer) Reset(d time.Duration) bool { + f.fakeClock.lock.Lock() + defer f.fakeClock.lock.Unlock() + + active := !f.waiter.fired + + f.waiter.fired = false + f.waiter.targetTime = f.fakeClock.time.Add(d) + + var isWaiting bool + for i := range f.fakeClock.waiters { + w := f.fakeClock.waiters[i] + if w == &f.waiter { + isWaiting = true + break + } + } + if !isWaiting { + f.fakeClock.waiters = append(f.fakeClock.waiters, &f.waiter) + } + + return active +} + +type fakeTicker struct { + c <-chan time.Time +} + +func (t *fakeTicker) C() <-chan time.Time { + return t.c +} + +func (t *fakeTicker) Stop() { +} diff --git a/vendor/k8s.io/utils/clock/testing/simple_interval_clock.go b/vendor/k8s.io/utils/clock/testing/simple_interval_clock.go new file mode 100644 index 0000000000000..951ca4d170af8 --- /dev/null +++ b/vendor/k8s.io/utils/clock/testing/simple_interval_clock.go @@ -0,0 +1,44 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package testing + +import ( + "time" + + "k8s.io/utils/clock" +) + +var ( + _ = clock.PassiveClock(&SimpleIntervalClock{}) +) + +// SimpleIntervalClock implements clock.PassiveClock, but each invocation of Now steps the clock forward the specified duration +type SimpleIntervalClock struct { + Time time.Time + Duration time.Duration +} + +// Now returns i's time. +func (i *SimpleIntervalClock) Now() time.Time { + i.Time = i.Time.Add(i.Duration) + return i.Time +} + +// Since returns time since the time in i. +func (i *SimpleIntervalClock) Since(ts time.Time) time.Duration { + return i.Time.Sub(ts) +} diff --git a/vendor/k8s.io/utils/internal/third_party/forked/golang/LICENSE b/vendor/k8s.io/utils/internal/third_party/forked/golang/LICENSE new file mode 100644 index 0000000000000..74487567632c8 --- /dev/null +++ b/vendor/k8s.io/utils/internal/third_party/forked/golang/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2012 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/k8s.io/utils/internal/third_party/forked/golang/PATENTS b/vendor/k8s.io/utils/internal/third_party/forked/golang/PATENTS new file mode 100644 index 0000000000000..733099041f84f --- /dev/null +++ b/vendor/k8s.io/utils/internal/third_party/forked/golang/PATENTS @@ -0,0 +1,22 @@ +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the Go project. + +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of Go, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of Go. 
This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. If you or your agent or exclusive licensee institute or +order or agree to the institution of patent litigation against any +entity (including a cross-claim or counterclaim in a lawsuit) alleging +that this implementation of Go or any code incorporated within this +implementation of Go constitutes direct or contributory patent +infringement, or inducement of patent infringement, then any patent +rights granted to you under this License for this implementation of Go +shall terminate as of the date such litigation is filed. diff --git a/vendor/k8s.io/utils/internal/third_party/forked/golang/net/ip.go b/vendor/k8s.io/utils/internal/third_party/forked/golang/net/ip.go new file mode 100644 index 0000000000000..4340b6e74829a --- /dev/null +++ b/vendor/k8s.io/utils/internal/third_party/forked/golang/net/ip.go @@ -0,0 +1,236 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// IP address manipulations +// +// IPv4 addresses are 4 bytes; IPv6 addresses are 16 bytes. +// An IPv4 address can be converted to an IPv6 address by +// adding a canonical prefix (10 zeros, 2 0xFFs). +// This library accepts either size of byte slice but always +// returns 16-byte addresses. + +package net + +/////////////////////////////////////////////////////////////////////////////// +// NOTE: This file was forked because we need to maintain backwards-compatible +// IP parsing logic, which was changed in a correct but incompatible way in +// go-1.17. +// +// See https://issue.k8s.io/100895 +/////////////////////////////////////////////////////////////////////////////// + +import ( + stdnet "net" +) + +// +// Lean on the standard net lib as much as possible. +// + +type IP = stdnet.IP +type IPNet = stdnet.IPNet +type ParseError = stdnet.ParseError + +const IPv4len = stdnet.IPv4len +const IPv6len = stdnet.IPv6len + +var CIDRMask = stdnet.CIDRMask +var IPv4 = stdnet.IPv4 + +// Parse IPv4 address (d.d.d.d). +func parseIPv4(s string) IP { + var p [IPv4len]byte + for i := 0; i < IPv4len; i++ { + if len(s) == 0 { + // Missing octets. + return nil + } + if i > 0 { + if s[0] != '.' { + return nil + } + s = s[1:] + } + n, c, ok := dtoi(s) + if !ok || n > 0xFF { + return nil + } + // + // NOTE: This correct check was added for go-1.17, but is a + // backwards-incompatible change for kubernetes users, who might have + // stored data which uses these leading zeroes already. + // + // See https://issue.k8s.io/100895 + // + //if c > 1 && s[0] == '0' { + // // Reject non-zero components with leading zeroes. + // return nil + //} + s = s[c:] + p[i] = byte(n) + } + if len(s) != 0 { + return nil + } + return IPv4(p[0], p[1], p[2], p[3]) +} + +// parseIPv6 parses s as a literal IPv6 address described in RFC 4291 +// and RFC 5952. +func parseIPv6(s string) (ip IP) { + ip = make(IP, IPv6len) + ellipsis := -1 // position of ellipsis in ip + + // Might have leading ellipsis + if len(s) >= 2 && s[0] == ':' && s[1] == ':' { + ellipsis = 0 + s = s[2:] + // Might be only ellipsis + if len(s) == 0 { + return ip + } + } + + // Loop, parsing hex numbers followed by colon. + i := 0 + for i < IPv6len { + // Hex number. + n, c, ok := xtoi(s) + if !ok || n > 0xFFFF { + return nil + } + + // If followed by dot, might be in trailing IPv4. + if c < len(s) && s[c] == '.' 
{ + if ellipsis < 0 && i != IPv6len-IPv4len { + // Not the right place. + return nil + } + if i+IPv4len > IPv6len { + // Not enough room. + return nil + } + ip4 := parseIPv4(s) + if ip4 == nil { + return nil + } + ip[i] = ip4[12] + ip[i+1] = ip4[13] + ip[i+2] = ip4[14] + ip[i+3] = ip4[15] + s = "" + i += IPv4len + break + } + + // Save this 16-bit chunk. + ip[i] = byte(n >> 8) + ip[i+1] = byte(n) + i += 2 + + // Stop at end of string. + s = s[c:] + if len(s) == 0 { + break + } + + // Otherwise must be followed by colon and more. + if s[0] != ':' || len(s) == 1 { + return nil + } + s = s[1:] + + // Look for ellipsis. + if s[0] == ':' { + if ellipsis >= 0 { // already have one + return nil + } + ellipsis = i + s = s[1:] + if len(s) == 0 { // can be at end + break + } + } + } + + // Must have used entire string. + if len(s) != 0 { + return nil + } + + // If didn't parse enough, expand ellipsis. + if i < IPv6len { + if ellipsis < 0 { + return nil + } + n := IPv6len - i + for j := i - 1; j >= ellipsis; j-- { + ip[j+n] = ip[j] + } + for j := ellipsis + n - 1; j >= ellipsis; j-- { + ip[j] = 0 + } + } else if ellipsis >= 0 { + // Ellipsis must represent at least one 0 group. + return nil + } + return ip +} + +// ParseIP parses s as an IP address, returning the result. +// The string s can be in IPv4 dotted decimal ("192.0.2.1"), IPv6 +// ("2001:db8::68"), or IPv4-mapped IPv6 ("::ffff:192.0.2.1") form. +// If s is not a valid textual representation of an IP address, +// ParseIP returns nil. +func ParseIP(s string) IP { + for i := 0; i < len(s); i++ { + switch s[i] { + case '.': + return parseIPv4(s) + case ':': + return parseIPv6(s) + } + } + return nil +} + +// ParseCIDR parses s as a CIDR notation IP address and prefix length, +// like "192.0.2.0/24" or "2001:db8::/32", as defined in +// RFC 4632 and RFC 4291. +// +// It returns the IP address and the network implied by the IP and +// prefix length. +// For example, ParseCIDR("192.0.2.1/24") returns the IP address +// 192.0.2.1 and the network 192.0.2.0/24. +func ParseCIDR(s string) (IP, *IPNet, error) { + i := indexByteString(s, '/') + if i < 0 { + return nil, nil, &ParseError{Type: "CIDR address", Text: s} + } + addr, mask := s[:i], s[i+1:] + iplen := IPv4len + ip := parseIPv4(addr) + if ip == nil { + iplen = IPv6len + ip = parseIPv6(addr) + } + n, i, ok := dtoi(mask) + if ip == nil || !ok || i != len(mask) || n < 0 || n > 8*iplen { + return nil, nil, &ParseError{Type: "CIDR address", Text: s} + } + m := CIDRMask(n, 8*iplen) + return ip, &IPNet{IP: ip.Mask(m), Mask: m}, nil +} + +// This is copied from go/src/internal/bytealg, which includes versions +// optimized for various platforms. Those optimizations are elided here so we +// don't have to maintain them. +func indexByteString(s string, c byte) int { + for i := 0; i < len(s); i++ { + if s[i] == c { + return i + } + } + return -1 +} diff --git a/vendor/k8s.io/utils/internal/third_party/forked/golang/net/parse.go b/vendor/k8s.io/utils/internal/third_party/forked/golang/net/parse.go new file mode 100644 index 0000000000000..cc2fdcb958532 --- /dev/null +++ b/vendor/k8s.io/utils/internal/third_party/forked/golang/net/parse.go @@ -0,0 +1,59 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Simple file i/o and string manipulation, to avoid +// depending on strconv and bufio and strings. 
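+//
+// For example, dtoi("123x") below returns (123, 3, true), and xtoi("ffz")
+// returns (255, 2, true).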
+ +package net + +/////////////////////////////////////////////////////////////////////////////// +// NOTE: This file was forked because it is used by other code that needed to +// be forked, not because it is used on its own. +/////////////////////////////////////////////////////////////////////////////// + +// Bigger than we need, not too big to worry about overflow +const big = 0xFFFFFF + +// Decimal to integer. +// Returns number, characters consumed, success. +func dtoi(s string) (n int, i int, ok bool) { + n = 0 + for i = 0; i < len(s) && '0' <= s[i] && s[i] <= '9'; i++ { + n = n*10 + int(s[i]-'0') + if n >= big { + return big, i, false + } + } + if i == 0 { + return 0, 0, false + } + return n, i, true +} + +// Hexadecimal to integer. +// Returns number, characters consumed, success. +func xtoi(s string) (n int, i int, ok bool) { + n = 0 + for i = 0; i < len(s); i++ { + if '0' <= s[i] && s[i] <= '9' { + n *= 16 + n += int(s[i] - '0') + } else if 'a' <= s[i] && s[i] <= 'f' { + n *= 16 + n += int(s[i]-'a') + 10 + } else if 'A' <= s[i] && s[i] <= 'F' { + n *= 16 + n += int(s[i]-'A') + 10 + } else { + break + } + if n >= big { + return 0, i, false + } + } + if i == 0 { + return 0, i, false + } + return n, i, true +} diff --git a/vendor/k8s.io/utils/net/ipnet.go b/vendor/k8s.io/utils/net/ipnet.go new file mode 100644 index 0000000000000..2f3ee37f0b803 --- /dev/null +++ b/vendor/k8s.io/utils/net/ipnet.go @@ -0,0 +1,221 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package net + +import ( + "fmt" + "net" + "strings" +) + +// IPNetSet maps string to net.IPNet. +type IPNetSet map[string]*net.IPNet + +// ParseIPNets parses string slice to IPNetSet. +func ParseIPNets(specs ...string) (IPNetSet, error) { + ipnetset := make(IPNetSet) + for _, spec := range specs { + spec = strings.TrimSpace(spec) + _, ipnet, err := ParseCIDRSloppy(spec) + if err != nil { + return nil, err + } + k := ipnet.String() // In case of normalization + ipnetset[k] = ipnet + } + return ipnetset, nil +} + +// Insert adds items to the set. +func (s IPNetSet) Insert(items ...*net.IPNet) { + for _, item := range items { + s[item.String()] = item + } +} + +// Delete removes all items from the set. +func (s IPNetSet) Delete(items ...*net.IPNet) { + for _, item := range items { + delete(s, item.String()) + } +} + +// Has returns true if and only if item is contained in the set. +func (s IPNetSet) Has(item *net.IPNet) bool { + _, contained := s[item.String()] + return contained +} + +// HasAll returns true if and only if all items are contained in the set. 
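+// For example (sketch): a set built via ParseIPNets("10.0.0.0/8", "192.168.0.0/16")
+// reports HasAll as true for those two parsed values, and false as soon as any
+// absent network is included in the arguments.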
+func (s IPNetSet) HasAll(items ...*net.IPNet) bool { + for _, item := range items { + if !s.Has(item) { + return false + } + } + return true +} + +// Difference returns a set of objects that are not in s2 +// For example: +// s1 = {a1, a2, a3} +// s2 = {a1, a2, a4, a5} +// s1.Difference(s2) = {a3} +// s2.Difference(s1) = {a4, a5} +func (s IPNetSet) Difference(s2 IPNetSet) IPNetSet { + result := make(IPNetSet) + for k, i := range s { + _, found := s2[k] + if found { + continue + } + result[k] = i + } + return result +} + +// StringSlice returns a []string with the String representation of each element in the set. +// Order is undefined. +func (s IPNetSet) StringSlice() []string { + a := make([]string, 0, len(s)) + for k := range s { + a = append(a, k) + } + return a +} + +// IsSuperset returns true if and only if s1 is a superset of s2. +func (s IPNetSet) IsSuperset(s2 IPNetSet) bool { + for k := range s2 { + _, found := s[k] + if !found { + return false + } + } + return true +} + +// Equal returns true if and only if s1 is equal (as a set) to s2. +// Two sets are equal if their membership is identical. +// (In practice, this means same elements, order doesn't matter) +func (s IPNetSet) Equal(s2 IPNetSet) bool { + return len(s) == len(s2) && s.IsSuperset(s2) +} + +// Len returns the size of the set. +func (s IPNetSet) Len() int { + return len(s) +} + +// IPSet maps string to net.IP +type IPSet map[string]net.IP + +// ParseIPSet parses string slice to IPSet +func ParseIPSet(items ...string) (IPSet, error) { + ipset := make(IPSet) + for _, item := range items { + ip := ParseIPSloppy(strings.TrimSpace(item)) + if ip == nil { + return nil, fmt.Errorf("error parsing IP %q", item) + } + + ipset[ip.String()] = ip + } + + return ipset, nil +} + +// Insert adds items to the set. +func (s IPSet) Insert(items ...net.IP) { + for _, item := range items { + s[item.String()] = item + } +} + +// Delete removes all items from the set. +func (s IPSet) Delete(items ...net.IP) { + for _, item := range items { + delete(s, item.String()) + } +} + +// Has returns true if and only if item is contained in the set. +func (s IPSet) Has(item net.IP) bool { + _, contained := s[item.String()] + return contained +} + +// HasAll returns true if and only if all items are contained in the set. +func (s IPSet) HasAll(items ...net.IP) bool { + for _, item := range items { + if !s.Has(item) { + return false + } + } + return true +} + +// Difference returns a set of objects that are not in s2 +// For example: +// s1 = {a1, a2, a3} +// s2 = {a1, a2, a4, a5} +// s1.Difference(s2) = {a3} +// s2.Difference(s1) = {a4, a5} +func (s IPSet) Difference(s2 IPSet) IPSet { + result := make(IPSet) + for k, i := range s { + _, found := s2[k] + if found { + continue + } + result[k] = i + } + return result +} + +// StringSlice returns a []string with the String representation of each element in the set. +// Order is undefined. +func (s IPSet) StringSlice() []string { + a := make([]string, 0, len(s)) + for k := range s { + a = append(a, k) + } + return a +} + +// IsSuperset returns true if and only if s1 is a superset of s2. +func (s IPSet) IsSuperset(s2 IPSet) bool { + for k := range s2 { + _, found := s[k] + if !found { + return false + } + } + return true +} + +// Equal returns true if and only if s1 is equal (as a set) to s2. +// Two sets are equal if their membership is identical. 
+// (In practice, this means same elements, order doesn't matter) +func (s IPSet) Equal(s2 IPSet) bool { + return len(s) == len(s2) && s.IsSuperset(s2) +} + +// Len returns the size of the set. +func (s IPSet) Len() int { + return len(s) +} diff --git a/vendor/k8s.io/utils/net/net.go b/vendor/k8s.io/utils/net/net.go new file mode 100644 index 0000000000000..b7c08e2e003f5 --- /dev/null +++ b/vendor/k8s.io/utils/net/net.go @@ -0,0 +1,213 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package net + +import ( + "errors" + "fmt" + "math" + "math/big" + "net" + "strconv" +) + +// ParseCIDRs parses a list of cidrs and return error if any is invalid. +// order is maintained +func ParseCIDRs(cidrsString []string) ([]*net.IPNet, error) { + cidrs := make([]*net.IPNet, 0, len(cidrsString)) + for _, cidrString := range cidrsString { + _, cidr, err := ParseCIDRSloppy(cidrString) + if err != nil { + return nil, fmt.Errorf("failed to parse cidr value:%q with error:%v", cidrString, err) + } + cidrs = append(cidrs, cidr) + } + return cidrs, nil +} + +// IsDualStackIPs returns if a slice of ips is: +// - all are valid ips +// - at least one ip from each family (v4 or v6) +func IsDualStackIPs(ips []net.IP) (bool, error) { + v4Found := false + v6Found := false + for _, ip := range ips { + if ip == nil { + return false, fmt.Errorf("ip %v is invalid", ip) + } + + if v4Found && v6Found { + continue + } + + if IsIPv6(ip) { + v6Found = true + continue + } + + v4Found = true + } + + return (v4Found && v6Found), nil +} + +// IsDualStackIPStrings returns if +// - all are valid ips +// - at least one ip from each family (v4 or v6) +func IsDualStackIPStrings(ips []string) (bool, error) { + parsedIPs := make([]net.IP, 0, len(ips)) + for _, ip := range ips { + parsedIP := ParseIPSloppy(ip) + parsedIPs = append(parsedIPs, parsedIP) + } + return IsDualStackIPs(parsedIPs) +} + +// IsDualStackCIDRs returns if +// - all are valid cidrs +// - at least one cidr from each family (v4 or v6) +func IsDualStackCIDRs(cidrs []*net.IPNet) (bool, error) { + v4Found := false + v6Found := false + for _, cidr := range cidrs { + if cidr == nil { + return false, fmt.Errorf("cidr %v is invalid", cidr) + } + + if v4Found && v6Found { + continue + } + + if IsIPv6(cidr.IP) { + v6Found = true + continue + } + v4Found = true + } + + return v4Found && v6Found, nil +} + +// IsDualStackCIDRStrings returns if +// - all are valid cidrs +// - at least one cidr from each family (v4 or v6) +func IsDualStackCIDRStrings(cidrs []string) (bool, error) { + parsedCIDRs, err := ParseCIDRs(cidrs) + if err != nil { + return false, err + } + return IsDualStackCIDRs(parsedCIDRs) +} + +// IsIPv6 returns if netIP is IPv6. +func IsIPv6(netIP net.IP) bool { + return netIP != nil && netIP.To4() == nil +} + +// IsIPv6String returns if ip is IPv6. +func IsIPv6String(ip string) bool { + netIP := ParseIPSloppy(ip) + return IsIPv6(netIP) +} + +// IsIPv6CIDRString returns if cidr is IPv6. +// This assumes cidr is a valid CIDR. 
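+// For example, IsIPv6CIDRString("2001:db8::/32") is true, while
+// IsIPv6CIDRString("192.168.0.0/16") is false.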
+func IsIPv6CIDRString(cidr string) bool { + ip, _, _ := ParseCIDRSloppy(cidr) + return IsIPv6(ip) +} + +// IsIPv6CIDR returns if a cidr is ipv6 +func IsIPv6CIDR(cidr *net.IPNet) bool { + ip := cidr.IP + return IsIPv6(ip) +} + +// IsIPv4 returns if netIP is IPv4. +func IsIPv4(netIP net.IP) bool { + return netIP != nil && netIP.To4() != nil +} + +// IsIPv4String returns if ip is IPv4. +func IsIPv4String(ip string) bool { + netIP := ParseIPSloppy(ip) + return IsIPv4(netIP) +} + +// IsIPv4CIDR returns if a cidr is ipv4 +func IsIPv4CIDR(cidr *net.IPNet) bool { + ip := cidr.IP + return IsIPv4(ip) +} + +// IsIPv4CIDRString returns if cidr is IPv4. +// This assumes cidr is a valid CIDR. +func IsIPv4CIDRString(cidr string) bool { + ip, _, _ := ParseCIDRSloppy(cidr) + return IsIPv4(ip) +} + +// ParsePort parses a string representing an IP port. If the string is not a +// valid port number, this returns an error. +func ParsePort(port string, allowZero bool) (int, error) { + portInt, err := strconv.ParseUint(port, 10, 16) + if err != nil { + return 0, err + } + if portInt == 0 && !allowZero { + return 0, errors.New("0 is not a valid port number") + } + return int(portInt), nil +} + +// BigForIP creates a big.Int based on the provided net.IP +func BigForIP(ip net.IP) *big.Int { + // NOTE: Convert to 16-byte representation so we can + // handle v4 and v6 values the same way. + return big.NewInt(0).SetBytes(ip.To16()) +} + +// AddIPOffset adds the provided integer offset to a base big.Int representing a net.IP +// NOTE: If you started with a v4 address and overflow it, you get a v6 result. +func AddIPOffset(base *big.Int, offset int) net.IP { + r := big.NewInt(0).Add(base, big.NewInt(int64(offset))).Bytes() + r = append(make([]byte, 16), r...) + return net.IP(r[len(r)-16:]) +} + +// RangeSize returns the size of a range in valid addresses. +// returns the size of the subnet (or math.MaxInt64 if the range size would overflow int64) +func RangeSize(subnet *net.IPNet) int64 { + ones, bits := subnet.Mask.Size() + if bits == 32 && (bits-ones) >= 31 || bits == 128 && (bits-ones) >= 127 { + return 0 + } + // this checks that we are not overflowing an int64 + if bits-ones >= 63 { + return math.MaxInt64 + } + return int64(1) << uint(bits-ones) +} + +// GetIndexedIP returns a net.IP that is subnet.IP + index in the contiguous IP space. +func GetIndexedIP(subnet *net.IPNet, index int) (net.IP, error) { + ip := AddIPOffset(BigForIP(subnet.IP), index) + if !subnet.Contains(ip) { + return nil, fmt.Errorf("can't generate IP with index %d from subnet. subnet too small. subnet: %q", index, subnet) + } + return ip, nil +} diff --git a/vendor/k8s.io/utils/net/parse.go b/vendor/k8s.io/utils/net/parse.go new file mode 100644 index 0000000000000..400d364d89f63 --- /dev/null +++ b/vendor/k8s.io/utils/net/parse.go @@ -0,0 +1,33 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package net + +import ( + forkednet "k8s.io/utils/internal/third_party/forked/golang/net" +) + +// ParseIPSloppy is identical to Go's standard net.ParseIP, except that it allows +// leading '0' characters on numbers. Go used to allow this and then changed +// the behavior in 1.17. We're choosing to keep it for compat with potential +// stored values. +var ParseIPSloppy = forkednet.ParseIP + +// ParseCIDRSloppy is identical to Go's standard net.ParseCIDR, except that it allows +// leading '0' characters on numbers. Go used to allow this and then changed +// the behavior in 1.17. We're choosing to keep it for compat with potential +// stored values. +var ParseCIDRSloppy = forkednet.ParseCIDR diff --git a/vendor/k8s.io/utils/net/port.go b/vendor/k8s.io/utils/net/port.go new file mode 100644 index 0000000000000..7ac04f0dc9834 --- /dev/null +++ b/vendor/k8s.io/utils/net/port.go @@ -0,0 +1,137 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package net + +import ( + "fmt" + "net" + "strconv" + "strings" +) + +// IPFamily refers to a specific family if not empty, i.e. "4" or "6". +type IPFamily string + +// Constants for valid IPFamilys: +const ( + IPv4 IPFamily = "4" + IPv6 = "6" +) + +// Protocol is a network protocol support by LocalPort. +type Protocol string + +// Constants for valid protocols: +const ( + TCP Protocol = "TCP" + UDP Protocol = "UDP" +) + +// LocalPort represents an IP address and port pair along with a protocol +// and potentially a specific IP family. +// A LocalPort can be opened and subsequently closed. +type LocalPort struct { + // Description is an arbitrary string. + Description string + // IP is the IP address part of a given local port. + // If this string is empty, the port binds to all local IP addresses. + IP string + // If IPFamily is not empty, the port binds only to addresses of this + // family. + // IF empty along with IP, bind to local addresses of any family. + IPFamily IPFamily + // Port is the port number. + // A value of 0 causes a port to be automatically chosen. + Port int + // Protocol is the protocol, e.g. TCP + Protocol Protocol +} + +// NewLocalPort returns a LocalPort instance and ensures IPFamily and IP are +// consistent and that the given protocol is valid. 
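+// An empty ipFamily matches either family; a non-empty ip must parse and
+// belong to the requested family.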
+func NewLocalPort(desc, ip string, ipFamily IPFamily, port int, protocol Protocol) (*LocalPort, error) { + if protocol != TCP && protocol != UDP { + return nil, fmt.Errorf("Unsupported protocol %s", protocol) + } + if ipFamily != "" && ipFamily != "4" && ipFamily != "6" { + return nil, fmt.Errorf("Invalid IP family %s", ipFamily) + } + if ip != "" { + parsedIP := ParseIPSloppy(ip) + if parsedIP == nil { + return nil, fmt.Errorf("invalid ip address %s", ip) + } + asIPv4 := parsedIP.To4() + if asIPv4 == nil && ipFamily == IPv4 || asIPv4 != nil && ipFamily == IPv6 { + return nil, fmt.Errorf("ip address and family mismatch %s, %s", ip, ipFamily) + } + } + return &LocalPort{Description: desc, IP: ip, IPFamily: ipFamily, Port: port, Protocol: protocol}, nil +} + +func (lp *LocalPort) String() string { + ipPort := net.JoinHostPort(lp.IP, strconv.Itoa(lp.Port)) + return fmt.Sprintf("%q (%s/%s%s)", lp.Description, ipPort, strings.ToLower(string(lp.Protocol)), lp.IPFamily) +} + +// Closeable closes an opened LocalPort. +type Closeable interface { + Close() error +} + +// PortOpener can open a LocalPort and allows later closing it. +type PortOpener interface { + OpenLocalPort(lp *LocalPort) (Closeable, error) +} + +type listenPortOpener struct{} + +// ListenPortOpener opens ports by calling bind() and listen(). +var ListenPortOpener listenPortOpener + +// OpenLocalPort holds the given local port open. +func (l *listenPortOpener) OpenLocalPort(lp *LocalPort) (Closeable, error) { + return openLocalPort(lp) +} + +func openLocalPort(lp *LocalPort) (Closeable, error) { + var socket Closeable + hostPort := net.JoinHostPort(lp.IP, strconv.Itoa(lp.Port)) + switch lp.Protocol { + case TCP: + network := "tcp" + string(lp.IPFamily) + listener, err := net.Listen(network, hostPort) + if err != nil { + return nil, err + } + socket = listener + case UDP: + network := "udp" + string(lp.IPFamily) + addr, err := net.ResolveUDPAddr(network, hostPort) + if err != nil { + return nil, err + } + conn, err := net.ListenUDP(network, addr) + if err != nil { + return nil, err + } + socket = conn + default: + return nil, fmt.Errorf("unknown protocol %q", lp.Protocol) + } + return socket, nil +} diff --git a/vendor/k8s.io/utils/trace/trace.go b/vendor/k8s.io/utils/trace/trace.go index 2af4967ca03df..3023d1066e305 100644 --- a/vendor/k8s.io/utils/trace/trace.go +++ b/vendor/k8s.io/utils/trace/trace.go @@ -56,7 +56,7 @@ func writeTraceItemSummary(b *bytes.Buffer, msg string, totalTime time.Duration, b.WriteString(" ") } - b.WriteString(fmt.Sprintf("%vms (%v)", durationToMilliseconds(totalTime), startTime.Format("15:04:00.000"))) + b.WriteString(fmt.Sprintf("%vms (%v)", durationToMilliseconds(totalTime), startTime.Format("15:04:05.000"))) } func durationToMilliseconds(timeDuration time.Duration) int64 { diff --git a/vendor/modules.txt b/vendor/modules.txt index 5b7ed3d9b61b3..b016aa5c0e943 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -502,10 +502,11 @@ github.com/google/uuid github.com/googleapis/gax-go/v2 github.com/googleapis/gax-go/v2/apierror github.com/googleapis/gax-go/v2/apierror/internal/proto -# github.com/googleapis/gnostic v0.4.1 +# github.com/googleapis/gnostic v0.5.5 ## explicit; go 1.12 github.com/googleapis/gnostic/compiler github.com/googleapis/gnostic/extensions +github.com/googleapis/gnostic/jsonschema github.com/googleapis/gnostic/openapiv2 # github.com/gophercloud/gophercloud v0.24.0 ## explicit; go 1.14 @@ -1436,7 +1437,7 @@ gopkg.in/yaml.v3 # inet.af/netaddr v0.0.0-20211027220019-c74959edd3b6 
## explicit; go 1.12 inet.af/netaddr -# k8s.io/api v0.22.7 => k8s.io/api v0.21.0 +# k8s.io/api v0.23.6 => k8s.io/api v0.23.6 ## explicit; go 1.16 k8s.io/api/admissionregistration/v1 k8s.io/api/admissionregistration/v1beta1 @@ -1449,6 +1450,7 @@ k8s.io/api/authentication/v1beta1 k8s.io/api/authorization/v1 k8s.io/api/authorization/v1beta1 k8s.io/api/autoscaling/v1 +k8s.io/api/autoscaling/v2 k8s.io/api/autoscaling/v2beta1 k8s.io/api/autoscaling/v2beta2 k8s.io/api/batch/v1 @@ -1465,6 +1467,7 @@ k8s.io/api/events/v1beta1 k8s.io/api/extensions/v1beta1 k8s.io/api/flowcontrol/v1alpha1 k8s.io/api/flowcontrol/v1beta1 +k8s.io/api/flowcontrol/v1beta2 k8s.io/api/networking/v1 k8s.io/api/networking/v1beta1 k8s.io/api/node/v1 @@ -1481,7 +1484,7 @@ k8s.io/api/scheduling/v1beta1 k8s.io/api/storage/v1 k8s.io/api/storage/v1alpha1 k8s.io/api/storage/v1beta1 -# k8s.io/apimachinery v0.22.7 => k8s.io/apimachinery v0.21.0 +# k8s.io/apimachinery v0.23.6 => k8s.io/apimachinery v0.23.6 ## explicit; go 1.16 k8s.io/apimachinery/pkg/api/errors k8s.io/apimachinery/pkg/api/meta @@ -1505,7 +1508,6 @@ k8s.io/apimachinery/pkg/runtime/serializer/versioning k8s.io/apimachinery/pkg/selection k8s.io/apimachinery/pkg/types k8s.io/apimachinery/pkg/util/cache -k8s.io/apimachinery/pkg/util/clock k8s.io/apimachinery/pkg/util/diff k8s.io/apimachinery/pkg/util/errors k8s.io/apimachinery/pkg/util/framer @@ -1524,7 +1526,7 @@ k8s.io/apimachinery/pkg/util/yaml k8s.io/apimachinery/pkg/version k8s.io/apimachinery/pkg/watch k8s.io/apimachinery/third_party/forked/golang/reflect -# k8s.io/client-go v12.0.0+incompatible => k8s.io/client-go v0.21.0 +# k8s.io/client-go v0.23.6 => k8s.io/client-go v0.23.6 ## explicit; go 1.16 k8s.io/client-go/applyconfigurations/admissionregistration/v1 k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1 @@ -1533,6 +1535,7 @@ k8s.io/client-go/applyconfigurations/apps/v1 k8s.io/client-go/applyconfigurations/apps/v1beta1 k8s.io/client-go/applyconfigurations/apps/v1beta2 k8s.io/client-go/applyconfigurations/autoscaling/v1 +k8s.io/client-go/applyconfigurations/autoscaling/v2 k8s.io/client-go/applyconfigurations/autoscaling/v2beta1 k8s.io/client-go/applyconfigurations/autoscaling/v2beta2 k8s.io/client-go/applyconfigurations/batch/v1 @@ -1549,6 +1552,7 @@ k8s.io/client-go/applyconfigurations/events/v1beta1 k8s.io/client-go/applyconfigurations/extensions/v1beta1 k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1 k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1 +k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2 k8s.io/client-go/applyconfigurations/internal k8s.io/client-go/applyconfigurations/meta/v1 k8s.io/client-go/applyconfigurations/networking/v1 @@ -1581,6 +1585,7 @@ k8s.io/client-go/kubernetes/typed/authentication/v1beta1 k8s.io/client-go/kubernetes/typed/authorization/v1 k8s.io/client-go/kubernetes/typed/authorization/v1beta1 k8s.io/client-go/kubernetes/typed/autoscaling/v1 +k8s.io/client-go/kubernetes/typed/autoscaling/v2 k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1 k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2 k8s.io/client-go/kubernetes/typed/batch/v1 @@ -1597,6 +1602,7 @@ k8s.io/client-go/kubernetes/typed/events/v1beta1 k8s.io/client-go/kubernetes/typed/extensions/v1beta1 k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1 k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1 +k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2 k8s.io/client-go/kubernetes/typed/networking/v1 k8s.io/client-go/kubernetes/typed/networking/v1beta1 
k8s.io/client-go/kubernetes/typed/node/v1 @@ -1614,6 +1620,8 @@ k8s.io/client-go/kubernetes/typed/storage/v1 k8s.io/client-go/kubernetes/typed/storage/v1alpha1 k8s.io/client-go/kubernetes/typed/storage/v1beta1 k8s.io/client-go/pkg/apis/clientauthentication +k8s.io/client-go/pkg/apis/clientauthentication/install +k8s.io/client-go/pkg/apis/clientauthentication/v1 k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1 k8s.io/client-go/pkg/apis/clientauthentication/v1beta1 k8s.io/client-go/pkg/version @@ -1642,16 +1650,28 @@ k8s.io/klog # k8s.io/klog/v2 v2.40.1 ## explicit; go 1.13 k8s.io/klog/v2 -# k8s.io/utils v0.0.0-20201110183641-67b214c5f920 +# k8s.io/kube-openapi v0.0.0-20211115234752-e816edb12b65 +## explicit; go 1.16 +k8s.io/kube-openapi/pkg/schemaconv +k8s.io/kube-openapi/pkg/util/proto +# k8s.io/utils v0.0.0-20211116205334-6203023598ed ## explicit; go 1.12 k8s.io/utils/buffer +k8s.io/utils/clock +k8s.io/utils/clock/testing k8s.io/utils/integer +k8s.io/utils/internal/third_party/forked/golang/net +k8s.io/utils/net k8s.io/utils/trace # rsc.io/binaryregexp v0.2.0 ## explicit; go 1.12 rsc.io/binaryregexp rsc.io/binaryregexp/syntax -# sigs.k8s.io/structured-merge-diff/v4 v4.1.0 +# sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6 +## explicit; go 1.16 +sigs.k8s.io/json +sigs.k8s.io/json/internal/golang/encoding/json +# sigs.k8s.io/structured-merge-diff/v4 v4.2.1 ## explicit; go 1.13 sigs.k8s.io/structured-merge-diff/v4/fieldpath sigs.k8s.io/structured-merge-diff/v4/schema @@ -1664,9 +1684,9 @@ sigs.k8s.io/yaml # github.com/hpcloud/tail => github.com/grafana/tail v0.0.0-20220426200921-98e8eb28ea4c # github.com/Azure/azure-sdk-for-go => github.com/Azure/azure-sdk-for-go v36.2.0+incompatible # github.com/Azure/azure-storage-blob-go => github.com/MasslessParticle/azure-storage-blob-go v0.14.1-0.20220216145902-b5e698eff68e -# k8s.io/client-go => k8s.io/client-go v0.21.0 -# k8s.io/api => k8s.io/api v0.21.0 -# k8s.io/apimachinery => k8s.io/apimachinery v0.21.0 +# k8s.io/client-go => k8s.io/client-go v0.23.6 +# k8s.io/api => k8s.io/api v0.23.6 +# k8s.io/apimachinery => k8s.io/apimachinery v0.23.6 # github.com/hashicorp/consul => github.com/hashicorp/consul v1.5.1 # github.com/gocql/gocql => github.com/grafana/gocql v0.0.0-20200605141915-ba5dc39ece85 # github.com/bradfitz/gomemcache => github.com/themihai/gomemcache v0.0.0-20180902122335-24332e2d58ab diff --git a/vendor/sigs.k8s.io/json/CONTRIBUTING.md b/vendor/sigs.k8s.io/json/CONTRIBUTING.md new file mode 100644 index 0000000000000..3bcd26689f38f --- /dev/null +++ b/vendor/sigs.k8s.io/json/CONTRIBUTING.md @@ -0,0 +1,42 @@ +# Contributing Guidelines + +Welcome to Kubernetes. We are excited about the prospect of you joining our [community](https://git.k8s.io/community)! The Kubernetes community abides by the CNCF [code of conduct](code-of-conduct.md). Here is an excerpt: + +_As contributors and maintainers of this project, and in the interest of fostering an open and welcoming community, we pledge to respect all people who contribute through reporting issues, posting feature requests, updating documentation, submitting pull requests or patches, and other activities._ + +## Criteria for adding code here + +This library adapts the stdlib `encoding/json` decoder to be compatible with +Kubernetes JSON decoding, and is not expected to actively add new features. + +It may be updated with changes from the stdlib `encoding/json` decoder. 
+ +Any code that is added must: +* Have full unit test and benchmark coverage +* Be backward compatible with the existing exposed go API +* Have zero external dependencies +* Preserve existing benchmark performance +* Preserve compatibility with existing decoding behavior of `UnmarshalCaseSensitivePreserveInts()` or `UnmarshalStrict()` +* Avoid use of `unsafe` + +## Getting Started + +We have full documentation on how to get started contributing here: + + + +- [Contributor License Agreement](https://git.k8s.io/community/CLA.md) Kubernetes projects require that you sign a Contributor License Agreement (CLA) before we can accept your pull requests +- [Kubernetes Contributor Guide](https://git.k8s.io/community/contributors/guide) - Main contributor documentation, or you can just jump directly to the [contributing section](https://git.k8s.io/community/contributors/guide#contributing) +- [Contributor Cheat Sheet](https://git.k8s.io/community/contributors/guide/contributor-cheatsheet) - Common resources for existing developers + +## Community, discussion, contribution, and support + +You can reach the maintainers of this project via the +[sig-api-machinery mailing list / channels](https://github.com/kubernetes/community/tree/master/sig-api-machinery#contact). + +## Mentorship + +- [Mentoring Initiatives](https://git.k8s.io/community/mentoring) - We have a diverse set of mentorship programs available that are always looking for volunteers! + diff --git a/vendor/sigs.k8s.io/json/LICENSE b/vendor/sigs.k8s.io/json/LICENSE new file mode 100644 index 0000000000000..e5adf7f0c0c34 --- /dev/null +++ b/vendor/sigs.k8s.io/json/LICENSE @@ -0,0 +1,238 @@ +Files other than internal/golang/* licensed under: + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + +------------------ + +internal/golang/* files licensed under: + + +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/sigs.k8s.io/json/Makefile b/vendor/sigs.k8s.io/json/Makefile new file mode 100644 index 0000000000000..07b8bfa857f59 --- /dev/null +++ b/vendor/sigs.k8s.io/json/Makefile @@ -0,0 +1,35 @@ +.PHONY: default build test benchmark fmt vet + +default: build + +build: + go build ./... + +test: + go test sigs.k8s.io/json/... + +benchmark: + go test sigs.k8s.io/json -bench . -benchmem + +fmt: + go mod tidy + gofmt -s -w *.go + +vet: + go vet sigs.k8s.io/json + + @echo "checking for external dependencies" + @deps=$$(go mod graph); \ + if [ -n "$${deps}" ]; then \ + echo "only stdlib dependencies allowed, found:"; \ + echo "$${deps}"; \ + exit 1; \ + fi + + @echo "checking for unsafe use" + @unsafe=$$(go list -f '{{.ImportPath}} depends on {{.Imports}}' sigs.k8s.io/json/... | grep unsafe || true); \ + if [ -n "$${unsafe}" ]; then \ + echo "no dependencies on unsafe allowed, found:"; \ + echo "$${unsafe}"; \ + exit 1; \ + fi diff --git a/vendor/sigs.k8s.io/json/OWNERS b/vendor/sigs.k8s.io/json/OWNERS new file mode 100644 index 0000000000000..0fadafbddbfd7 --- /dev/null +++ b/vendor/sigs.k8s.io/json/OWNERS @@ -0,0 +1,6 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +approvers: + - deads2k + - lavalamp + - liggitt diff --git a/vendor/sigs.k8s.io/json/README.md b/vendor/sigs.k8s.io/json/README.md new file mode 100644 index 0000000000000..0ef8f51c21fea --- /dev/null +++ b/vendor/sigs.k8s.io/json/README.md @@ -0,0 +1,40 @@ +# sigs.k8s.io/json + +[![Go Reference](https://pkg.go.dev/badge/sigs.k8s.io/json.svg)](https://pkg.go.dev/sigs.k8s.io/json) + +## Introduction + +This library is a subproject of [sig-api-machinery](https://github.com/kubernetes/community/tree/master/sig-api-machinery#json). +It provides case-sensitive, integer-preserving JSON unmarshaling functions based on `encoding/json` `Unmarshal()`. + +## Compatibility + +The `UnmarshalCaseSensitivePreserveInts()` function behaves like `encoding/json#Unmarshal()` with the following differences: + +- JSON object keys are treated case-sensitively. + Object keys must exactly match json tag names (for tagged struct fields) + or struct field names (for untagged struct fields). +- JSON integers are unmarshaled into `interface{}` fields as an `int64` instead of a + `float64` when possible, falling back to `float64` on any parse or overflow error. +- Syntax errors do not return an `encoding/json` `*SyntaxError` error. + Instead, they return an error which can be passed to `SyntaxErrorOffset()` to obtain an offset. 
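+
+A minimal usage sketch (illustrative only):
+
+```go
+package main
+
+import (
+	"fmt"
+
+	kjson "sigs.k8s.io/json"
+)
+
+func main() {
+	var out map[string]interface{}
+	data := []byte(`{"replicas": 3}`)
+	if err := kjson.UnmarshalCaseSensitivePreserveInts(data, &out); err != nil {
+		// For syntax errors, SyntaxErrorOffset reports the byte offset.
+		if isSyntax, offset := kjson.SyntaxErrorOffset(err); isSyntax {
+			fmt.Printf("syntax error at offset %d\n", offset)
+		}
+		return
+	}
+	// Integers are preserved per the compatibility notes above.
+	fmt.Printf("%T\n", out["replicas"]) // int64, not float64
+}
+```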
+ +## Additional capabilities + +The `UnmarshalStrict()` function decodes identically to `UnmarshalCaseSensitivePreserveInts()`, +and also returns non-fatal strict errors encountered while decoding: + +- Duplicate fields encountered +- Unknown fields encountered + +### Community, discussion, contribution, and support + +You can reach the maintainers of this project via the +[sig-api-machinery mailing list / channels](https://github.com/kubernetes/community/tree/master/sig-api-machinery#contact). + +### Code of conduct + +Participation in the Kubernetes community is governed by the [Kubernetes Code of Conduct](code-of-conduct.md). + +[owners]: https://git.k8s.io/community/contributors/guide/owners.md +[Creative Commons 4.0]: https://git.k8s.io/website/LICENSE diff --git a/vendor/sigs.k8s.io/json/SECURITY.md b/vendor/sigs.k8s.io/json/SECURITY.md new file mode 100644 index 0000000000000..2083d44cdf90a --- /dev/null +++ b/vendor/sigs.k8s.io/json/SECURITY.md @@ -0,0 +1,22 @@ +# Security Policy + +## Security Announcements + +Join the [kubernetes-security-announce] group for security and vulnerability announcements. + +You can also subscribe to an RSS feed of the above using [this link][kubernetes-security-announce-rss]. + +## Reporting a Vulnerability + +Instructions for reporting a vulnerability can be found on the +[Kubernetes Security and Disclosure Information] page. + +## Supported Versions + +Information about supported Kubernetes versions can be found on the +[Kubernetes version and version skew support policy] page on the Kubernetes website. + +[kubernetes-security-announce]: https://groups.google.com/forum/#!forum/kubernetes-security-announce +[kubernetes-security-announce-rss]: https://groups.google.com/forum/feed/kubernetes-security-announce/msgs/rss_v2_0.xml?num=50 +[Kubernetes version and version skew support policy]: https://kubernetes.io/docs/setup/release/version-skew-policy/#supported-versions +[Kubernetes Security and Disclosure Information]: https://kubernetes.io/docs/reference/issues-security/security/#report-a-vulnerability diff --git a/vendor/sigs.k8s.io/json/SECURITY_CONTACTS b/vendor/sigs.k8s.io/json/SECURITY_CONTACTS new file mode 100644 index 0000000000000..6a51db2fe8645 --- /dev/null +++ b/vendor/sigs.k8s.io/json/SECURITY_CONTACTS @@ -0,0 +1,15 @@ +# Defined below are the security contacts for this repo. +# +# They are the contact point for the Product Security Committee to reach out +# to for triaging and handling of incoming issues. +# +# The below names agree to abide by the +# [Embargo Policy](https://git.k8s.io/security/private-distributors-list.md#embargo-policy) +# and will be removed and replaced if they violate that agreement. +# +# DO NOT REPORT SECURITY VULNERABILITIES DIRECTLY TO THESE NAMES, FOLLOW THE +# INSTRUCTIONS AT https://kubernetes.io/security/ + +deads2k +lavalamp +liggitt diff --git a/vendor/sigs.k8s.io/json/code-of-conduct.md b/vendor/sigs.k8s.io/json/code-of-conduct.md new file mode 100644 index 0000000000000..0d15c00cf3252 --- /dev/null +++ b/vendor/sigs.k8s.io/json/code-of-conduct.md @@ -0,0 +1,3 @@ +# Kubernetes Community Code of Conduct + +Please refer to our [Kubernetes Community Code of Conduct](https://git.k8s.io/community/code-of-conduct.md) diff --git a/vendor/sigs.k8s.io/json/doc.go b/vendor/sigs.k8s.io/json/doc.go new file mode 100644 index 0000000000000..050eebb03e0d4 --- /dev/null +++ b/vendor/sigs.k8s.io/json/doc.go @@ -0,0 +1,17 @@ +/* +Copyright 2021 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package json // import "sigs.k8s.io/json" diff --git a/vendor/sigs.k8s.io/json/internal/golang/encoding/json/decode.go b/vendor/sigs.k8s.io/json/internal/golang/encoding/json/decode.go new file mode 100644 index 0000000000000..a047d981bfea2 --- /dev/null +++ b/vendor/sigs.k8s.io/json/internal/golang/encoding/json/decode.go @@ -0,0 +1,1402 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Represents JSON data structure using native Go types: booleans, floats, +// strings, arrays, and maps. + +package json + +import ( + "encoding" + "encoding/base64" + "fmt" + "reflect" + "strconv" + "strings" + "unicode" + "unicode/utf16" + "unicode/utf8" +) + +// Unmarshal parses the JSON-encoded data and stores the result +// in the value pointed to by v. If v is nil or not a pointer, +// Unmarshal returns an InvalidUnmarshalError. +// +// Unmarshal uses the inverse of the encodings that +// Marshal uses, allocating maps, slices, and pointers as necessary, +// with the following additional rules: +// +// To unmarshal JSON into a pointer, Unmarshal first handles the case of +// the JSON being the JSON literal null. In that case, Unmarshal sets +// the pointer to nil. Otherwise, Unmarshal unmarshals the JSON into +// the value pointed at by the pointer. If the pointer is nil, Unmarshal +// allocates a new value for it to point to. +// +// To unmarshal JSON into a value implementing the Unmarshaler interface, +// Unmarshal calls that value's UnmarshalJSON method, including +// when the input is a JSON null. +// Otherwise, if the value implements encoding.TextUnmarshaler +// and the input is a JSON quoted string, Unmarshal calls that value's +// UnmarshalText method with the unquoted form of the string. +// +// To unmarshal JSON into a struct, Unmarshal matches incoming object +// keys to the keys used by Marshal (either the struct field name or its tag), +// preferring an exact match but also accepting a case-insensitive match. By +// default, object keys which don't have a corresponding struct field are +// ignored (see Decoder.DisallowUnknownFields for an alternative). +// +// To unmarshal JSON into an interface value, +// Unmarshal stores one of these in the interface value: +// +// bool, for JSON booleans +// float64, for JSON numbers +// string, for JSON strings +// []interface{}, for JSON arrays +// map[string]interface{}, for JSON objects +// nil for JSON null +// +// To unmarshal a JSON array into a slice, Unmarshal resets the slice length +// to zero and then appends each element to the slice. +// As a special case, to unmarshal an empty JSON array into a slice, +// Unmarshal replaces the slice with a new empty slice. +// +// To unmarshal a JSON array into a Go array, Unmarshal decodes +// JSON array elements into corresponding Go array elements. +// If the Go array is smaller than the JSON array, +// the additional JSON array elements are discarded. 
+// If the JSON array is smaller than the Go array, +// the additional Go array elements are set to zero values. +// +// To unmarshal a JSON object into a map, Unmarshal first establishes a map to +// use. If the map is nil, Unmarshal allocates a new map. Otherwise Unmarshal +// reuses the existing map, keeping existing entries. Unmarshal then stores +// key-value pairs from the JSON object into the map. The map's key type must +// either be any string type, an integer, implement json.Unmarshaler, or +// implement encoding.TextUnmarshaler. +// +// If a JSON value is not appropriate for a given target type, +// or if a JSON number overflows the target type, Unmarshal +// skips that field and completes the unmarshaling as best it can. +// If no more serious errors are encountered, Unmarshal returns +// an UnmarshalTypeError describing the earliest such error. In any +// case, it's not guaranteed that all the remaining fields following +// the problematic one will be unmarshaled into the target object. +// +// The JSON null value unmarshals into an interface, map, pointer, or slice +// by setting that Go value to nil. Because null is often used in JSON to mean +// ``not present,'' unmarshaling a JSON null into any other Go type has no effect +// on the value and produces no error. +// +// When unmarshaling quoted strings, invalid UTF-8 or +// invalid UTF-16 surrogate pairs are not treated as an error. +// Instead, they are replaced by the Unicode replacement +// character U+FFFD. +// +func Unmarshal(data []byte, v interface{}, opts ...UnmarshalOpt) error { + // Check for well-formedness. + // Avoids filling out half a data structure + // before discovering a JSON syntax error. + var d decodeState + + for _, opt := range opts { + opt(&d) + } + + err := checkValid(data, &d.scan) + if err != nil { + return err + } + + d.init(data) + return d.unmarshal(v) +} + +// Unmarshaler is the interface implemented by types +// that can unmarshal a JSON description of themselves. +// The input can be assumed to be a valid encoding of +// a JSON value. UnmarshalJSON must copy the JSON data +// if it wishes to retain the data after returning. +// +// By convention, to approximate the behavior of Unmarshal itself, +// Unmarshalers implement UnmarshalJSON([]byte("null")) as a no-op. +type Unmarshaler interface { + UnmarshalJSON([]byte) error +} + +/* +// An UnmarshalTypeError describes a JSON value that was +// not appropriate for a value of a specific Go type. +type UnmarshalTypeError struct { + Value string // description of JSON value - "bool", "array", "number -5" + Type reflect.Type // type of Go value it could not be assigned to + Offset int64 // error occurred after reading Offset bytes + Struct string // name of the struct type containing the field + Field string // the full path from root node to the field +} + +func (e *UnmarshalTypeError) Error() string { + if e.Struct != "" || e.Field != "" { + return "json: cannot unmarshal " + e.Value + " into Go struct field " + e.Struct + "." + e.Field + " of type " + e.Type.String() + } + return "json: cannot unmarshal " + e.Value + " into Go value of type " + e.Type.String() +} + +// An UnmarshalFieldError describes a JSON object key that +// led to an unexported (and therefore unwritable) struct field. +// +// Deprecated: No longer used; kept for compatibility. 
+type UnmarshalFieldError struct { + Key string + Type reflect.Type + Field reflect.StructField +} + +func (e *UnmarshalFieldError) Error() string { + return "json: cannot unmarshal object key " + strconv.Quote(e.Key) + " into unexported field " + e.Field.Name + " of type " + e.Type.String() +} + +// An InvalidUnmarshalError describes an invalid argument passed to Unmarshal. +// (The argument to Unmarshal must be a non-nil pointer.) +type InvalidUnmarshalError struct { + Type reflect.Type +} + +func (e *InvalidUnmarshalError) Error() string { + if e.Type == nil { + return "json: Unmarshal(nil)" + } + + if e.Type.Kind() != reflect.Ptr { + return "json: Unmarshal(non-pointer " + e.Type.String() + ")" + } + return "json: Unmarshal(nil " + e.Type.String() + ")" +} +*/ + +func (d *decodeState) unmarshal(v interface{}) error { + rv := reflect.ValueOf(v) + if rv.Kind() != reflect.Ptr || rv.IsNil() { + return &InvalidUnmarshalError{reflect.TypeOf(v)} + } + + d.scan.reset() + d.scanWhile(scanSkipSpace) + // We decode rv not rv.Elem because the Unmarshaler interface + // test must be applied at the top level of the value. + err := d.value(rv) + if err != nil { + return d.addErrorContext(err) + } + if d.savedError != nil { + return d.savedError + } + if len(d.savedStrictErrors) > 0 { + return &UnmarshalStrictError{Errors: d.savedStrictErrors} + } + return nil +} + +/* +// A Number represents a JSON number literal. +type Number string + +// String returns the literal text of the number. +func (n Number) String() string { return string(n) } + +// Float64 returns the number as a float64. +func (n Number) Float64() (float64, error) { + return strconv.ParseFloat(string(n), 64) +} + +// Int64 returns the number as an int64. +func (n Number) Int64() (int64, error) { + return strconv.ParseInt(string(n), 10, 64) +} +*/ + +// An errorContext provides context for type errors during decoding. +type errorContext struct { + Struct reflect.Type + FieldStack []string +} + +// decodeState represents the state while decoding a JSON value. +type decodeState struct { + data []byte + off int // next read offset in data + opcode int // last read result + scan scanner + errorContext *errorContext + savedError error + useNumber bool + disallowUnknownFields bool + + savedStrictErrors []error + seenStrictErrors map[string]struct{} + + caseSensitive bool + + preserveInts bool + + disallowDuplicateFields bool +} + +// readIndex returns the position of the last byte read. +func (d *decodeState) readIndex() int { + return d.off - 1 +} + +// phasePanicMsg is used as a panic message when we end up with something that +// shouldn't happen. It can indicate a bug in the JSON decoder, or that +// something is editing the data slice while the decoder executes. +const phasePanicMsg = "JSON decoder out of sync - data changing underfoot?" + +func (d *decodeState) init(data []byte) *decodeState { + d.data = data + d.off = 0 + d.savedError = nil + if d.errorContext != nil { + d.errorContext.Struct = nil + // Reuse the allocated space for the FieldStack slice. + d.errorContext.FieldStack = d.errorContext.FieldStack[:0] + } + return d +} + +// saveError saves the first err it is called with, +// for reporting at the end of the unmarshal. 
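+// Strict (non-fatal) errors are collected separately in savedStrictErrors
+// and surfaced as a single UnmarshalStrictError once decoding completes.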
+func (d *decodeState) saveError(err error) { + if d.savedError == nil { + d.savedError = d.addErrorContext(err) + } +} + +// addErrorContext returns a new error enhanced with information from d.errorContext +func (d *decodeState) addErrorContext(err error) error { + if d.errorContext != nil && (d.errorContext.Struct != nil || len(d.errorContext.FieldStack) > 0) { + switch err := err.(type) { + case *UnmarshalTypeError: + err.Struct = d.errorContext.Struct.Name() + err.Field = strings.Join(d.errorContext.FieldStack, ".") + } + } + return err +} + +// skip scans to the end of what was started. +func (d *decodeState) skip() { + s, data, i := &d.scan, d.data, d.off + depth := len(s.parseState) + for { + op := s.step(s, data[i]) + i++ + if len(s.parseState) < depth { + d.off = i + d.opcode = op + return + } + } +} + +// scanNext processes the byte at d.data[d.off]. +func (d *decodeState) scanNext() { + if d.off < len(d.data) { + d.opcode = d.scan.step(&d.scan, d.data[d.off]) + d.off++ + } else { + d.opcode = d.scan.eof() + d.off = len(d.data) + 1 // mark processed EOF with len+1 + } +} + +// scanWhile processes bytes in d.data[d.off:] until it +// receives a scan code not equal to op. +func (d *decodeState) scanWhile(op int) { + s, data, i := &d.scan, d.data, d.off + for i < len(data) { + newOp := s.step(s, data[i]) + i++ + if newOp != op { + d.opcode = newOp + d.off = i + return + } + } + + d.off = len(data) + 1 // mark processed EOF with len+1 + d.opcode = d.scan.eof() +} + +// rescanLiteral is similar to scanWhile(scanContinue), but it specialises the +// common case where we're decoding a literal. The decoder scans the input +// twice, once for syntax errors and to check the length of the value, and the +// second to perform the decoding. +// +// Only in the second step do we use decodeState to tokenize literals, so we +// know there aren't any syntax errors. We can take advantage of that knowledge, +// and scan a literal's bytes much more quickly. +func (d *decodeState) rescanLiteral() { + data, i := d.data, d.off +Switch: + switch data[i-1] { + case '"': // string + for ; i < len(data); i++ { + switch data[i] { + case '\\': + i++ // escaped char + case '"': + i++ // tokenize the closing quote too + break Switch + } + } + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '-': // number + for ; i < len(data); i++ { + switch data[i] { + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', + '.', 'e', 'E', '+', '-': + default: + break Switch + } + } + case 't': // true + i += len("rue") + case 'f': // false + i += len("alse") + case 'n': // null + i += len("ull") + } + if i < len(data) { + d.opcode = stateEndValue(&d.scan, data[i]) + } else { + d.opcode = scanEnd + } + d.off = i + 1 +} + +// value consumes a JSON value from d.data[d.off-1:], decoding into v, and +// reads the following byte ahead. If v is invalid, the value is discarded. +// The first byte of the value has been read already. +func (d *decodeState) value(v reflect.Value) error { + switch d.opcode { + default: + panic(phasePanicMsg) + + case scanBeginArray: + if v.IsValid() { + if err := d.array(v); err != nil { + return err + } + } else { + d.skip() + } + d.scanNext() + + case scanBeginObject: + if v.IsValid() { + if err := d.object(v); err != nil { + return err + } + } else { + d.skip() + } + d.scanNext() + + case scanBeginLiteral: + // All bytes inside literal return scanContinue op code. 
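+		// readIndex marks the literal's first byte; rescanLiteral then skips
+		// to its end without re-running the full scanner state machine.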
+ start := d.readIndex() + d.rescanLiteral() + + if v.IsValid() { + if err := d.literalStore(d.data[start:d.readIndex()], v, false); err != nil { + return err + } + } + } + return nil +} + +type unquotedValue struct{} + +// valueQuoted is like value but decodes a +// quoted string literal or literal null into an interface value. +// If it finds anything other than a quoted string literal or null, +// valueQuoted returns unquotedValue{}. +func (d *decodeState) valueQuoted() interface{} { + switch d.opcode { + default: + panic(phasePanicMsg) + + case scanBeginArray, scanBeginObject: + d.skip() + d.scanNext() + + case scanBeginLiteral: + v := d.literalInterface() + switch v.(type) { + case nil, string: + return v + } + } + return unquotedValue{} +} + +// indirect walks down v allocating pointers as needed, +// until it gets to a non-pointer. +// If it encounters an Unmarshaler, indirect stops and returns that. +// If decodingNull is true, indirect stops at the first settable pointer so it +// can be set to nil. +func indirect(v reflect.Value, decodingNull bool) (Unmarshaler, encoding.TextUnmarshaler, reflect.Value) { + // Issue #24153 indicates that it is generally not a guaranteed property + // that you may round-trip a reflect.Value by calling Value.Addr().Elem() + // and expect the value to still be settable for values derived from + // unexported embedded struct fields. + // + // The logic below effectively does this when it first addresses the value + // (to satisfy possible pointer methods) and continues to dereference + // subsequent pointers as necessary. + // + // After the first round-trip, we set v back to the original value to + // preserve the original RW flags contained in reflect.Value. + v0 := v + haveAddr := false + + // If v is a named type and is addressable, + // start with its address, so that if the type has pointer methods, + // we find them. + if v.Kind() != reflect.Ptr && v.Type().Name() != "" && v.CanAddr() { + haveAddr = true + v = v.Addr() + } + for { + // Load value from interface, but only if the result will be + // usefully addressable. + if v.Kind() == reflect.Interface && !v.IsNil() { + e := v.Elem() + if e.Kind() == reflect.Ptr && !e.IsNil() && (!decodingNull || e.Elem().Kind() == reflect.Ptr) { + haveAddr = false + v = e + continue + } + } + + if v.Kind() != reflect.Ptr { + break + } + + if decodingNull && v.CanSet() { + break + } + + // Prevent infinite loop if v is an interface pointing to its own address: + // var v interface{} + // v = &v + if v.Elem().Kind() == reflect.Interface && v.Elem().Elem() == v { + v = v.Elem() + break + } + if v.IsNil() { + v.Set(reflect.New(v.Type().Elem())) + } + if v.Type().NumMethod() > 0 && v.CanInterface() { + if u, ok := v.Interface().(Unmarshaler); ok { + return u, nil, reflect.Value{} + } + if !decodingNull { + if u, ok := v.Interface().(encoding.TextUnmarshaler); ok { + return nil, u, reflect.Value{} + } + } + } + + if haveAddr { + v = v0 // restore original value after round-trip Value.Addr().Elem() + haveAddr = false + } else { + v = v.Elem() + } + } + return nil, nil, v +} + +// array consumes an array from d.data[d.off-1:], decoding into v. +// The first byte of the array ('[') has been read already. +func (d *decodeState) array(v reflect.Value) error { + // Check for unmarshaler. 
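+	// indirect returns an Unmarshaler or a TextUnmarshaler if one is found
+	// while dereferencing pointers, otherwise the settable element value.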
+ u, ut, pv := indirect(v, false) + if u != nil { + start := d.readIndex() + d.skip() + return u.UnmarshalJSON(d.data[start:d.off]) + } + if ut != nil { + d.saveError(&UnmarshalTypeError{Value: "array", Type: v.Type(), Offset: int64(d.off)}) + d.skip() + return nil + } + v = pv + + // Check type of target. + switch v.Kind() { + case reflect.Interface: + if v.NumMethod() == 0 { + // Decoding into nil interface? Switch to non-reflect code. + ai := d.arrayInterface() + v.Set(reflect.ValueOf(ai)) + return nil + } + // Otherwise it's invalid. + fallthrough + default: + d.saveError(&UnmarshalTypeError{Value: "array", Type: v.Type(), Offset: int64(d.off)}) + d.skip() + return nil + case reflect.Array, reflect.Slice: + break + } + + i := 0 + for { + // Look ahead for ] - can only happen on first iteration. + d.scanWhile(scanSkipSpace) + if d.opcode == scanEndArray { + break + } + + // Get element of array, growing if necessary. + if v.Kind() == reflect.Slice { + // Grow slice if necessary + if i >= v.Cap() { + newcap := v.Cap() + v.Cap()/2 + if newcap < 4 { + newcap = 4 + } + newv := reflect.MakeSlice(v.Type(), v.Len(), newcap) + reflect.Copy(newv, v) + v.Set(newv) + } + if i >= v.Len() { + v.SetLen(i + 1) + } + } + + if i < v.Len() { + // Decode into element. + if err := d.value(v.Index(i)); err != nil { + return err + } + } else { + // Ran out of fixed array: skip. + if err := d.value(reflect.Value{}); err != nil { + return err + } + } + i++ + + // Next token must be , or ]. + if d.opcode == scanSkipSpace { + d.scanWhile(scanSkipSpace) + } + if d.opcode == scanEndArray { + break + } + if d.opcode != scanArrayValue { + panic(phasePanicMsg) + } + } + + if i < v.Len() { + if v.Kind() == reflect.Array { + // Array. Zero the rest. + z := reflect.Zero(v.Type().Elem()) + for ; i < v.Len(); i++ { + v.Index(i).Set(z) + } + } else { + v.SetLen(i) + } + } + if i == 0 && v.Kind() == reflect.Slice { + v.Set(reflect.MakeSlice(v.Type(), 0, 0)) + } + return nil +} + +var nullLiteral = []byte("null") +var textUnmarshalerType = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem() + +// object consumes an object from d.data[d.off-1:], decoding into v. +// The first byte ('{') of the object has been read already. +func (d *decodeState) object(v reflect.Value) error { + // Check for unmarshaler. + u, ut, pv := indirect(v, false) + if u != nil { + start := d.readIndex() + d.skip() + return u.UnmarshalJSON(d.data[start:d.off]) + } + if ut != nil { + d.saveError(&UnmarshalTypeError{Value: "object", Type: v.Type(), Offset: int64(d.off)}) + d.skip() + return nil + } + v = pv + t := v.Type() + + // Decoding into nil interface? Switch to non-reflect code. + if v.Kind() == reflect.Interface && v.NumMethod() == 0 { + oi := d.objectInterface() + v.Set(reflect.ValueOf(oi)) + return nil + } + + var fields structFields + var checkDuplicateField func(fieldNameIndex int, fieldName string) + + // Check type of target: + // struct or + // map[T1]T2 where T1 is string, an integer type, + // or an encoding.TextUnmarshaler + switch v.Kind() { + case reflect.Map: + // Map key must either have string kind, have an integer kind, + // or be an encoding.TextUnmarshaler. 
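+		// Any other key kind is rejected below with an UnmarshalTypeError.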
+		switch t.Key().Kind() {
+		case reflect.String,
+			reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
+			reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+		default:
+			if !reflect.PtrTo(t.Key()).Implements(textUnmarshalerType) {
+				d.saveError(&UnmarshalTypeError{Value: "object", Type: t, Offset: int64(d.off)})
+				d.skip()
+				return nil
+			}
+		}
+		if v.IsNil() {
+			v.Set(reflect.MakeMap(t))
+		}
+
+		if d.disallowDuplicateFields {
+			var seenKeys map[string]struct{}
+			checkDuplicateField = func(fieldNameIndex int, fieldName string) {
+				if seenKeys == nil {
+					seenKeys = map[string]struct{}{}
+				}
+				if _, seen := seenKeys[fieldName]; seen {
+					d.saveStrictError(fmt.Errorf("duplicate field %q", fieldName))
+				} else {
+					seenKeys[fieldName] = struct{}{}
+				}
+			}
+		}
+
+	case reflect.Struct:
+		fields = cachedTypeFields(t)
+
+		if d.disallowDuplicateFields {
+			if len(fields.list) <= 64 {
+				// bitset by field index for structs with <= 64 fields
+				var seenKeys uint64
+				checkDuplicateField = func(fieldNameIndex int, fieldName string) {
+					if seenKeys&(1<<fieldNameIndex) != 0 {
+						d.saveStrictError(fmt.Errorf("duplicate field %q", fieldName))
+					} else {
+						seenKeys = seenKeys | (1 << fieldNameIndex)
+					}
+				}
+	default: // number
+		if c != '-' && (c < '0' || c > '9') {
+			if fromQuoted {
+				return fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())
+			}
+			panic(phasePanicMsg)
+		}
+		s := string(item)
+		switch v.Kind() {
+		default:
+			if v.Kind() == reflect.String && v.Type() == numberType {
+				// s must be a valid number, because it's
+				// already been tokenized.
+				v.SetString(s)
+				break
+			}
+			if fromQuoted {
+				return fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())
+			}
+			d.saveError(&UnmarshalTypeError{Value: "number", Type: v.Type(), Offset: int64(d.readIndex())})
+		case reflect.Interface:
+			n, err := d.convertNumber(s)
+			if err != nil {
+				d.saveError(err)
+				break
+			}
+			if v.NumMethod() != 0 {
+				d.saveError(&UnmarshalTypeError{Value: "number", Type: v.Type(), Offset: int64(d.readIndex())})
+				break
+			}
+			v.Set(reflect.ValueOf(n))
+
+		case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+			n, err := strconv.ParseInt(s, 10, 64)
+			if err != nil || v.OverflowInt(n) {
+				d.saveError(&UnmarshalTypeError{Value: "number " + s, Type: v.Type(), Offset: int64(d.readIndex())})
+				break
+			}
+			v.SetInt(n)
+
+		case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+			n, err := strconv.ParseUint(s, 10, 64)
+			if err != nil || v.OverflowUint(n) {
+				d.saveError(&UnmarshalTypeError{Value: "number " + s, Type: v.Type(), Offset: int64(d.readIndex())})
+				break
+			}
+			v.SetUint(n)
+
+		case reflect.Float32, reflect.Float64:
+			n, err := strconv.ParseFloat(s, v.Type().Bits())
+			if err != nil || v.OverflowFloat(n) {
+				d.saveError(&UnmarshalTypeError{Value: "number " + s, Type: v.Type(), Offset: int64(d.readIndex())})
+				break
+			}
+			v.SetFloat(n)
+		}
+	}
+	return nil
+}
+
+// The xxxInterface routines build up a value to be stored
+// in an empty interface. They are not strictly necessary,
+// but they avoid the weight of reflection in this common case.
+
+// valueInterface is like value but returns interface{}
+func (d *decodeState) valueInterface() (val interface{}) {
+	switch d.opcode {
+	default:
+		panic(phasePanicMsg)
+	case scanBeginArray:
+		val = d.arrayInterface()
+		d.scanNext()
+	case scanBeginObject:
+		val = d.objectInterface()
+		d.scanNext()
+	case scanBeginLiteral:
+		val = d.literalInterface()
+	}
+	return
+}
+
+// arrayInterface is like array but returns []interface{}.
+func (d *decodeState) arrayInterface() []interface{} { + var v = make([]interface{}, 0) + for { + // Look ahead for ] - can only happen on first iteration. + d.scanWhile(scanSkipSpace) + if d.opcode == scanEndArray { + break + } + + v = append(v, d.valueInterface()) + + // Next token must be , or ]. + if d.opcode == scanSkipSpace { + d.scanWhile(scanSkipSpace) + } + if d.opcode == scanEndArray { + break + } + if d.opcode != scanArrayValue { + panic(phasePanicMsg) + } + } + return v +} + +// objectInterface is like object but returns map[string]interface{}. +func (d *decodeState) objectInterface() map[string]interface{} { + m := make(map[string]interface{}) + for { + // Read opening " of string key or closing }. + d.scanWhile(scanSkipSpace) + if d.opcode == scanEndObject { + // closing } - can only happen on first iteration. + break + } + if d.opcode != scanBeginLiteral { + panic(phasePanicMsg) + } + + // Read string key. + start := d.readIndex() + d.rescanLiteral() + item := d.data[start:d.readIndex()] + key, ok := unquote(item) + if !ok { + panic(phasePanicMsg) + } + + // Read : before value. + if d.opcode == scanSkipSpace { + d.scanWhile(scanSkipSpace) + } + if d.opcode != scanObjectKey { + panic(phasePanicMsg) + } + d.scanWhile(scanSkipSpace) + + if d.disallowDuplicateFields { + if _, exists := m[key]; exists { + d.saveStrictError(fmt.Errorf("duplicate field %q", key)) + } + } + + // Read value. + m[key] = d.valueInterface() + + // Next token must be , or }. + if d.opcode == scanSkipSpace { + d.scanWhile(scanSkipSpace) + } + if d.opcode == scanEndObject { + break + } + if d.opcode != scanObjectValue { + panic(phasePanicMsg) + } + } + return m +} + +// literalInterface consumes and returns a literal from d.data[d.off-1:] and +// it reads the following byte ahead. The first byte of the literal has been +// read already (that's how the caller knows it's a literal). +func (d *decodeState) literalInterface() interface{} { + // All bytes inside literal return scanContinue op code. + start := d.readIndex() + d.rescanLiteral() + + item := d.data[start:d.readIndex()] + + switch c := item[0]; c { + case 'n': // null + return nil + + case 't', 'f': // true, false + return c == 't' + + case '"': // string + s, ok := unquote(item) + if !ok { + panic(phasePanicMsg) + } + return s + + default: // number + if c != '-' && (c < '0' || c > '9') { + panic(phasePanicMsg) + } + n, err := d.convertNumber(string(item)) + if err != nil { + d.saveError(err) + } + return n + } +} + +// getu4 decodes \uXXXX from the beginning of s, returning the hex value, +// or it returns -1. +func getu4(s []byte) rune { + if len(s) < 6 || s[0] != '\\' || s[1] != 'u' { + return -1 + } + var r rune + for _, c := range s[2:6] { + switch { + case '0' <= c && c <= '9': + c = c - '0' + case 'a' <= c && c <= 'f': + c = c - 'a' + 10 + case 'A' <= c && c <= 'F': + c = c - 'A' + 10 + default: + return -1 + } + r = r*16 + rune(c) + } + return r +} + +// unquote converts a quoted JSON string literal s into an actual string t. +// The rules are different than for Go, so cannot use strconv.Unquote. +func unquote(s []byte) (t string, ok bool) { + s, ok = unquoteBytes(s) + t = string(s) + return +} + +func unquoteBytes(s []byte) (t []byte, ok bool) { + if len(s) < 2 || s[0] != '"' || s[len(s)-1] != '"' { + return + } + s = s[1 : len(s)-1] + + // Check for unusual characters. If there are none, + // then no unquoting is needed, so return a slice of the + // original bytes. 
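// Editor's note: an illustrative sketch of the xxxInterface fallback above.
// With no concrete target type, objects become map[string]interface{},
// arrays become []interface{}, and literals become bool/string/nil/number.
// Stdlib encoding/json is a stand-in here; the number result is the one
// point of divergence to note, since the fork's convertNumber can also
// preserve integer-formatted numbers as int64 (the "PreserveInts" entry
// points) rather than always producing float64.
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	var v interface{}
	_ = json.Unmarshal([]byte(`{"n": 1, "xs": [true, "s", null]}`), &v)
	m := v.(map[string]interface{})
	fmt.Printf("%T\n", m["n"])  // float64 in stdlib (int64 with PreserveInts)
	fmt.Printf("%T\n", m["xs"]) // []interface {}
	xs := m["xs"].([]interface{})
	fmt.Println(xs[0], xs[1], xs[2]) // true s <nil>
}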
+ r := 0 + for r < len(s) { + c := s[r] + if c == '\\' || c == '"' || c < ' ' { + break + } + if c < utf8.RuneSelf { + r++ + continue + } + rr, size := utf8.DecodeRune(s[r:]) + if rr == utf8.RuneError && size == 1 { + break + } + r += size + } + if r == len(s) { + return s, true + } + + b := make([]byte, len(s)+2*utf8.UTFMax) + w := copy(b, s[0:r]) + for r < len(s) { + // Out of room? Can only happen if s is full of + // malformed UTF-8 and we're replacing each + // byte with RuneError. + if w >= len(b)-2*utf8.UTFMax { + nb := make([]byte, (len(b)+utf8.UTFMax)*2) + copy(nb, b[0:w]) + b = nb + } + switch c := s[r]; { + case c == '\\': + r++ + if r >= len(s) { + return + } + switch s[r] { + default: + return + case '"', '\\', '/', '\'': + b[w] = s[r] + r++ + w++ + case 'b': + b[w] = '\b' + r++ + w++ + case 'f': + b[w] = '\f' + r++ + w++ + case 'n': + b[w] = '\n' + r++ + w++ + case 'r': + b[w] = '\r' + r++ + w++ + case 't': + b[w] = '\t' + r++ + w++ + case 'u': + r-- + rr := getu4(s[r:]) + if rr < 0 { + return + } + r += 6 + if utf16.IsSurrogate(rr) { + rr1 := getu4(s[r:]) + if dec := utf16.DecodeRune(rr, rr1); dec != unicode.ReplacementChar { + // A valid pair; consume. + r += 6 + w += utf8.EncodeRune(b[w:], dec) + break + } + // Invalid surrogate; fall back to replacement rune. + rr = unicode.ReplacementChar + } + w += utf8.EncodeRune(b[w:], rr) + } + + // Quote, control characters are invalid. + case c == '"', c < ' ': + return + + // ASCII + case c < utf8.RuneSelf: + b[w] = c + r++ + w++ + + // Coerce to well-formed UTF-8. + default: + rr, size := utf8.DecodeRune(s[r:]) + r += size + w += utf8.EncodeRune(b[w:], rr) + } + } + return b[0:w], true +} diff --git a/vendor/sigs.k8s.io/json/internal/golang/encoding/json/encode.go b/vendor/sigs.k8s.io/json/internal/golang/encoding/json/encode.go new file mode 100644 index 0000000000000..e473e615a9ed4 --- /dev/null +++ b/vendor/sigs.k8s.io/json/internal/golang/encoding/json/encode.go @@ -0,0 +1,1419 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package json implements encoding and decoding of JSON as defined in +// RFC 7159. The mapping between JSON and Go values is described +// in the documentation for the Marshal and Unmarshal functions. +// +// See "JSON and Go" for an introduction to this package: +// https://golang.org/doc/articles/json_and_go.html +package json + +import ( + "bytes" + "encoding" + "encoding/base64" + "fmt" + "math" + "reflect" + "sort" + "strconv" + "strings" + "sync" + "unicode" + "unicode/utf8" +) + +// Marshal returns the JSON encoding of v. +// +// Marshal traverses the value v recursively. +// If an encountered value implements the Marshaler interface +// and is not a nil pointer, Marshal calls its MarshalJSON method +// to produce JSON. If no MarshalJSON method is present but the +// value implements encoding.TextMarshaler instead, Marshal calls +// its MarshalText method and encodes the result as a JSON string. +// The nil pointer exception is not strictly necessary +// but mimics a similar, necessary exception in the behavior of +// UnmarshalJSON. +// +// Otherwise, Marshal uses the following type-dependent default encodings: +// +// Boolean values encode as JSON booleans. +// +// Floating point, integer, and Number values encode as JSON numbers. +// +// String values encode as JSON strings coerced to valid UTF-8, +// replacing invalid bytes with the Unicode replacement rune. 
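// Editor's note: a small sketch of the \uXXXX and surrogate-pair handling in
// getu4/unquoteBytes above, shown via stdlib encoding/json (the same
// observable behavior is assumed for the fork): a valid high/low surrogate
// pair combines into a single rune, while a lone surrogate degrades to the
// replacement rune U+FFFD.
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	var s string
	_ = json.Unmarshal([]byte(`"\ud83d\ude00"`), &s)
	fmt.Println(s) // U+1F600, decoded from the surrogate pair

	_ = json.Unmarshal([]byte(`"\ud83d"`), &s)
	fmt.Println(s == "\ufffd") // true - lone surrogate becomes the replacement rune
}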
+// So that the JSON will be safe to embed inside HTML <script> tags,
+// the string is encoded using HTMLEscape, which replaces "<", ">", "&",
+// U+2028, and U+2029 with "\u003c", "\u003e", "\u0026", "\u2028", and
+// "\u2029". This replacement can be disabled when using an Encoder,
+// by calling SetEscapeHTML(false).
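// Editor's note: a sketch of the HTML-safe encoding described above, using
// the stdlib API that this vendored fork inherits. Marshal escapes <, >,
// and & by default; SetEscapeHTML(false) on an Encoder disables this.
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
)

func main() {
	b, _ := json.Marshal("<script>")
	fmt.Println(string(b)) // "\u003cscript\u003e"

	var buf bytes.Buffer
	enc := json.NewEncoder(&buf)
	enc.SetEscapeHTML(false)
	_ = enc.Encode("<script>")
	fmt.Print(buf.String()) // "<script>"
}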